diff --git a/cmd/zdb/zdb.c b/cmd/zdb/zdb.c
index 6e3539d938ef..02fb55944e05 100644
--- a/cmd/zdb/zdb.c
+++ b/cmd/zdb/zdb.c
@@ -1,4639 +1,5209 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017 Lawrence Livermore National Security, LLC.
* Copyright (c) 2015, 2017, Intel Corporation.
*/
#include <stdio.h>
#include <unistd.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <ctype.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_sa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_pool.h>
#include <sys/dbuf.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/dmu_traverse.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/zfs_fuid.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/blkptr.h>
#include <sys/dsl_crypt.h>
#include <zfs_comutil.h>
#include <libzfs.h>
#include "zdb.h"
#define ZDB_COMPRESS_NAME(idx) ((idx) < ZIO_COMPRESS_FUNCTIONS ? \
zio_compress_table[(idx)].ci_name : "UNKNOWN")
#define ZDB_CHECKSUM_NAME(idx) ((idx) < ZIO_CHECKSUM_FUNCTIONS ? \
zio_checksum_table[(idx)].ci_name : "UNKNOWN")
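/*
 * Map a DMU object type to an index into the object_viewer[] table: the
 * new-style DMU_OTN_* ZAP and uint64 types fold into the generic "other"
 * entries, and anything else out of range maps to DMU_OT_NUMTYPES (unknown).
 */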
#define ZDB_OT_TYPE(idx) ((idx) < DMU_OT_NUMTYPES ? (idx) : \
- (((idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA) ? \
- DMU_OT_ZAP_OTHER : DMU_OT_NUMTYPES))
+ (idx) == DMU_OTN_ZAP_DATA || (idx) == DMU_OTN_ZAP_METADATA ? \
+ DMU_OT_ZAP_OTHER : \
+ (idx) == DMU_OTN_UINT64_DATA || (idx) == DMU_OTN_UINT64_METADATA ? \
+ DMU_OT_UINT64_OTHER : DMU_OT_NUMTYPES)
static char *
zdb_ot_name(dmu_object_type_t type)
{
if (type < DMU_OT_NUMTYPES)
return (dmu_ot[type].ot_name);
else if ((type & DMU_OT_NEWTYPE) &&
((type & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS))
return (dmu_ot_byteswap[type & DMU_OT_BYTESWAP_MASK].ob_name);
else
return ("UNKNOWN");
}
extern int reference_tracking_enable;
extern int zfs_recover;
extern uint64_t zfs_arc_max, zfs_arc_meta_limit;
extern int zfs_vdev_async_read_max_active;
static const char cmdname[] = "zdb";
uint8_t dump_opt[256];
typedef void object_viewer_t(objset_t *, uint64_t, void *data, size_t size);
uint64_t *zopt_object = NULL;
static unsigned zopt_objects = 0;
libzfs_handle_t *g_zfs;
uint64_t max_inflight = 1000;
static void snprintf_blkptr_compact(char *, size_t, const blkptr_t *);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
usage(void)
{
(void) fprintf(stderr,
"Usage:\t%s [-AbcdDFGhiLMPsvX] [-e [-V] [-p <path> ...]] "
"[-I <inflight I/Os>]\n"
"\t\t[-o <var>=<value>]... [-t <txg>] [-U <cache>] [-x <dumpdir>]\n"
"\t\t[<poolname> [<object> ...]]\n"
"\t%s [-AdiPv] [-e [-V] [-p <path> ...]] [-U <cache>] <dataset>\n"
"\t\t[<object> ...]\n"
"\t%s -C [-A] [-U <cache>]\n"
"\t%s -l [-Aqu] <device>\n"
"\t%s -m [-AFLPX] [-e [-V] [-p <path> ...]] [-t <txg>] "
"[-U <cache>]\n\t\t<poolname> [<vdev> [<metaslab> ...]]\n"
"\t%s -O <dataset> <path>\n"
"\t%s -R [-A] [-e [-V] [-p <path> ...]] [-U <cache>]\n"
"\t\t<poolname> <vdev>:<offset>:<size>[:<flags>]\n"
"\t%s -E [-A] word0:word1:...:word15\n"
"\t%s -S [-AP] [-e [-V] [-p <path> ...]] [-U <cache>] "
"<poolname>\n\n",
cmdname, cmdname, cmdname, cmdname, cmdname, cmdname, cmdname,
cmdname, cmdname);
(void) fprintf(stderr, " Dataset name must include at least one "
"separator character '/' or '@'\n");
(void) fprintf(stderr, " If dataset name is specified, only that "
"dataset is dumped\n");
(void) fprintf(stderr, " If object numbers are specified, only "
"those objects are dumped\n\n");
(void) fprintf(stderr, " Options to control amount of output:\n");
(void) fprintf(stderr, " -b block statistics\n");
(void) fprintf(stderr, " -c checksum all metadata (twice for "
"all data) blocks\n");
(void) fprintf(stderr, " -C config (or cachefile if alone)\n");
(void) fprintf(stderr, " -d dataset(s)\n");
(void) fprintf(stderr, " -D dedup statistics\n");
(void) fprintf(stderr, " -E decode and display block from an "
"embedded block pointer\n");
(void) fprintf(stderr, " -h pool history\n");
(void) fprintf(stderr, " -i intent logs\n");
(void) fprintf(stderr, " -l read label contents\n");
(void) fprintf(stderr, " -L disable leak tracking (do not "
"load spacemaps)\n");
(void) fprintf(stderr, " -m metaslabs\n");
(void) fprintf(stderr, " -M metaslab groups\n");
(void) fprintf(stderr, " -O perform object lookups by path\n");
(void) fprintf(stderr, " -R read and display block from a "
"device\n");
(void) fprintf(stderr, " -s report stats on zdb's I/O\n");
(void) fprintf(stderr, " -S simulate dedup to measure effect\n");
(void) fprintf(stderr, " -v verbose (applies to all "
"others)\n\n");
(void) fprintf(stderr, " Below options are intended for use "
"with other options:\n");
(void) fprintf(stderr, " -A ignore assertions (-A), enable "
"panic recovery (-AA) or both (-AAA)\n");
(void) fprintf(stderr, " -e pool is exported/destroyed/"
"has altroot/not in a cachefile\n");
(void) fprintf(stderr, " -F attempt automatic rewind within "
"safe range of transaction groups\n");
(void) fprintf(stderr, " -G dump zfs_dbgmsg buffer before "
"exiting\n");
(void) fprintf(stderr, " -I <number of inflight I/Os> -- "
"specify the maximum number of\n "
"checksumming I/Os [default is 200]\n");
(void) fprintf(stderr, " -o <variable>=<value> set global "
"variable to an unsigned 32-bit integer\n");
(void) fprintf(stderr, " -p <path> -- use one or more with "
"-e to specify path to vdev dir\n");
(void) fprintf(stderr, " -P print numbers in parseable form\n");
(void) fprintf(stderr, " -q don't print label contents\n");
(void) fprintf(stderr, " -t <txg> -- highest txg to use when "
"searching for uberblocks\n");
(void) fprintf(stderr, " -u uberblock\n");
(void) fprintf(stderr, " -U <cachefile_path> -- use alternate "
"cachefile\n");
(void) fprintf(stderr, " -V do verbatim import\n");
(void) fprintf(stderr, " -x <dumpdir> -- "
"dump all read blocks into specified directory\n");
(void) fprintf(stderr, " -X attempt extreme rewind (does not "
"work with dataset)\n");
(void) fprintf(stderr, "Specify an option more than once (e.g. -bb) "
"to make only that option verbose\n");
(void) fprintf(stderr, "Default is to dump everything non-verbosely\n");
exit(1);
}
static void
dump_debug_buffer(void)
{
if (dump_opt['G']) {
(void) printf("\n");
zfs_dbgmsg_print("zdb");
}
}
/*
* Called for usage errors that are discovered after a call to spa_open(),
* dmu_bonus_hold(), or pool_match(). abort() is called for other errors.
*/
static void
fatal(const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) fprintf(stderr, "%s: ", cmdname);
(void) vfprintf(stderr, fmt, ap);
va_end(ap);
(void) fprintf(stderr, "\n");
dump_debug_buffer();
exit(1);
}
/* ARGSUSED */
static void
dump_packed_nvlist(objset_t *os, uint64_t object, void *data, size_t size)
{
nvlist_t *nv;
size_t nvsize = *(uint64_t *)data;
char *packed = umem_alloc(nvsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, object, 0, nvsize, packed, DMU_READ_PREFETCH));
VERIFY(nvlist_unpack(packed, nvsize, &nv, 0) == 0);
umem_free(packed, nvsize);
dump_nvlist(nv, 8);
nvlist_free(nv);
}
/* ARGSUSED */
static void
dump_history_offsets(objset_t *os, uint64_t object, void *data, size_t size)
{
spa_history_phys_t *shp = data;
if (shp == NULL)
return;
(void) printf("\t\tpool_create_len = %llu\n",
(u_longlong_t)shp->sh_pool_create_len);
(void) printf("\t\tphys_max_off = %llu\n",
(u_longlong_t)shp->sh_phys_max_off);
(void) printf("\t\tbof = %llu\n",
(u_longlong_t)shp->sh_bof);
(void) printf("\t\teof = %llu\n",
(u_longlong_t)shp->sh_eof);
(void) printf("\t\trecords_lost = %llu\n",
(u_longlong_t)shp->sh_records_lost);
}
static void
zdb_nicenum(uint64_t num, char *buf, size_t buflen)
{
if (dump_opt['P'])
(void) snprintf(buf, buflen, "%llu", (longlong_t)num);
else
nicenum(num, buf, buflen);
}
static const char histo_stars[] = "****************************************";
static const uint64_t histo_width = sizeof (histo_stars) - 1;
static void
dump_histogram(const uint64_t *histo, int size, int offset)
{
int i;
int minidx = size - 1;
int maxidx = 0;
uint64_t max = 0;
for (i = 0; i < size; i++) {
if (histo[i] > max)
max = histo[i];
if (histo[i] > 0 && i > maxidx)
maxidx = i;
if (histo[i] > 0 && i < minidx)
minidx = i;
}
if (max < histo_width)
max = histo_width;
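/*
 * Scale each row against the largest bucket so that the biggest bucket
 * prints a full histo_width worth of stars.
 */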
for (i = minidx; i <= maxidx; i++) {
(void) printf("\t\t\t%3u: %6llu %s\n",
i + offset, (u_longlong_t)histo[i],
&histo_stars[(max - histo[i]) * histo_width / max]);
}
}
static void
dump_zap_stats(objset_t *os, uint64_t object)
{
int error;
zap_stats_t zs;
error = zap_get_stats(os, object, &zs);
if (error)
return;
if (zs.zs_ptrtbl_len == 0) {
ASSERT(zs.zs_num_blocks == 1);
(void) printf("\tmicrozap: %llu bytes, %llu entries\n",
(u_longlong_t)zs.zs_blocksize,
(u_longlong_t)zs.zs_num_entries);
return;
}
(void) printf("\tFat ZAP stats:\n");
(void) printf("\t\tPointer table:\n");
(void) printf("\t\t\t%llu elements\n",
(u_longlong_t)zs.zs_ptrtbl_len);
(void) printf("\t\t\tzt_blk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_blk);
(void) printf("\t\t\tzt_numblks: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_numblks);
(void) printf("\t\t\tzt_shift: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_zt_shift);
(void) printf("\t\t\tzt_blks_copied: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_blks_copied);
(void) printf("\t\t\tzt_nextblk: %llu\n",
(u_longlong_t)zs.zs_ptrtbl_nextblk);
(void) printf("\t\tZAP entries: %llu\n",
(u_longlong_t)zs.zs_num_entries);
(void) printf("\t\tLeaf blocks: %llu\n",
(u_longlong_t)zs.zs_num_leafs);
(void) printf("\t\tTotal blocks: %llu\n",
(u_longlong_t)zs.zs_num_blocks);
(void) printf("\t\tzap_block_type: 0x%llx\n",
(u_longlong_t)zs.zs_block_type);
(void) printf("\t\tzap_magic: 0x%llx\n",
(u_longlong_t)zs.zs_magic);
(void) printf("\t\tzap_salt: 0x%llx\n",
(u_longlong_t)zs.zs_salt);
(void) printf("\t\tLeafs with 2^n pointers:\n");
dump_histogram(zs.zs_leafs_with_2n_pointers, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks with n*5 entries:\n");
dump_histogram(zs.zs_blocks_with_n5_entries, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBlocks n/10 full:\n");
dump_histogram(zs.zs_blocks_n_tenths_full, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tEntries with n chunks:\n");
dump_histogram(zs.zs_entries_using_n_chunks, ZAP_HISTOGRAM_SIZE, 0);
(void) printf("\t\tBuckets with n entries:\n");
dump_histogram(zs.zs_buckets_with_n_entries, ZAP_HISTOGRAM_SIZE, 0);
}
/*ARGSUSED*/
static void
dump_none(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_unknown(objset_t *os, uint64_t object, void *data, size_t size)
{
(void) printf("\tUNKNOWN OBJECT TYPE\n");
}
/*ARGSUSED*/
static void
dump_uint8(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_uint64(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
void *prop;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
prop = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
(void) zap_lookup(os, object, attr.za_name,
attr.za_integer_length, attr.za_num_integers, prop);
if (attr.za_integer_length == 1) {
(void) printf("%s", (char *)prop);
} else {
for (i = 0; i < attr.za_num_integers; i++) {
switch (attr.za_integer_length) {
case 2:
(void) printf("%u ",
((uint16_t *)prop)[i]);
break;
case 4:
(void) printf("%u ",
((uint32_t *)prop)[i]);
break;
case 8:
(void) printf("%lld ",
(u_longlong_t)((int64_t *)prop)[i]);
break;
}
}
}
(void) printf("\n");
umem_free(prop, attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
static void
dump_bpobj(objset_t *os, uint64_t object, void *data, size_t size)
{
bpobj_phys_t *bpop = data;
uint64_t i;
char bytes[32], comp[32], uncomp[32];
/* make sure the output won't get truncated */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
if (bpop == NULL)
return;
zdb_nicenum(bpop->bpo_bytes, bytes, sizeof (bytes));
zdb_nicenum(bpop->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpop->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf("\t\tnum_blkptrs = %llu\n",
(u_longlong_t)bpop->bpo_num_blkptrs);
(void) printf("\t\tbytes = %s\n", bytes);
if (size >= BPOBJ_SIZE_V1) {
(void) printf("\t\tcomp = %s\n", comp);
(void) printf("\t\tuncomp = %s\n", uncomp);
}
if (size >= sizeof (*bpop)) {
(void) printf("\t\tsubobjs = %llu\n",
(u_longlong_t)bpop->bpo_subobjs);
(void) printf("\t\tnum_subobjs = %llu\n",
(u_longlong_t)bpop->bpo_num_subobjs);
}
if (dump_opt['d'] < 5)
return;
for (i = 0; i < bpop->bpo_num_blkptrs; i++) {
char blkbuf[BP_SPRINTF_LEN];
blkptr_t bp;
int err = dmu_read(os, object,
i * sizeof (bp), sizeof (bp), &bp, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
break;
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), &bp);
(void) printf("\t%s\n", blkbuf);
}
}
/* ARGSUSED */
static void
dump_bpobj_subobjs(objset_t *os, uint64_t object, void *data, size_t size)
{
dmu_object_info_t doi;
int64_t i;
VERIFY0(dmu_object_info(os, object, &doi));
uint64_t *subobjs = kmem_alloc(doi.doi_max_offset, KM_SLEEP);
int err = dmu_read(os, object, 0, doi.doi_max_offset, subobjs, 0);
if (err != 0) {
(void) printf("got error %u from dmu_read\n", err);
kmem_free(subobjs, doi.doi_max_offset);
return;
}
int64_t last_nonzero = -1;
for (i = 0; i < doi.doi_max_offset / 8; i++) {
if (subobjs[i] != 0)
last_nonzero = i;
}
for (i = 0; i <= last_nonzero; i++) {
(void) printf("\t%llu\n", (u_longlong_t)subobjs[i]);
}
kmem_free(subobjs, doi.doi_max_offset);
}
/*ARGSUSED*/
static void
dump_ddt_zap(objset_t *os, uint64_t object, void *data, size_t size)
{
dump_zap_stats(os, object);
/* contents are printed elsewhere, properly decoded */
}
/*ARGSUSED*/
static void
dump_sa_attrs(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = ", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
(void) printf(" %llx : [%d:%d:%d]\n",
(u_longlong_t)attr.za_first_integer,
(int)ATTR_LENGTH(attr.za_first_integer),
(int)ATTR_BSWAP(attr.za_first_integer),
(int)ATTR_NUM(attr.za_first_integer));
}
zap_cursor_fini(&zc);
}
/*ARGSUSED*/
static void
dump_sa_layouts(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
uint16_t *layout_attrs;
unsigned i;
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = [", attr.za_name);
if (attr.za_num_integers == 0) {
(void) printf("\n");
continue;
}
VERIFY(attr.za_integer_length == 2);
layout_attrs = umem_zalloc(attr.za_num_integers *
attr.za_integer_length, UMEM_NOFAIL);
VERIFY(zap_lookup(os, object, attr.za_name,
attr.za_integer_length,
attr.za_num_integers, layout_attrs) == 0);
for (i = 0; i != attr.za_num_integers; i++)
(void) printf(" %d ", (int)layout_attrs[i]);
(void) printf("]\n");
umem_free(layout_attrs,
attr.za_num_integers * attr.za_integer_length);
}
zap_cursor_fini(&zc);
}
/*ARGSUSED*/
static void
dump_zpldir(objset_t *os, uint64_t object, void *data, size_t size)
{
zap_cursor_t zc;
zap_attribute_t attr;
const char *typenames[] = {
/* 0 */ "not specified",
/* 1 */ "FIFO",
/* 2 */ "Character Device",
/* 3 */ "3 (invalid)",
/* 4 */ "Directory",
/* 5 */ "5 (invalid)",
/* 6 */ "Block Device",
/* 7 */ "7 (invalid)",
/* 8 */ "Regular File",
/* 9 */ "9 (invalid)",
/* 10 */ "Symbolic Link",
/* 11 */ "11 (invalid)",
/* 12 */ "Socket",
/* 13 */ "Door",
/* 14 */ "Event Port",
/* 15 */ "15 (invalid)",
};
dump_zap_stats(os, object);
(void) printf("\n");
for (zap_cursor_init(&zc, os, object);
zap_cursor_retrieve(&zc, &attr) == 0;
zap_cursor_advance(&zc)) {
(void) printf("\t\t%s = %lld (type: %s)\n",
attr.za_name, ZFS_DIRENT_OBJ(attr.za_first_integer),
typenames[ZFS_DIRENT_TYPE(attr.za_first_integer)]);
}
zap_cursor_fini(&zc);
}
static int
get_dtl_refcount(vdev_t *vd)
{
int refcount = 0;
if (vd->vdev_ops->vdev_op_leaf) {
space_map_t *sm = vd->vdev_dtl_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
return (1);
return (0);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_dtl_refcount(vd->vdev_child[c]);
return (refcount);
}
static int
get_metaslab_refcount(vdev_t *vd)
{
int refcount = 0;
- if (vd->vdev_top == vd && !vd->vdev_removing) {
- for (unsigned m = 0; m < vd->vdev_ms_count; m++) {
+ if (vd->vdev_top == vd) {
+ for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
space_map_t *sm = vd->vdev_ms[m]->ms_sm;
if (sm != NULL &&
sm->sm_dbuf->db_size == sizeof (space_map_phys_t))
refcount++;
}
}
for (unsigned c = 0; c < vd->vdev_children; c++)
refcount += get_metaslab_refcount(vd->vdev_child[c]);
return (refcount);
}
+static int
+get_obsolete_refcount(vdev_t *vd)
+{
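+ /*
+ * Count one reference for each top-level vdev whose obsolete space map
+ * object carries a space_map_phys_t-sized bonus buffer, recursing into
+ * the children.
+ */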
+ int refcount = 0;
+
+ uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
+ if (vd->vdev_top == vd && obsolete_sm_obj != 0) {
+ dmu_object_info_t doi;
+ VERIFY0(dmu_object_info(vd->vdev_spa->spa_meta_objset,
+ obsolete_sm_obj, &doi));
+ if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
+ refcount++;
+ }
+ } else {
+ ASSERT3P(vd->vdev_obsolete_sm, ==, NULL);
+ ASSERT3U(obsolete_sm_obj, ==, 0);
+ }
+ for (unsigned c = 0; c < vd->vdev_children; c++) {
+ refcount += get_obsolete_refcount(vd->vdev_child[c]);
+ }
+
+ return (refcount);
+}
+
+static int
+get_prev_obsolete_spacemap_refcount(spa_t *spa)
+{
+ uint64_t prev_obj =
+ spa->spa_condensing_indirect_phys.scip_prev_obsolete_sm_object;
+ if (prev_obj != 0) {
+ dmu_object_info_t doi;
+ VERIFY0(dmu_object_info(spa->spa_meta_objset, prev_obj, &doi));
+ if (doi.doi_bonus_size == sizeof (space_map_phys_t)) {
+ return (1);
+ }
+ }
+ return (0);
+}
+
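/*
 * The spacemap_histogram feature refcount should equal the number of space
 * maps stored with a full space_map_phys_t bonus buffer: DTL space maps,
 * metaslab space maps, and the obsolete space maps counted above.
 */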
static int
verify_spacemap_refcounts(spa_t *spa)
{
uint64_t expected_refcount = 0;
uint64_t actual_refcount;
(void) feature_get_refcount(spa,
&spa_feature_table[SPA_FEATURE_SPACEMAP_HISTOGRAM],
&expected_refcount);
actual_refcount = get_dtl_refcount(spa->spa_root_vdev);
actual_refcount += get_metaslab_refcount(spa->spa_root_vdev);
+ actual_refcount += get_obsolete_refcount(spa->spa_root_vdev);
+ actual_refcount += get_prev_obsolete_spacemap_refcount(spa);
if (expected_refcount != actual_refcount) {
(void) printf("space map refcount mismatch: expected %lld != "
"actual %lld\n",
(longlong_t)expected_refcount,
(longlong_t)actual_refcount);
return (2);
}
return (0);
}
static void
dump_spacemap(objset_t *os, space_map_t *sm)
{
uint64_t alloc, offset, entry;
const char *ddata[] = { "ALLOC", "FREE", "CONDENSE", "INVALID",
- "INVALID", "INVALID", "INVALID", "INVALID" };
+ "INVALID", "INVALID", "INVALID", "INVALID" };
if (sm == NULL)
return;
+ (void) printf("space map object %llu:\n",
+ (longlong_t)sm->sm_phys->smp_object);
+ (void) printf(" smp_objsize = 0x%llx\n",
+ (longlong_t)sm->sm_phys->smp_objsize);
+ (void) printf(" smp_alloc = 0x%llx\n",
+ (longlong_t)sm->sm_phys->smp_alloc);
+
/*
* Print out the freelist entries in both encoded and decoded form.
*/
alloc = 0;
for (offset = 0; offset < space_map_length(sm);
offset += sizeof (entry)) {
uint8_t mapshift = sm->sm_shift;
VERIFY0(dmu_read(os, space_map_object(sm), offset,
sizeof (entry), &entry, DMU_READ_PREFETCH));
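/*
 * Debug entries record the action (ALLOC/FREE/CONDENSE), txg, and sync
 * pass that wrote them; ordinary entries encode a type bit plus an
 * offset/run pair in units of 1 << sm_shift.
 */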
if (SM_DEBUG_DECODE(entry)) {
(void) printf("\t [%6llu] %s: txg %llu, pass %llu\n",
(u_longlong_t)(offset / sizeof (entry)),
ddata[SM_DEBUG_ACTION_DECODE(entry)],
(u_longlong_t)SM_DEBUG_TXG_DECODE(entry),
(u_longlong_t)SM_DEBUG_SYNCPASS_DECODE(entry));
} else {
(void) printf("\t [%6llu] %c range:"
" %010llx-%010llx size: %06llx\n",
(u_longlong_t)(offset / sizeof (entry)),
SM_TYPE_DECODE(entry) == SM_ALLOC ? 'A' : 'F',
(u_longlong_t)((SM_OFFSET_DECODE(entry) <<
mapshift) + sm->sm_start),
(u_longlong_t)((SM_OFFSET_DECODE(entry) <<
mapshift) + sm->sm_start +
(SM_RUN_DECODE(entry) << mapshift)),
(u_longlong_t)(SM_RUN_DECODE(entry) << mapshift));
if (SM_TYPE_DECODE(entry) == SM_ALLOC)
alloc += SM_RUN_DECODE(entry) << mapshift;
else
alloc -= SM_RUN_DECODE(entry) << mapshift;
}
}
if (alloc != space_map_allocated(sm)) {
(void) printf("space_map_object alloc (%llu) INCONSISTENT "
"with space map summary (%llu)\n",
(u_longlong_t)space_map_allocated(sm), (u_longlong_t)alloc);
}
}
static void
dump_metaslab_stats(metaslab_t *msp)
{
char maxbuf[32];
range_tree_t *rt = msp->ms_tree;
avl_tree_t *t = &msp->ms_size_tree;
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
/* make sure nicenum has enough space */
CTASSERT(sizeof (maxbuf) >= NN_NUMBUF_SZ);
zdb_nicenum(metaslab_block_maxsize(msp), maxbuf, sizeof (maxbuf));
(void) printf("\t %25s %10lu %7s %6s %4s %4d%%\n",
"segments", avl_numnodes(t), "maxsize", maxbuf,
"freepct", free_pct);
(void) printf("\tIn-memory histogram:\n");
dump_histogram(rt->rt_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
static void
dump_metaslab(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
space_map_t *sm = msp->ms_sm;
char freebuf[32];
zdb_nicenum(msp->ms_size - space_map_allocated(sm), freebuf,
sizeof (freebuf));
(void) printf(
"\tmetaslab %6llu offset %12llx spacemap %6llu free %5s\n",
(u_longlong_t)msp->ms_id, (u_longlong_t)msp->ms_start,
(u_longlong_t)space_map_object(sm), freebuf);
if (dump_opt['m'] > 2 && !dump_opt['L']) {
mutex_enter(&msp->ms_lock);
metaslab_load_wait(msp);
if (!msp->ms_loaded) {
VERIFY0(metaslab_load(msp));
range_tree_stat_verify(msp->ms_tree);
}
dump_metaslab_stats(msp);
metaslab_unload(msp);
mutex_exit(&msp->ms_lock);
}
if (dump_opt['m'] > 1 && sm != NULL &&
spa_feature_is_active(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
/*
* The space map histogram represents free space in chunks
* of sm_shift (i.e. bucket 0 refers to 2^sm_shift).
*/
(void) printf("\tOn-disk histogram:\t\tfragmentation %llu\n",
(u_longlong_t)msp->ms_fragmentation);
dump_histogram(sm->sm_phys->smp_histogram,
SPACE_MAP_HISTOGRAM_SIZE, sm->sm_shift);
}
if (dump_opt['d'] > 5 || dump_opt['m'] > 3) {
ASSERT(msp->ms_size == (1ULL << vd->vdev_ms_shift));
- mutex_enter(&msp->ms_lock);
dump_spacemap(spa->spa_meta_objset, msp->ms_sm);
- mutex_exit(&msp->ms_lock);
}
}
static void
print_vdev_metaslab_header(vdev_t *vd)
{
(void) printf("\tvdev %10llu\n\t%-10s%5llu %-19s %-15s %-10s\n",
(u_longlong_t)vd->vdev_id,
"metaslabs", (u_longlong_t)vd->vdev_ms_count,
"offset", "spacemap", "free");
(void) printf("\t%15s %19s %15s %10s\n",
"---------------", "-------------------",
"---------------", "-------------");
}
static void
dump_metaslab_groups(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
metaslab_class_t *mc = spa_normal_class(spa);
uint64_t fragmentation;
metaslab_class_histogram_verify(mc);
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg->mg_class != mc)
continue;
metaslab_group_histogram_verify(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
(void) printf("\tvdev %10llu\t\tmetaslabs%5llu\t\t"
"fragmentation",
(u_longlong_t)tvd->vdev_id,
(u_longlong_t)tvd->vdev_ms_count);
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
(void) printf("%3s\n", "-");
} else {
(void) printf("%3llu%%\n",
(u_longlong_t)mg->mg_fragmentation);
}
dump_histogram(mg->mg_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
(void) printf("\tpool %s\tfragmentation", spa_name(spa));
fragmentation = metaslab_class_fragmentation(mc);
if (fragmentation == ZFS_FRAG_INVALID)
(void) printf("\t%3s\n", "-");
else
(void) printf("\t%3llu%%\n", (u_longlong_t)fragmentation);
dump_histogram(mc->mc_histogram, RANGE_TREE_HISTOGRAM_SIZE, 0);
}
+static void
+print_vdev_indirect(vdev_t *vd)
+{
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ vdev_indirect_births_t *vib = vd->vdev_indirect_births;
+
+ if (vim == NULL) {
+ ASSERT3P(vib, ==, NULL);
+ return;
+ }
+
+ ASSERT3U(vdev_indirect_mapping_object(vim), ==,
+ vic->vic_mapping_object);
+ ASSERT3U(vdev_indirect_births_object(vib), ==,
+ vic->vic_births_object);
+
+ (void) printf("indirect births obj %llu:\n",
+ (longlong_t)vic->vic_births_object);
+ (void) printf(" vib_count = %llu\n",
+ (longlong_t)vdev_indirect_births_count(vib));
+ for (uint64_t i = 0; i < vdev_indirect_births_count(vib); i++) {
+ vdev_indirect_birth_entry_phys_t *cur_vibe =
+ &vib->vib_entries[i];
+ (void) printf("\toffset %llx -> txg %llu\n",
+ (longlong_t)cur_vibe->vibe_offset,
+ (longlong_t)cur_vibe->vibe_phys_birth_txg);
+ }
+ (void) printf("\n");
+
+ (void) printf("indirect mapping obj %llu:\n",
+ (longlong_t)vic->vic_mapping_object);
+ (void) printf(" vim_max_offset = 0x%llx\n",
+ (longlong_t)vdev_indirect_mapping_max_offset(vim));
+ (void) printf(" vim_bytes_mapped = 0x%llx\n",
+ (longlong_t)vdev_indirect_mapping_bytes_mapped(vim));
+ (void) printf(" vim_count = %llu\n",
+ (longlong_t)vdev_indirect_mapping_num_entries(vim));
+
+ if (dump_opt['d'] <= 5 && dump_opt['m'] <= 3)
+ return;
+
+ uint32_t *counts = vdev_indirect_mapping_load_obsolete_counts(vim);
+
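+ /*
+ * Each mapping entry translates a <vdev, source offset, size> triple on
+ * the removed (indirect) vdev into a destination DVA; counts[] holds the
+ * per-entry obsolete counts loaded above.
+ */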
+ for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
+ vdev_indirect_mapping_entry_phys_t *vimep =
+ &vim->vim_entries[i];
+ (void) printf("\t<%llx:%llx:%llx> -> "
+ "<%llx:%llx:%llx> (%x obsolete)\n",
+ (longlong_t)vd->vdev_id,
+ (longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
+ (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
+ (longlong_t)DVA_GET_VDEV(&vimep->vimep_dst),
+ (longlong_t)DVA_GET_OFFSET(&vimep->vimep_dst),
+ (longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
+ counts[i]);
+ }
+ (void) printf("\n");
+
+ uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
+ if (obsolete_sm_object != 0) {
+ objset_t *mos = vd->vdev_spa->spa_meta_objset;
+ (void) printf("obsolete space map object %llu:\n",
+ (u_longlong_t)obsolete_sm_object);
+ ASSERT(vd->vdev_obsolete_sm != NULL);
+ ASSERT3U(space_map_object(vd->vdev_obsolete_sm), ==,
+ obsolete_sm_object);
+ dump_spacemap(mos, vd->vdev_obsolete_sm);
+ (void) printf("\n");
+ }
+}
+
static void
dump_metaslabs(spa_t *spa)
{
vdev_t *vd, *rvd = spa->spa_root_vdev;
uint64_t m, c = 0, children = rvd->vdev_children;
(void) printf("\nMetaslabs:\n");
if (!dump_opt['d'] && zopt_objects > 0) {
c = zopt_object[0];
if (c >= children)
(void) fatal("bad vdev id: %llu", (u_longlong_t)c);
if (zopt_objects > 1) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
for (m = 1; m < zopt_objects; m++) {
if (zopt_object[m] < vd->vdev_ms_count)
dump_metaslab(
vd->vdev_ms[zopt_object[m]]);
else
(void) fprintf(stderr, "bad metaslab "
"number %llu\n",
(u_longlong_t)zopt_object[m]);
}
(void) printf("\n");
return;
}
children = c + 1;
}
for (; c < children; c++) {
vd = rvd->vdev_child[c];
print_vdev_metaslab_header(vd);
+ print_vdev_indirect(vd);
+
for (m = 0; m < vd->vdev_ms_count; m++)
dump_metaslab(vd->vdev_ms[m]);
(void) printf("\n");
}
}
static void
dump_dde(const ddt_t *ddt, const ddt_entry_t *dde, uint64_t index)
{
const ddt_phys_t *ddp = dde->dde_phys;
const ddt_key_t *ddk = &dde->dde_key;
const char *types[4] = { "ditto", "single", "double", "triple" };
char blkbuf[BP_SPRINTF_LEN];
blkptr_t blk;
int p;
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
snprintf_blkptr(blkbuf, sizeof (blkbuf), &blk);
(void) printf("index %llx refcnt %llu %s %s\n",
(u_longlong_t)index, (u_longlong_t)ddp->ddp_refcnt,
types[p], blkbuf);
}
}
static void
dump_dedup_ratio(const ddt_stat_t *dds)
{
double rL, rP, rD, D, dedup, compress, copies;
if (dds->dds_blocks == 0)
return;
rL = (double)dds->dds_ref_lsize;
rP = (double)dds->dds_ref_psize;
rD = (double)dds->dds_ref_dsize;
D = (double)dds->dds_dsize;
dedup = rD / D;
compress = rL / rP;
copies = rD / rP;
(void) printf("dedup = %.2f, compress = %.2f, copies = %.2f, "
"dedup * compress / copies = %.2f\n\n",
dedup, compress, copies, dedup * compress / copies);
}
static void
dump_ddt(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
char name[DDT_NAMELEN];
ddt_entry_t dde;
uint64_t walk = 0;
dmu_object_info_t doi;
uint64_t count, dspace, mspace;
int error;
error = ddt_object_info(ddt, type, class, &doi);
if (error == ENOENT)
return;
ASSERT(error == 0);
error = ddt_object_count(ddt, type, class, &count);
ASSERT(error == 0);
if (count == 0)
return;
dspace = doi.doi_physical_blocks_512 << 9;
mspace = doi.doi_fill_count * doi.doi_data_block_size;
ddt_object_name(ddt, type, class, name);
(void) printf("%s: %llu entries, size %llu on disk, %llu in core\n",
name,
(u_longlong_t)count,
(u_longlong_t)(dspace / count),
(u_longlong_t)(mspace / count));
if (dump_opt['D'] < 3)
return;
zpool_dump_ddt(NULL, &ddt->ddt_histogram[type][class]);
if (dump_opt['D'] < 4)
return;
if (dump_opt['D'] < 5 && class == DDT_CLASS_UNIQUE)
return;
(void) printf("%s contents:\n\n", name);
while ((error = ddt_object_walk(ddt, type, class, &walk, &dde)) == 0)
dump_dde(ddt, &dde, walk);
ASSERT(error == ENOENT);
(void) printf("\n");
}
static void
dump_all_ddts(spa_t *spa)
{
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
bzero(&ddh_total, sizeof (ddh_total));
bzero(&dds_total, sizeof (dds_total));
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
dump_ddt(ddt, type, class);
}
}
}
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_blocks == 0) {
(void) printf("All DDTs are empty\n");
return;
}
(void) printf("\n");
if (dump_opt['D'] > 1) {
(void) printf("DDT histogram (aggregated over all DDTs):\n");
ddt_get_dedup_histogram(spa, &ddh_total);
zpool_dump_ddt(&dds_total, &ddh_total);
}
dump_dedup_ratio(&dds_total);
}
static void
dump_dtl_seg(void *arg, uint64_t start, uint64_t size)
{
char *prefix = arg;
(void) printf("%s [%llu,%llu) length %llu\n",
prefix,
(u_longlong_t)start,
(u_longlong_t)(start + size),
(u_longlong_t)(size));
}
static void
dump_dtl(vdev_t *vd, int indent)
{
spa_t *spa = vd->vdev_spa;
boolean_t required;
const char *name[DTL_TYPES] = { "missing", "partial", "scrub",
"outage" };
char prefix[256];
spa_vdev_state_enter(spa, SCL_NONE);
required = vdev_dtl_required(vd);
(void) spa_vdev_state_exit(spa, NULL, 0);
if (indent == 0)
(void) printf("\nDirty time logs:\n\n");
(void) printf("\t%*s%s [%s]\n", indent, "",
vd->vdev_path ? vd->vdev_path :
vd->vdev_parent ? vd->vdev_ops->vdev_op_type : spa_name(spa),
required ? "DTL-required" : "DTL-expendable");
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_t *rt = vd->vdev_dtl[t];
if (range_tree_space(rt) == 0)
continue;
(void) snprintf(prefix, sizeof (prefix), "\t%*s%s",
indent + 2, "", name[t]);
- mutex_enter(rt->rt_lock);
range_tree_walk(rt, dump_dtl_seg, prefix);
- mutex_exit(rt->rt_lock);
if (dump_opt['d'] > 5 && vd->vdev_children == 0)
dump_spacemap(spa->spa_meta_objset,
vd->vdev_dtl_sm);
}
for (unsigned c = 0; c < vd->vdev_children; c++)
dump_dtl(vd->vdev_child[c], indent + 4);
}
static void
dump_history(spa_t *spa)
{
nvlist_t **events = NULL;
char *buf;
uint64_t resid, len, off = 0;
uint_t num = 0;
int error;
time_t tsec;
struct tm t;
char tbuf[30];
char internalstr[MAXPATHLEN];
if ((buf = malloc(SPA_OLD_MAXBLOCKSIZE)) == NULL) {
(void) fprintf(stderr, "%s: unable to allocate I/O buffer\n",
__func__);
return;
}
do {
len = SPA_OLD_MAXBLOCKSIZE;
if ((error = spa_history_get(spa, &off, &len, buf)) != 0) {
(void) fprintf(stderr, "Unable to read history: "
"error %d\n", error);
free(buf);
return;
}
if (zpool_history_unpack(buf, len, &resid, &events, &num) != 0)
break;
off -= resid;
} while (len != 0);
(void) printf("\nHistory:\n");
for (unsigned i = 0; i < num; i++) {
uint64_t time, txg, ievent;
char *cmd, *intstr;
boolean_t printed = B_FALSE;
if (nvlist_lookup_uint64(events[i], ZPOOL_HIST_TIME,
&time) != 0)
goto next;
if (nvlist_lookup_string(events[i], ZPOOL_HIST_CMD,
&cmd) != 0) {
if (nvlist_lookup_uint64(events[i],
ZPOOL_HIST_INT_EVENT, &ievent) != 0)
goto next;
verify(nvlist_lookup_uint64(events[i],
ZPOOL_HIST_TXG, &txg) == 0);
verify(nvlist_lookup_string(events[i],
ZPOOL_HIST_INT_STR, &intstr) == 0);
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS)
goto next;
(void) snprintf(internalstr,
sizeof (internalstr),
"[internal %s txg:%lld] %s",
zfs_history_event_names[ievent],
(longlong_t)txg, intstr);
cmd = internalstr;
}
tsec = time;
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
(void) printf("%s %s\n", tbuf, cmd);
printed = B_TRUE;
next:
if (dump_opt['h'] > 1) {
if (!printed)
(void) printf("unrecognized record:\n");
dump_nvlist(events[i], 2);
}
}
free(buf);
}
/*ARGSUSED*/
static void
dump_dnode(objset_t *os, uint64_t object, void *data, size_t size)
{
}
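/*
 * Convert a bookmark's (level, blkid) pair into the logical byte offset of
 * the data it covers within the object.
 */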
static uint64_t
blkid2offset(const dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb)
{
if (dnp == NULL) {
ASSERT(zb->zb_level < 0);
if (zb->zb_object == 0)
return (zb->zb_blkid);
return (zb->zb_blkid * BP_GET_LSIZE(bp));
}
ASSERT(zb->zb_level >= 0);
return ((zb->zb_blkid <<
(zb->zb_level * (dnp->dn_indblkshift - SPA_BLKPTRSHIFT))) *
dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
}
static void
snprintf_blkptr_compact(char *blkbuf, size_t buflen, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = dump_opt['d'] > 5 ? BP_GET_NDVAS(bp) : 1;
int i;
if (dump_opt['b'] >= 6) {
snprintf_blkptr(blkbuf, buflen, bp);
return;
}
if (BP_IS_EMBEDDED(bp)) {
(void) sprintf(blkbuf,
"EMBEDDED et=%u %llxL/%llxP B=%llu",
(int)BPE_GET_ETYPE(bp),
(u_longlong_t)BPE_GET_LSIZE(bp),
(u_longlong_t)BPE_GET_PSIZE(bp),
(u_longlong_t)bp->blk_birth);
return;
}
blkbuf[0] = '\0';
for (i = 0; i < ndvas; i++)
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf), "%llu:%llx:%llx ",
(u_longlong_t)DVA_GET_VDEV(&dva[i]),
(u_longlong_t)DVA_GET_OFFSET(&dva[i]),
(u_longlong_t)DVA_GET_ASIZE(&dva[i]));
if (BP_IS_HOLE(bp)) {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL B=%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)bp->blk_birth);
} else {
(void) snprintf(blkbuf + strlen(blkbuf),
buflen - strlen(blkbuf),
"%llxL/%llxP F=%llu B=%llu/%llu",
(u_longlong_t)BP_GET_LSIZE(bp),
(u_longlong_t)BP_GET_PSIZE(bp),
(u_longlong_t)BP_GET_FILL(bp),
(u_longlong_t)bp->blk_birth,
(u_longlong_t)BP_PHYSICAL_BIRTH(bp));
}
}
static void
print_indirect(blkptr_t *bp, const zbookmark_phys_t *zb,
const dnode_phys_t *dnp)
{
char blkbuf[BP_SPRINTF_LEN];
int l;
if (!BP_IS_EMBEDDED(bp)) {
ASSERT3U(BP_GET_TYPE(bp), ==, dnp->dn_type);
ASSERT3U(BP_GET_LEVEL(bp), ==, zb->zb_level);
}
(void) printf("%16llx ", (u_longlong_t)blkid2offset(dnp, bp, zb));
ASSERT(zb->zb_level >= 0);
for (l = dnp->dn_nlevels - 1; l >= -1; l--) {
if (l == zb->zb_level) {
(void) printf("L%llx", (u_longlong_t)zb->zb_level);
} else {
(void) printf(" ");
}
}
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static int
visit_indirect(spa_t *spa, const dnode_phys_t *dnp,
blkptr_t *bp, const zbookmark_phys_t *zb)
{
int err = 0;
if (bp->blk_birth == 0)
return (0);
print_indirect(bp, zb, dnp);
if (BP_GET_LEVEL(bp) > 0 && !BP_IS_HOLE(bp)) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
uint64_t fill = 0;
err = arc_read(NULL, spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_ASYNC_READ, ZIO_FLAG_CANFAIL, &flags, zb);
if (err)
return (err);
ASSERT(buf->b_data);
/* recursively visit blocks below this */
cbp = buf->b_data;
for (i = 0; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
err = visit_indirect(spa, dnp, cbp, &czb);
if (err)
break;
fill += BP_GET_FILL(cbp);
}
if (!err)
ASSERT3U(fill, ==, BP_GET_FILL(bp));
arc_buf_destroy(buf, &buf);
}
return (err);
}
/*ARGSUSED*/
static void
dump_indirect(dnode_t *dn)
{
dnode_phys_t *dnp = dn->dn_phys;
int j;
zbookmark_phys_t czb;
(void) printf("Indirect blocks:\n");
SET_BOOKMARK(&czb, dmu_objset_id(dn->dn_objset),
dn->dn_object, dnp->dn_nlevels - 1, 0);
for (j = 0; j < dnp->dn_nblkptr; j++) {
czb.zb_blkid = j;
(void) visit_indirect(dmu_objset_spa(dn->dn_objset), dnp,
&dnp->dn_blkptr[j], &czb);
}
(void) printf("\n");
}
/*ARGSUSED*/
static void
dump_dsl_dir(objset_t *os, uint64_t object, void *data, size_t size)
{
dsl_dir_phys_t *dd = data;
time_t crtime;
char nice[32];
/* make sure nicenum has enough space */
CTASSERT(sizeof (nice) >= NN_NUMBUF_SZ);
if (dd == NULL)
return;
ASSERT3U(size, >=, sizeof (dsl_dir_phys_t));
crtime = dd->dd_creation_time;
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\thead_dataset_obj = %llu\n",
(u_longlong_t)dd->dd_head_dataset_obj);
(void) printf("\t\tparent_dir_obj = %llu\n",
(u_longlong_t)dd->dd_parent_obj);
(void) printf("\t\torigin_obj = %llu\n",
(u_longlong_t)dd->dd_origin_obj);
(void) printf("\t\tchild_dir_zapobj = %llu\n",
(u_longlong_t)dd->dd_child_dir_zapobj);
zdb_nicenum(dd->dd_used_bytes, nice, sizeof (nice));
(void) printf("\t\tused_bytes = %s\n", nice);
zdb_nicenum(dd->dd_compressed_bytes, nice, sizeof (nice));
(void) printf("\t\tcompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_uncompressed_bytes, nice, sizeof (nice));
(void) printf("\t\tuncompressed_bytes = %s\n", nice);
zdb_nicenum(dd->dd_quota, nice, sizeof (nice));
(void) printf("\t\tquota = %s\n", nice);
zdb_nicenum(dd->dd_reserved, nice, sizeof (nice));
(void) printf("\t\treserved = %s\n", nice);
(void) printf("\t\tprops_zapobj = %llu\n",
(u_longlong_t)dd->dd_props_zapobj);
(void) printf("\t\tdeleg_zapobj = %llu\n",
(u_longlong_t)dd->dd_deleg_zapobj);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)dd->dd_flags);
#define DO(which) \
zdb_nicenum(dd->dd_used_breakdown[DD_USED_ ## which], nice, \
sizeof (nice)); \
(void) printf("\t\tused_breakdown[" #which "] = %s\n", nice)
DO(HEAD);
DO(SNAP);
DO(CHILD);
DO(CHILD_RSRV);
DO(REFRSRV);
#undef DO
}
/*ARGSUSED*/
static void
dump_dsl_dataset(objset_t *os, uint64_t object, void *data, size_t size)
{
dsl_dataset_phys_t *ds = data;
time_t crtime;
char used[32], compressed[32], uncompressed[32], unique[32];
char blkbuf[BP_SPRINTF_LEN];
/* make sure nicenum has enough space */
CTASSERT(sizeof (used) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (compressed) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncompressed) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (unique) >= NN_NUMBUF_SZ);
if (ds == NULL)
return;
ASSERT(size == sizeof (*ds));
crtime = ds->ds_creation_time;
zdb_nicenum(ds->ds_referenced_bytes, used, sizeof (used));
zdb_nicenum(ds->ds_compressed_bytes, compressed, sizeof (compressed));
zdb_nicenum(ds->ds_uncompressed_bytes, uncompressed,
sizeof (uncompressed));
zdb_nicenum(ds->ds_unique_bytes, unique, sizeof (unique));
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ds->ds_bp);
(void) printf("\t\tdir_obj = %llu\n",
(u_longlong_t)ds->ds_dir_obj);
(void) printf("\t\tprev_snap_obj = %llu\n",
(u_longlong_t)ds->ds_prev_snap_obj);
(void) printf("\t\tprev_snap_txg = %llu\n",
(u_longlong_t)ds->ds_prev_snap_txg);
(void) printf("\t\tnext_snap_obj = %llu\n",
(u_longlong_t)ds->ds_next_snap_obj);
(void) printf("\t\tsnapnames_zapobj = %llu\n",
(u_longlong_t)ds->ds_snapnames_zapobj);
(void) printf("\t\tnum_children = %llu\n",
(u_longlong_t)ds->ds_num_children);
(void) printf("\t\tuserrefs_obj = %llu\n",
(u_longlong_t)ds->ds_userrefs_obj);
(void) printf("\t\tcreation_time = %s", ctime(&crtime));
(void) printf("\t\tcreation_txg = %llu\n",
(u_longlong_t)ds->ds_creation_txg);
(void) printf("\t\tdeadlist_obj = %llu\n",
(u_longlong_t)ds->ds_deadlist_obj);
(void) printf("\t\tused_bytes = %s\n", used);
(void) printf("\t\tcompressed_bytes = %s\n", compressed);
(void) printf("\t\tuncompressed_bytes = %s\n", uncompressed);
(void) printf("\t\tunique = %s\n", unique);
(void) printf("\t\tfsid_guid = %llu\n",
(u_longlong_t)ds->ds_fsid_guid);
(void) printf("\t\tguid = %llu\n",
(u_longlong_t)ds->ds_guid);
(void) printf("\t\tflags = %llx\n",
(u_longlong_t)ds->ds_flags);
(void) printf("\t\tnext_clones_obj = %llu\n",
(u_longlong_t)ds->ds_next_clones_obj);
(void) printf("\t\tprops_obj = %llu\n",
(u_longlong_t)ds->ds_props_obj);
(void) printf("\t\tbp = %s\n", blkbuf);
}
/* ARGSUSED */
static int
dump_bptree_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
char blkbuf[BP_SPRINTF_LEN];
if (bp->blk_birth != 0) {
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("\t%s\n", blkbuf);
}
return (0);
}
static void
dump_bptree(objset_t *os, uint64_t obj, const char *name)
{
char bytes[32];
bptree_phys_t *bt;
dmu_buf_t *db;
/* make sure nicenum has enough space */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
if (dump_opt['d'] < 3)
return;
VERIFY3U(0, ==, dmu_bonus_hold(os, obj, FTAG, &db));
bt = db->db_data;
zdb_nicenum(bt->bt_bytes, bytes, sizeof (bytes));
(void) printf("\n %s: %llu datasets, %s\n",
name, (unsigned long long)(bt->bt_end - bt->bt_begin), bytes);
dmu_buf_rele(db, FTAG);
if (dump_opt['d'] < 5)
return;
(void) printf("\n");
(void) bptree_iterate(os, obj, B_FALSE, dump_bptree_cb, NULL, NULL);
}
/* ARGSUSED */
static int
dump_bpobj_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
char blkbuf[BP_SPRINTF_LEN];
ASSERT(bp->blk_birth != 0);
snprintf_blkptr_compact(blkbuf, sizeof (blkbuf), bp);
(void) printf("\t%s\n", blkbuf);
return (0);
}
static void
dump_full_bpobj(bpobj_t *bpo, const char *name, int indent)
{
char bytes[32];
char comp[32];
char uncomp[32];
uint64_t i;
/* make sure nicenum has enough space */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
if (dump_opt['d'] < 3)
return;
zdb_nicenum(bpo->bpo_phys->bpo_bytes, bytes, sizeof (bytes));
if (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_subobjs != 0) {
zdb_nicenum(bpo->bpo_phys->bpo_comp, comp, sizeof (comp));
zdb_nicenum(bpo->bpo_phys->bpo_uncomp, uncomp, sizeof (uncomp));
(void) printf(" %*s: object %llu, %llu local blkptrs, "
"%llu subobjs in object, %llu, %s (%s/%s comp)\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
(u_longlong_t)bpo->bpo_phys->bpo_num_subobjs,
(u_longlong_t)bpo->bpo_phys->bpo_subobjs,
bytes, comp, uncomp);
for (i = 0; i < bpo->bpo_phys->bpo_num_subobjs; i++) {
uint64_t subobj;
bpobj_t subbpo;
int error;
VERIFY0(dmu_read(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
i * sizeof (subobj), sizeof (subobj), &subobj, 0));
error = bpobj_open(&subbpo, bpo->bpo_os, subobj);
if (error != 0) {
(void) printf("ERROR %u while trying to open "
"subobj id %llu\n",
error, (u_longlong_t)subobj);
continue;
}
dump_full_bpobj(&subbpo, "subobj", indent + 1);
bpobj_close(&subbpo);
}
} else {
(void) printf(" %*s: object %llu, %llu blkptrs, %s\n",
indent * 8, name,
(u_longlong_t)bpo->bpo_object,
(u_longlong_t)bpo->bpo_phys->bpo_num_blkptrs,
bytes);
}
if (dump_opt['d'] < 5)
return;
if (indent == 0) {
(void) bpobj_iterate_nofree(bpo, dump_bpobj_cb, NULL, NULL);
(void) printf("\n");
}
}
static void
dump_deadlist(dsl_deadlist_t *dl)
{
dsl_deadlist_entry_t *dle;
uint64_t unused;
char bytes[32];
char comp[32];
char uncomp[32];
/* make sure nicenum has enough space */
CTASSERT(sizeof (bytes) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (comp) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (uncomp) >= NN_NUMBUF_SZ);
if (dump_opt['d'] < 3)
return;
if (dl->dl_oldfmt) {
dump_full_bpobj(&dl->dl_bpobj, "old-format deadlist", 0);
return;
}
zdb_nicenum(dl->dl_phys->dl_used, bytes, sizeof (bytes));
zdb_nicenum(dl->dl_phys->dl_comp, comp, sizeof (comp));
zdb_nicenum(dl->dl_phys->dl_uncomp, uncomp, sizeof (uncomp));
(void) printf("\n Deadlist: %s (%s/%s comp)\n",
bytes, comp, uncomp);
if (dump_opt['d'] < 4)
return;
(void) printf("\n");
/* force the tree to be loaded */
dsl_deadlist_space_range(dl, 0, UINT64_MAX, &unused, &unused, &unused);
for (dle = avl_first(&dl->dl_tree); dle;
dle = AVL_NEXT(&dl->dl_tree, dle)) {
if (dump_opt['d'] >= 5) {
char buf[128];
(void) snprintf(buf, sizeof (buf),
"mintxg %llu -> obj %llu",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
dump_full_bpobj(&dle->dle_bpobj, buf, 0);
} else {
(void) printf("mintxg %llu -> obj %llu\n",
(longlong_t)dle->dle_mintxg,
(longlong_t)dle->dle_bpobj.bpo_object);
}
}
}
static avl_tree_t idx_tree;
static avl_tree_t domain_tree;
static boolean_t fuid_table_loaded;
static objset_t *sa_os = NULL;
static sa_attr_type_t *sa_attr_table = NULL;
static int
open_objset(const char *path, dmu_objset_type_t type, void *tag, objset_t **osp)
{
int err;
uint64_t sa_attrs = 0;
uint64_t version = 0;
VERIFY3P(sa_os, ==, NULL);
err = dmu_objset_own(path, type, B_TRUE, B_FALSE, tag, osp);
if (err != 0) {
(void) fprintf(stderr, "failed to own dataset '%s': %s\n", path,
strerror(err));
return (err);
}
if (dmu_objset_type(*osp) == DMU_OST_ZFS && !(*osp)->os_encrypted) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZPL_VERSION_STR,
8, 1, &version);
if (version >= ZPL_VERSION_SA) {
(void) zap_lookup(*osp, MASTER_NODE_OBJ, ZFS_SA_ATTRS,
8, 1, &sa_attrs);
}
err = sa_setup(*osp, sa_attrs, zfs_attr_table, ZPL_END,
&sa_attr_table);
if (err != 0) {
(void) fprintf(stderr, "sa_setup failed: %s\n",
strerror(err));
dmu_objset_disown(*osp, B_FALSE, tag);
*osp = NULL;
}
}
sa_os = *osp;
return (0);
}
static void
close_objset(objset_t *os, void *tag)
{
VERIFY3P(os, ==, sa_os);
if (os->os_sa != NULL)
sa_tear_down(os);
dmu_objset_disown(os, B_FALSE, tag);
sa_attr_table = NULL;
sa_os = NULL;
}
static void
fuid_table_destroy(void)
{
if (fuid_table_loaded) {
zfs_fuid_table_destroy(&idx_tree, &domain_tree);
fuid_table_loaded = B_FALSE;
}
}
/*
* Print uid or gid information.
* For a normal POSIX id, just the id is printed in decimal format.
* For CIFS files with a FUID, the fuid is printed in hex followed by
* the domain-rid string.
*/
static void
print_idstr(uint64_t id, const char *id_type)
{
if (FUID_INDEX(id)) {
char *domain;
domain = zfs_fuid_idx_domain(&idx_tree, FUID_INDEX(id));
(void) printf("\t%s %llx [%s-%d]\n", id_type,
(u_longlong_t)id, domain, (int)FUID_RID(id));
} else {
(void) printf("\t%s %llu\n", id_type, (u_longlong_t)id);
}
}
static void
dump_uidgid(objset_t *os, uint64_t uid, uint64_t gid)
{
uint32_t uid_idx, gid_idx;
uid_idx = FUID_INDEX(uid);
gid_idx = FUID_INDEX(gid);
/* Load domain table, if not already loaded */
if (!fuid_table_loaded && (uid_idx || gid_idx)) {
uint64_t fuid_obj;
/* first find the fuid object. It lives in the master node */
VERIFY(zap_lookup(os, MASTER_NODE_OBJ, ZFS_FUID_TABLES,
8, 1, &fuid_obj) == 0);
zfs_fuid_avl_tree_create(&idx_tree, &domain_tree);
(void) zfs_fuid_table_load(os, fuid_obj,
&idx_tree, &domain_tree);
fuid_table_loaded = B_TRUE;
}
print_idstr(uid, "uid");
print_idstr(gid, "gid");
}
static void
dump_znode_sa_xattr(sa_handle_t *hdl)
{
nvlist_t *sa_xattr;
nvpair_t *elem = NULL;
int sa_xattr_size = 0;
int sa_xattr_entries = 0;
int error;
char *sa_xattr_packed;
error = sa_size(hdl, sa_attr_table[ZPL_DXATTR], &sa_xattr_size);
if (error || sa_xattr_size == 0)
return;
sa_xattr_packed = malloc(sa_xattr_size);
if (sa_xattr_packed == NULL)
return;
error = sa_lookup(hdl, sa_attr_table[ZPL_DXATTR],
sa_xattr_packed, sa_xattr_size);
if (error) {
free(sa_xattr_packed);
return;
}
error = nvlist_unpack(sa_xattr_packed, sa_xattr_size, &sa_xattr, 0);
if (error) {
free(sa_xattr_packed);
return;
}
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL)
sa_xattr_entries++;
(void) printf("\tSA xattrs: %d bytes, %d entries\n\n",
sa_xattr_size, sa_xattr_entries);
while ((elem = nvlist_next_nvpair(sa_xattr, elem)) != NULL) {
uchar_t *value;
uint_t cnt, idx;
(void) printf("\t\t%s = ", nvpair_name(elem));
nvpair_value_byte_array(elem, &value, &cnt);
for (idx = 0; idx < cnt; ++idx) {
if (isprint(value[idx]))
(void) putchar(value[idx]);
else
(void) printf("\\%3.3o", value[idx]);
}
(void) putchar('\n');
}
nvlist_free(sa_xattr);
free(sa_xattr_packed);
}
/*ARGSUSED*/
static void
dump_znode(objset_t *os, uint64_t object, void *data, size_t size)
{
char path[MAXPATHLEN * 2]; /* allow for xattr and failure prefix */
sa_handle_t *hdl;
uint64_t xattr, rdev, gen;
uint64_t uid, gid, mode, fsize, parent, links;
uint64_t pflags;
uint64_t acctm[2], modtm[2], chgtm[2], crtm[2];
time_t z_crtime, z_atime, z_mtime, z_ctime;
sa_bulk_attr_t bulk[12];
int idx = 0;
int error;
VERIFY3P(os, ==, sa_os);
if (sa_handle_get(os, object, NULL, SA_HDL_PRIVATE, &hdl)) {
(void) printf("Failed to get handle for SA znode\n");
return;
}
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_UID], NULL, &uid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GID], NULL, &gid, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_LINKS], NULL,
&links, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_GEN], NULL, &gen, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MODE], NULL,
&mode, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_PARENT],
NULL, &parent, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_SIZE], NULL,
&fsize, 8);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_ATIME], NULL,
acctm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_MTIME], NULL,
modtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CRTIME], NULL,
crtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_CTIME], NULL,
chgtm, 16);
SA_ADD_BULK_ATTR(bulk, idx, sa_attr_table[ZPL_FLAGS], NULL,
&pflags, 8);
if (sa_bulk_lookup(hdl, bulk, idx)) {
(void) sa_handle_destroy(hdl);
return;
}
z_crtime = (time_t)crtm[0];
z_atime = (time_t)acctm[0];
z_mtime = (time_t)modtm[0];
z_ctime = (time_t)chgtm[0];
if (dump_opt['d'] > 4) {
error = zfs_obj_to_path(os, object, path, sizeof (path));
if (error != 0) {
(void) snprintf(path, sizeof (path),
"\?\?\?<object#%llu>", (u_longlong_t)object);
}
(void) printf("\tpath %s\n", path);
}
dump_uidgid(os, uid, gid);
(void) printf("\tatime %s", ctime(&z_atime));
(void) printf("\tmtime %s", ctime(&z_mtime));
(void) printf("\tctime %s", ctime(&z_ctime));
(void) printf("\tcrtime %s", ctime(&z_crtime));
(void) printf("\tgen %llu\n", (u_longlong_t)gen);
(void) printf("\tmode %llo\n", (u_longlong_t)mode);
(void) printf("\tsize %llu\n", (u_longlong_t)fsize);
(void) printf("\tparent %llu\n", (u_longlong_t)parent);
(void) printf("\tlinks %llu\n", (u_longlong_t)links);
(void) printf("\tpflags %llx\n", (u_longlong_t)pflags);
if (dmu_objset_projectquota_enabled(os) && (pflags & ZFS_PROJID)) {
uint64_t projid;
if (sa_lookup(hdl, sa_attr_table[ZPL_PROJID], &projid,
sizeof (uint64_t)) == 0)
(void) printf("\tprojid %llu\n", (u_longlong_t)projid);
}
if (sa_lookup(hdl, sa_attr_table[ZPL_XATTR], &xattr,
sizeof (uint64_t)) == 0)
(void) printf("\txattr %llu\n", (u_longlong_t)xattr);
if (sa_lookup(hdl, sa_attr_table[ZPL_RDEV], &rdev,
sizeof (uint64_t)) == 0)
(void) printf("\trdev 0x%016llx\n", (u_longlong_t)rdev);
dump_znode_sa_xattr(hdl);
sa_handle_destroy(hdl);
}
/*ARGSUSED*/
static void
dump_acl(objset_t *os, uint64_t object, void *data, size_t size)
{
}
/*ARGSUSED*/
static void
dump_dmu_objset(objset_t *os, uint64_t object, void *data, size_t size)
{
}
static object_viewer_t *object_viewer[DMU_OT_NUMTYPES + 1] = {
dump_none, /* unallocated */
dump_zap, /* object directory */
dump_uint64, /* object array */
dump_none, /* packed nvlist */
dump_packed_nvlist, /* packed nvlist size */
dump_none, /* bpobj */
dump_bpobj, /* bpobj header */
dump_none, /* SPA space map header */
dump_none, /* SPA space map */
dump_none, /* ZIL intent log */
dump_dnode, /* DMU dnode */
dump_dmu_objset, /* DMU objset */
dump_dsl_dir, /* DSL directory */
dump_zap, /* DSL directory child map */
dump_zap, /* DSL dataset snap map */
dump_zap, /* DSL props */
dump_dsl_dataset, /* DSL dataset */
dump_znode, /* ZFS znode */
dump_acl, /* ZFS V0 ACL */
dump_uint8, /* ZFS plain file */
dump_zpldir, /* ZFS directory */
dump_zap, /* ZFS master node */
dump_zap, /* ZFS delete queue */
dump_uint8, /* zvol object */
dump_zap, /* zvol prop */
dump_uint8, /* other uint8[] */
dump_uint64, /* other uint64[] */
dump_zap, /* other ZAP */
dump_zap, /* persistent error log */
dump_uint8, /* SPA history */
dump_history_offsets, /* SPA history offsets */
dump_zap, /* Pool properties */
dump_zap, /* DSL permissions */
dump_acl, /* ZFS ACL */
dump_uint8, /* ZFS SYSACL */
dump_none, /* FUID nvlist */
dump_packed_nvlist, /* FUID nvlist size */
dump_zap, /* DSL dataset next clones */
dump_zap, /* DSL scrub queue */
dump_zap, /* ZFS user/group/project used */
dump_zap, /* ZFS user/group/project quota */
dump_zap, /* snapshot refcount tags */
dump_ddt_zap, /* DDT ZAP object */
dump_zap, /* DDT statistics */
dump_znode, /* SA object */
dump_zap, /* SA Master Node */
dump_sa_attrs, /* SA attribute registration */
dump_sa_layouts, /* SA attribute layouts */
dump_zap, /* DSL scrub translations */
dump_none, /* fake dedup BP */
dump_zap, /* deadlist */
dump_none, /* deadlist hdr */
dump_zap, /* dsl clones */
dump_bpobj_subobjs, /* bpobj subobjs */
dump_unknown, /* Unknown type, must be last */
};
static void
dump_object(objset_t *os, uint64_t object, int verbosity, int *print_header,
uint64_t *dnode_slots_used)
{
dmu_buf_t *db = NULL;
dmu_object_info_t doi;
dnode_t *dn;
boolean_t dnode_held = B_FALSE;
void *bonus = NULL;
size_t bsize = 0;
char iblk[32], dblk[32], lsize[32], asize[32], fill[32], dnsize[32];
char bonus_size[32];
char aux[50];
int error;
/* make sure nicenum has enough space */
CTASSERT(sizeof (iblk) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (dblk) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (bonus_size) >= NN_NUMBUF_SZ);
if (*print_header) {
(void) printf("\n%10s %3s %5s %5s %5s %6s %5s %6s %s\n",
"Object", "lvl", "iblk", "dblk", "dsize", "dnsize",
"lsize", "%full", "type");
*print_header = 0;
}
if (object == 0) {
dn = DMU_META_DNODE(os);
dmu_object_info_from_dnode(dn, &doi);
} else {
/*
* Encrypted datasets will have sensitive bonus buffers
* encrypted. Therefore we cannot hold the bonus buffer and
* must hold the dnode itself instead.
*/
error = dmu_object_info(os, object, &doi);
if (error)
fatal("dmu_object_info() failed, errno %u", error);
if (os->os_encrypted &&
DMU_OT_IS_ENCRYPTED(doi.doi_bonus_type)) {
error = dnode_hold(os, object, FTAG, &dn);
if (error)
fatal("dnode_hold() failed, errno %u", error);
dnode_held = B_TRUE;
} else {
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error)
fatal("dmu_bonus_hold(%llu) failed, errno %u",
object, error);
bonus = db->db_data;
bsize = db->db_size;
dn = DB_DNODE((dmu_buf_impl_t *)db);
}
}
if (dnode_slots_used)
*dnode_slots_used = doi.doi_dnodesize / DNODE_MIN_SIZE;
zdb_nicenum(doi.doi_metadata_block_size, iblk, sizeof (iblk));
zdb_nicenum(doi.doi_data_block_size, dblk, sizeof (dblk));
zdb_nicenum(doi.doi_max_offset, lsize, sizeof (lsize));
zdb_nicenum(doi.doi_physical_blocks_512 << 9, asize, sizeof (asize));
zdb_nicenum(doi.doi_bonus_size, bonus_size, sizeof (bonus_size));
zdb_nicenum(doi.doi_dnodesize, dnsize, sizeof (dnsize));
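/*
 * %full is the filled fraction of the object's logical size; the
 * meta-dnode's (object 0) fill count is in dnodes, hence the
 * DNODES_PER_BLOCK divisor.
 */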
(void) sprintf(fill, "%6.2f", 100.0 * doi.doi_fill_count *
doi.doi_data_block_size / (object == 0 ? DNODES_PER_BLOCK : 1) /
doi.doi_max_offset);
aux[0] = '\0';
if (doi.doi_checksum != ZIO_CHECKSUM_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (K=%s)", ZDB_CHECKSUM_NAME(doi.doi_checksum));
}
if (doi.doi_compress != ZIO_COMPRESS_INHERIT || verbosity >= 6) {
(void) snprintf(aux + strlen(aux), sizeof (aux) - strlen(aux),
" (Z=%s)", ZDB_COMPRESS_NAME(doi.doi_compress));
}
(void) printf("%10lld %3u %5s %5s %5s %6s %5s %6s %s%s\n",
(u_longlong_t)object, doi.doi_indirection, iblk, dblk,
asize, dnsize, lsize, fill, zdb_ot_name(doi.doi_type), aux);
if (doi.doi_bonus_type != DMU_OT_NONE && verbosity > 3) {
(void) printf("%10s %3s %5s %5s %5s %5s %5s %6s %s\n",
"", "", "", "", "", "", bonus_size, "bonus",
zdb_ot_name(doi.doi_bonus_type));
}
if (verbosity >= 4) {
(void) printf("\tdnode flags: %s%s%s%s\n",
(dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) ?
"USED_BYTES " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED) ?
"USERUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) ?
"USEROBJUSED_ACCOUNTED " : "",
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ?
"SPILL_BLKPTR" : "");
(void) printf("\tdnode maxblkid: %llu\n",
(longlong_t)dn->dn_phys->dn_maxblkid);
if (!dnode_held) {
object_viewer[ZDB_OT_TYPE(doi.doi_bonus_type)](os,
object, bonus, bsize);
} else {
(void) printf("\t\t(bonus encrypted)\n");
}
if (!os->os_encrypted || !DMU_OT_IS_ENCRYPTED(doi.doi_type)) {
object_viewer[ZDB_OT_TYPE(doi.doi_type)](os, object,
NULL, 0);
} else {
(void) printf("\t\t(object encrypted)\n");
}
*print_header = 1;
}
if (verbosity >= 5)
dump_indirect(dn);
if (verbosity >= 5) {
/*
* Report the list of segments that comprise the object.
*/
uint64_t start = 0;
uint64_t end;
uint64_t blkfill = 1;
int minlvl = 1;
if (dn->dn_type == DMU_OT_DNODE) {
minlvl = 0;
blkfill = DNODES_PER_BLOCK;
}
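/*
 * Alternate dnode_next_offset() calls: find the next allocated offset,
 * then the next hole after it; each pair bounds one segment.
 */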
for (;;) {
char segsize[32];
/* make sure nicenum has enough space */
CTASSERT(sizeof (segsize) >= NN_NUMBUF_SZ);
error = dnode_next_offset(dn,
0, &start, minlvl, blkfill, 0);
if (error)
break;
end = start;
error = dnode_next_offset(dn,
DNODE_FIND_HOLE, &end, minlvl, blkfill, 0);
zdb_nicenum(end - start, segsize, sizeof (segsize));
(void) printf("\t\tsegment [%016llx, %016llx)"
" size %5s\n", (u_longlong_t)start,
(u_longlong_t)end, segsize);
if (error)
break;
start = end;
}
}
if (db != NULL)
dmu_buf_rele(db, FTAG);
if (dnode_held)
dnode_rele(dn, FTAG);
}
static const char *objset_types[DMU_OST_NUMTYPES] = {
"NONE", "META", "ZPL", "ZVOL", "OTHER", "ANY" };
static void
dump_dir(objset_t *os)
{
dmu_objset_stats_t dds;
uint64_t object, object_count;
uint64_t refdbytes, usedobjs, scratch;
char numbuf[32];
char blkbuf[BP_SPRINTF_LEN + 20];
char osname[ZFS_MAX_DATASET_NAME_LEN];
const char *type = "UNKNOWN";
int verbosity = dump_opt['d'];
int print_header = 1;
unsigned i;
int error;
uint64_t total_slots_used = 0;
uint64_t max_slot_used = 0;
uint64_t dnode_slots;
/* make sure nicenum has enough space */
CTASSERT(sizeof (numbuf) >= NN_NUMBUF_SZ);
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (dds.dds_type < DMU_OST_NUMTYPES)
type = objset_types[dds.dds_type];
if (dds.dds_type == DMU_OST_META) {
dds.dds_creation_txg = TXG_INITIAL;
usedobjs = BP_GET_FILL(os->os_rootbp);
refdbytes = dsl_dir_phys(os->os_spa->spa_dsl_pool->dp_mos_dir)->
dd_used_bytes;
} else {
dmu_objset_space(os, &refdbytes, &scratch, &usedobjs, &scratch);
}
ASSERT3U(usedobjs, ==, BP_GET_FILL(os->os_rootbp));
zdb_nicenum(refdbytes, numbuf, sizeof (numbuf));
if (verbosity >= 4) {
(void) snprintf(blkbuf, sizeof (blkbuf), ", rootbp ");
(void) snprintf_blkptr(blkbuf + strlen(blkbuf),
sizeof (blkbuf) - strlen(blkbuf), os->os_rootbp);
} else {
blkbuf[0] = '\0';
}
dmu_objset_name(os, osname);
(void) printf("Dataset %s [%s], ID %llu, cr_txg %llu, "
"%s, %llu objects%s\n",
osname, type, (u_longlong_t)dmu_objset_id(os),
(u_longlong_t)dds.dds_creation_txg,
numbuf, (u_longlong_t)usedobjs, blkbuf);
if (zopt_objects != 0) {
for (i = 0; i < zopt_objects; i++)
dump_object(os, zopt_object[i], verbosity,
&print_header, NULL);
(void) printf("\n");
return;
}
if (dump_opt['i'] != 0 || verbosity >= 2)
dump_intent_log(dmu_objset_zil(os));
- if (dmu_objset_ds(os) != NULL)
- dump_deadlist(&dmu_objset_ds(os)->ds_deadlist);
+ if (dmu_objset_ds(os) != NULL) {
+ dsl_dataset_t *ds = dmu_objset_ds(os);
+ dump_deadlist(&ds->ds_deadlist);
+
+ if (dsl_dataset_remap_deadlist_exists(ds)) {
+ (void) printf("ds_remap_deadlist:\n");
+ dump_deadlist(&ds->ds_remap_deadlist);
+ }
+ }
if (verbosity < 2)
return;
if (BP_IS_HOLE(os->os_rootbp))
return;
dump_object(os, 0, verbosity, &print_header, NULL);
object_count = 0;
if (DMU_USERUSED_DNODE(os) != NULL &&
DMU_USERUSED_DNODE(os)->dn_type != 0) {
dump_object(os, DMU_USERUSED_OBJECT, verbosity, &print_header,
NULL);
dump_object(os, DMU_GROUPUSED_OBJECT, verbosity, &print_header,
NULL);
}
if (DMU_PROJECTUSED_DNODE(os) != NULL &&
DMU_PROJECTUSED_DNODE(os)->dn_type != 0)
dump_object(os, DMU_PROJECTUSED_OBJECT, verbosity,
&print_header, NULL);
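/*
 * Large dnodes occupy multiple 512-byte slots; track per-object slot
 * counts so overall slot usage can be summarized after the scan.
 */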
object = 0;
while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
dump_object(os, object, verbosity, &print_header, &dnode_slots);
object_count++;
total_slots_used += dnode_slots;
max_slot_used = object + dnode_slots - 1;
}
(void) printf("\n");
(void) printf(" Dnode slots:\n");
(void) printf("\tTotal used: %10llu\n",
(u_longlong_t)total_slots_used);
(void) printf("\tMax used: %10llu\n",
(u_longlong_t)max_slot_used);
(void) printf("\tPercent empty: %10lf\n",
(double)(max_slot_used - total_slots_used)*100 /
(double)max_slot_used);
(void) printf("\n");
if (error != ESRCH) {
(void) fprintf(stderr, "dmu_object_next() = %d\n", error);
abort();
}
ASSERT3U(object_count, ==, usedobjs);
}
static void
dump_uberblock(uberblock_t *ub, const char *header, const char *footer)
{
time_t timestamp = ub->ub_timestamp;
(void) printf("%s", header ? header : "");
(void) printf("\tmagic = %016llx\n", (u_longlong_t)ub->ub_magic);
(void) printf("\tversion = %llu\n", (u_longlong_t)ub->ub_version);
(void) printf("\ttxg = %llu\n", (u_longlong_t)ub->ub_txg);
(void) printf("\tguid_sum = %llu\n", (u_longlong_t)ub->ub_guid_sum);
(void) printf("\ttimestamp = %llu UTC = %s",
(u_longlong_t)ub->ub_timestamp, asctime(localtime(&timestamp)));
(void) printf("\tmmp_magic = %016llx\n",
(u_longlong_t)ub->ub_mmp_magic);
if (ub->ub_mmp_magic == MMP_MAGIC)
(void) printf("\tmmp_delay = %0llu\n",
(u_longlong_t)ub->ub_mmp_delay);
if (dump_opt['u'] >= 4) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), &ub->ub_rootbp);
(void) printf("\trootbp = %s\n", blkbuf);
}
(void) printf("%s", footer ? footer : "");
}
static void
dump_config(spa_t *spa)
{
dmu_buf_t *db;
size_t nvsize = 0;
int error = 0;
error = dmu_bonus_hold(spa->spa_meta_objset,
spa->spa_config_object, FTAG, &db);
if (error == 0) {
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
(void) printf("\nMOS Configuration:\n");
dump_packed_nvlist(spa->spa_meta_objset,
spa->spa_config_object, (void *)&nvsize, 1);
} else {
(void) fprintf(stderr, "dmu_bonus_hold(%llu) failed, errno %d",
(u_longlong_t)spa->spa_config_object, error);
}
}
static void
dump_cachefile(const char *cachefile)
{
int fd;
struct stat64 statbuf;
char *buf;
nvlist_t *config;
if ((fd = open64(cachefile, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if (fstat64(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", cachefile,
strerror(errno));
exit(1);
}
if ((buf = malloc(statbuf.st_size)) == NULL) {
(void) fprintf(stderr, "failed to allocate %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
if (read(fd, buf, statbuf.st_size) != statbuf.st_size) {
(void) fprintf(stderr, "failed to read %llu bytes\n",
(u_longlong_t)statbuf.st_size);
exit(1);
}
(void) close(fd);
if (nvlist_unpack(buf, statbuf.st_size, &config, 0) != 0) {
(void) fprintf(stderr, "failed to unpack nvlist\n");
exit(1);
}
free(buf);
dump_nvlist(config, 0);
nvlist_free(config);
}
/*
* ZFS label nvlist stats
*/
typedef struct zdb_nvl_stats {
int zns_list_count;
int zns_leaf_count;
size_t zns_leaf_largest;
size_t zns_leaf_total;
nvlist_t *zns_string;
nvlist_t *zns_uint64;
nvlist_t *zns_boolean;
} zdb_nvl_stats_t;
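/*
 * Recursively walk an unpacked label config, copying each pair into a
 * per-type scratch nvlist so the XDR-encoded size of strings, integers,
 * and booleans can be reported separately; "children" sub-nvlists are
 * also measured to estimate the per-leaf-vdev config size.
 */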
static void
collect_nvlist_stats(nvlist_t *nvl, zdb_nvl_stats_t *stats)
{
nvlist_t *list, **array;
nvpair_t *nvp = NULL;
char *name;
uint_t i, items;
stats->zns_list_count++;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
name = nvpair_name(nvp);
switch (nvpair_type(nvp)) {
case DATA_TYPE_STRING:
fnvlist_add_string(stats->zns_string, name,
fnvpair_value_string(nvp));
break;
case DATA_TYPE_UINT64:
fnvlist_add_uint64(stats->zns_uint64, name,
fnvpair_value_uint64(nvp));
break;
case DATA_TYPE_BOOLEAN:
fnvlist_add_boolean(stats->zns_boolean, name);
break;
case DATA_TYPE_NVLIST:
if (nvpair_value_nvlist(nvp, &list) == 0)
collect_nvlist_stats(list, stats);
break;
case DATA_TYPE_NVLIST_ARRAY:
if (nvpair_value_nvlist_array(nvp, &array, &items) != 0)
break;
for (i = 0; i < items; i++) {
collect_nvlist_stats(array[i], stats);
/* collect stats on leaf vdev */
if (strcmp(name, "children") == 0) {
size_t size;
(void) nvlist_size(array[i], &size,
NV_ENCODE_XDR);
stats->zns_leaf_total += size;
if (size > stats->zns_leaf_largest)
stats->zns_leaf_largest = size;
stats->zns_leaf_count++;
}
}
break;
default:
(void) printf("skip type %d!\n", (int)nvpair_type(nvp));
}
}
}
static void
dump_nvlist_stats(nvlist_t *nvl, size_t cap)
{
zdb_nvl_stats_t stats = { 0 };
size_t size, sum = 0, total;
size_t noise;
/* requires nvlist with non-unique names for stat collection */
VERIFY0(nvlist_alloc(&stats.zns_string, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_uint64, 0, 0));
VERIFY0(nvlist_alloc(&stats.zns_boolean, 0, 0));
VERIFY0(nvlist_size(stats.zns_boolean, &noise, NV_ENCODE_XDR));
(void) printf("\n\nZFS Label NVList Config Stats:\n");
VERIFY0(nvlist_size(nvl, &total, NV_ENCODE_XDR));
(void) printf(" %d bytes used, %d bytes free (using %4.1f%%)\n\n",
(int)total, (int)(cap - total), 100.0 * total / cap);
collect_nvlist_stats(nvl, &stats);
VERIFY0(nvlist_size(stats.zns_uint64, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "integers:",
(int)fnvlist_num_pairs(stats.zns_uint64),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_string, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "strings:",
(int)fnvlist_num_pairs(stats.zns_string),
(int)size, 100.0 * size / total);
VERIFY0(nvlist_size(stats.zns_boolean, &size, NV_ENCODE_XDR));
size -= noise;
sum += size;
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n", "booleans:",
(int)fnvlist_num_pairs(stats.zns_boolean),
(int)size, 100.0 * size / total);
size = total - sum; /* treat remainder as nvlist overhead */
(void) printf("%12s %4d %6d bytes (%5.2f%%)\n\n", "nvlists:",
stats.zns_list_count, (int)size, 100.0 * size / total);
if (stats.zns_leaf_count > 0) {
size_t average = stats.zns_leaf_total / stats.zns_leaf_count;
(void) printf("%12s %4d %6d bytes average\n", "leaf vdevs:",
stats.zns_leaf_count, (int)average);
(void) printf("%24d bytes largest\n",
(int)stats.zns_leaf_largest);
if (dump_opt['l'] >= 3 && average > 0)
(void) printf(" space for %d additional leaf vdevs\n",
(int)((cap - total) / average));
}
(void) printf("\n");
nvlist_free(stats.zns_string);
nvlist_free(stats.zns_uint64);
nvlist_free(stats.zns_boolean);
}
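/*
 * Label configs and uberblocks are tracked by checksum in AVL trees so
 * that each distinct one is printed only once at default verbosity,
 * along with the set of labels in which it appears.
 */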
typedef struct cksum_record {
zio_cksum_t cksum;
boolean_t labels[VDEV_LABELS];
avl_node_t link;
} cksum_record_t;
static int
cksum_record_compare(const void *x1, const void *x2)
{
const cksum_record_t *l = (cksum_record_t *)x1;
const cksum_record_t *r = (cksum_record_t *)x2;
int arraysize = ARRAY_SIZE(l->cksum.zc_word);
int difference;
for (int i = 0; i < arraysize; i++) {
difference = AVL_CMP(l->cksum.zc_word[i], r->cksum.zc_word[i]);
if (difference)
break;
}
return (difference);
}
static cksum_record_t *
cksum_record_alloc(zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = umem_zalloc(sizeof (*rec), UMEM_NOFAIL);
rec->cksum = *cksum;
rec->labels[l] = B_TRUE;
return (rec);
}
static cksum_record_t *
cksum_record_lookup(avl_tree_t *tree, zio_cksum_t *cksum)
{
cksum_record_t lookup = { .cksum = *cksum };
avl_index_t where;
return (avl_find(tree, &lookup, &where));
}
static cksum_record_t *
cksum_record_insert(avl_tree_t *tree, zio_cksum_t *cksum, int l)
{
cksum_record_t *rec;
rec = cksum_record_lookup(tree, cksum);
if (rec) {
rec->labels[l] = B_TRUE;
} else {
rec = cksum_record_alloc(cksum, l);
avl_add(tree, rec);
}
return (rec);
}
static int
first_label(cksum_record_t *rec)
{
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i])
return (i);
return (-1);
}
static void
print_label_numbers(char *prefix, cksum_record_t *rec)
{
printf("%s", prefix);
for (int i = 0; i < VDEV_LABELS; i++)
if (rec->labels[i] == B_TRUE)
printf("%d ", i);
printf("\n");
}
#define MAX_UBERBLOCK_COUNT (VDEV_UBERBLOCK_RING >> UBERBLOCK_SHIFT)
typedef struct label {
vdev_label_t label;
nvlist_t *config_nv;
cksum_record_t *config;
cksum_record_t *uberblocks[MAX_UBERBLOCK_COUNT];
boolean_t header_printed;
boolean_t read_failed;
} label_t;
static void
print_label_header(label_t *label, int l)
{
if (dump_opt['q'])
return;
if (label->header_printed == B_TRUE)
return;
(void) printf("------------------------------------\n");
(void) printf("LABEL %d\n", l);
(void) printf("------------------------------------\n");
label->header_printed = B_TRUE;
}
static void
dump_config_from_label(label_t *label, size_t buflen, int l)
{
if (dump_opt['q'])
return;
if ((dump_opt['l'] < 3) && (first_label(label->config) != l))
return;
print_label_header(label, l);
dump_nvlist(label->config_nv, 4);
print_label_numbers(" labels = ", label->config);
if (dump_opt['l'] >= 2)
dump_nvlist_stats(label->config_nv, buflen);
}
#define ZDB_MAX_UB_HEADER_SIZE 32
static void
dump_label_uberblocks(label_t *label, uint64_t ashift, int label_num)
{
vdev_t vd;
char header[ZDB_MAX_UB_HEADER_SIZE];
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)&label->label + uoff);
cksum_record_t *rec = label->uberblocks[i];
if (rec == NULL) {
if (dump_opt['u'] >= 2) {
print_label_header(label, label_num);
(void) printf(" Uberblock[%d] invalid\n", i);
}
continue;
}
if ((dump_opt['u'] < 3) && (first_label(rec) != label_num))
continue;
if ((dump_opt['u'] < 4) &&
(ub->ub_mmp_magic == MMP_MAGIC) && ub->ub_mmp_delay &&
(i >= VDEV_UBERBLOCK_COUNT(&vd) - MMP_BLOCKS_PER_LABEL))
continue;
print_label_header(label, label_num);
(void) snprintf(header, ZDB_MAX_UB_HEADER_SIZE,
" Uberblock[%d]\n", i);
dump_uberblock(ub, header, "");
print_label_numbers(" labels = ", rec);
}
}
static char curpath[PATH_MAX];
/*
* Iterate through the path components, recursively passing
* current one's obj and remaining path until we find the obj
* for the last one.
*/
static int
dump_path_impl(objset_t *os, uint64_t obj, char *name)
{
int err;
int header = 1;
uint64_t child_obj;
char *s;
dmu_buf_t *db;
dmu_object_info_t doi;
if ((s = strchr(name, '/')) != NULL)
*s = '\0';
err = zap_lookup(os, obj, name, 8, 1, &child_obj);
(void) strlcat(curpath, name, sizeof (curpath));
if (err != 0) {
(void) fprintf(stderr, "failed to lookup %s: %s\n",
curpath, strerror(err));
return (err);
}
child_obj = ZFS_DIRENT_OBJ(child_obj);
err = sa_buf_hold(os, child_obj, FTAG, &db);
if (err != 0) {
(void) fprintf(stderr,
"failed to get SA dbuf for obj %llu: %s\n",
(u_longlong_t)child_obj, strerror(err));
return (EINVAL);
}
dmu_object_info_from_db(db, &doi);
sa_buf_rele(db, FTAG);
if (doi.doi_bonus_type != DMU_OT_SA &&
doi.doi_bonus_type != DMU_OT_ZNODE) {
(void) fprintf(stderr, "invalid bonus type %d for obj %llu\n",
doi.doi_bonus_type, (u_longlong_t)child_obj);
return (EINVAL);
}
if (dump_opt['v'] > 6) {
(void) printf("obj=%llu %s type=%d bonustype=%d\n",
(u_longlong_t)child_obj, curpath, doi.doi_type,
doi.doi_bonus_type);
}
(void) strlcat(curpath, "/", sizeof (curpath));
switch (doi.doi_type) {
case DMU_OT_DIRECTORY_CONTENTS:
if (s != NULL && *(s + 1) != '\0')
return (dump_path_impl(os, child_obj, s + 1));
/*FALLTHROUGH*/
case DMU_OT_PLAIN_FILE_CONTENTS:
dump_object(os, child_obj, dump_opt['v'], &header, NULL);
return (0);
default:
(void) fprintf(stderr, "object %llu has non-file/directory "
"type %d\n", (u_longlong_t)obj, doi.doi_type);
break;
}
return (EINVAL);
}
/*
* Dump the blocks for the object specified by path inside the dataset.
*/
static int
dump_path(char *ds, char *path)
{
int err;
objset_t *os;
uint64_t root_obj;
err = open_objset(ds, DMU_OST_ZFS, FTAG, &os);
if (err != 0)
return (err);
err = zap_lookup(os, MASTER_NODE_OBJ, ZFS_ROOT_OBJ, 8, 1, &root_obj);
if (err != 0) {
(void) fprintf(stderr, "can't lookup root znode: %s\n",
strerror(err));
dmu_objset_disown(os, B_FALSE, FTAG);
return (EINVAL);
}
(void) snprintf(curpath, sizeof (curpath), "dataset=%s path=/", ds);
err = dump_path_impl(os, root_obj, path);
close_objset(os, FTAG);
return (err);
}
static int
dump_label(const char *dev)
{
char path[MAXPATHLEN];
label_t labels[VDEV_LABELS];
uint64_t psize, ashift;
struct stat64 statbuf;
boolean_t config_found = B_FALSE;
boolean_t error = B_FALSE;
avl_tree_t config_tree;
avl_tree_t uberblock_tree;
void *node, *cookie;
int fd;
bzero(labels, sizeof (labels));
/*
* Check if we were given absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(path, dev, sizeof (path));
if (dev[0] != '/' && stat64(path, &statbuf) != 0) {
int error;
error = zfs_resolve_shortname(dev, path, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(path)) {
if (zfs_append_partition(path, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat64(path, &statbuf) != 0)) {
(void) printf("failed to find device %s, try "
"specifying absolute path instead\n", dev);
return (1);
}
}
if ((fd = open64(path, O_RDONLY)) < 0) {
(void) printf("cannot open '%s': %s\n", path, strerror(errno));
exit(1);
}
if (fstat64_blk(fd, &statbuf) != 0) {
(void) printf("failed to stat '%s': %s\n", path,
strerror(errno));
(void) close(fd);
exit(1);
}
if (S_ISBLK(statbuf.st_mode) && ioctl(fd, BLKFLSBUF) != 0)
(void) printf("failed to invalidate cache '%s' : %s\n", path,
strerror(errno));
avl_create(&config_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
avl_create(&uberblock_tree, cksum_record_compare,
sizeof (cksum_record_t), offsetof(cksum_record_t, link));
psize = statbuf.st_size;
psize = P2ALIGN(psize, (uint64_t)sizeof (vdev_label_t));
ashift = SPA_MINBLOCKSHIFT;
/*
* 1. Read the label from disk
* 2. Unpack the configuration and insert in config tree.
* 3. Traverse all uberblocks and insert in uberblock tree.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
label_t *label = &labels[l];
char *buf = label->label.vl_vdev_phys.vp_nvlist;
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
nvlist_t *config;
cksum_record_t *rec;
zio_cksum_t cksum;
vdev_t vd;
if (pread64(fd, &label->label, sizeof (label->label),
vdev_label_offset(psize, l, 0)) != sizeof (label->label)) {
if (!dump_opt['q'])
(void) printf("failed to read label %d\n", l);
label->read_failed = B_TRUE;
error = B_TRUE;
continue;
}
label->read_failed = B_FALSE;
if (nvlist_unpack(buf, buflen, &config, 0) == 0) {
nvlist_t *vdev_tree = NULL;
size_t size;
if ((nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &vdev_tree) != 0) ||
(nvlist_lookup_uint64(vdev_tree,
ZPOOL_CONFIG_ASHIFT, &ashift) != 0))
ashift = SPA_MINBLOCKSHIFT;
if (nvlist_size(config, &size, NV_ENCODE_XDR) != 0)
size = buflen;
fletcher_4_native_varsize(buf, size, &cksum);
rec = cksum_record_insert(&config_tree, &cksum, l);
label->config = rec;
label->config_nv = config;
config_found = B_TRUE;
} else {
error = B_TRUE;
}
vd.vdev_ashift = ashift;
vd.vdev_top = &vd;
for (int i = 0; i < VDEV_UBERBLOCK_COUNT(&vd); i++) {
uint64_t uoff = VDEV_UBERBLOCK_OFFSET(&vd, i);
uberblock_t *ub = (void *)((char *)&label->label + uoff);
if (uberblock_verify(ub))
continue;
fletcher_4_native_varsize(ub, sizeof (*ub), &cksum);
rec = cksum_record_insert(&uberblock_tree, &cksum, l);
label->uberblocks[i] = rec;
}
}
/*
* Dump the label and uberblocks.
*/
for (int l = 0; l < VDEV_LABELS; l++) {
label_t *label = &labels[l];
size_t buflen = sizeof (label->label.vl_vdev_phys.vp_nvlist);
if (label->read_failed == B_TRUE)
continue;
if (label->config_nv) {
dump_config_from_label(label, buflen, l);
} else {
if (!dump_opt['q'])
(void) printf("failed to unpack label %d\n", l);
}
if (dump_opt['u'])
dump_label_uberblocks(label, ashift, l);
nvlist_free(label->config_nv);
}
cookie = NULL;
while ((node = avl_destroy_nodes(&config_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
cookie = NULL;
while ((node = avl_destroy_nodes(&uberblock_tree, &cookie)) != NULL)
umem_free(node, sizeof (cksum_record_t));
avl_destroy(&config_tree);
avl_destroy(&uberblock_tree);
(void) close(fd);
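/*
 * Return 0 on success, 1 if any label could not be read or unpacked,
 * and 2 if no usable configuration was found at all.
 */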
return (config_found == B_FALSE ? 2 :
(error == B_TRUE ? 1 : 0));
}
static uint64_t dataset_feature_count[SPA_FEATURES];
+static uint64_t remap_deadlist_count = 0;
/*ARGSUSED*/
static int
dump_one_dir(const char *dsname, void *arg)
{
int error;
objset_t *os;
spa_feature_t f;
error = open_objset(dsname, DMU_OST_ANY, FTAG, &os);
if (error != 0)
return (0);
for (f = 0; f < SPA_FEATURES; f++) {
if (!dmu_objset_ds(os)->ds_feature_inuse[f])
continue;
ASSERT(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET);
dataset_feature_count[f]++;
}
+ if (dsl_dataset_remap_deadlist_exists(dmu_objset_ds(os))) {
+ remap_deadlist_count++;
+ }
+
dump_dir(os);
close_objset(os, FTAG);
fuid_table_destroy();
return (0);
}
/*
* Block statistics.
*/
#define PSIZE_HISTO_SIZE (SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 2)
typedef struct zdb_blkstats {
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_count;
uint64_t zb_gangs;
uint64_t zb_ditto_samevdev;
uint64_t zb_psize_histogram[PSIZE_HISTO_SIZE];
} zdb_blkstats_t;
/*
* Extended object types to report deferred frees and dedup auto-ditto blocks.
*/
#define ZDB_OT_DEFERRED (DMU_OT_NUMTYPES + 0)
#define ZDB_OT_DITTO (DMU_OT_NUMTYPES + 1)
#define ZDB_OT_OTHER (DMU_OT_NUMTYPES + 2)
#define ZDB_OT_TOTAL (DMU_OT_NUMTYPES + 3)
static const char *zdb_ot_extname[] = {
"deferred free",
"dedup ditto",
"other",
"Total",
};
#define ZB_TOTAL DN_MAX_LEVELS
typedef struct zdb_cb {
zdb_blkstats_t zcb_type[ZB_TOTAL + 1][ZDB_OT_TOTAL + 1];
+ uint64_t zcb_removing_size;
uint64_t zcb_dedup_asize;
uint64_t zcb_dedup_blocks;
uint64_t zcb_embedded_blocks[NUM_BP_EMBEDDED_TYPES];
uint64_t zcb_embedded_histogram[NUM_BP_EMBEDDED_TYPES]
[BPE_PAYLOAD_SIZE + 1];
uint64_t zcb_start;
hrtime_t zcb_lastprint;
uint64_t zcb_totalasize;
uint64_t zcb_errors[256];
int zcb_readfails;
int zcb_haderrors;
spa_t *zcb_spa;
+ uint32_t **zcb_vd_obsolete_counts;
} zdb_cb_t;
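/*
 * Account for a block under its own level and type as well as under the
 * ZB_TOTAL / ZDB_OT_TOTAL aggregates, then claim it so it is removed from
 * the allocated-segment trees used for leak detection.  Dedup'ed blocks
 * decrement the refcounts pre-loaded by zdb_ddt_leak_init() and are fully
 * claimed only when their last reference is counted.
 */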
static void
zdb_count_block(zdb_cb_t *zcb, zilog_t *zilog, const blkptr_t *bp,
dmu_object_type_t type)
{
uint64_t refcnt = 0;
int i;
ASSERT(type < ZDB_OT_TOTAL);
if (zilog && zil_bp_tree_add(zilog, bp) != 0)
return;
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : ZB_TOTAL;
int t = (i & 1) ? type : ZDB_OT_TOTAL;
int equal;
zdb_blkstats_t *zb = &zcb->zcb_type[l][t];
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_count++;
/*
* The histogram is only big enough to record blocks up to
* SPA_OLD_MAXBLOCKSIZE; larger blocks go into the last,
* "other", bucket.
*/
unsigned idx = BP_GET_PSIZE(bp) >> SPA_MINBLOCKSHIFT;
idx = MIN(idx, SPA_OLD_MAXBLOCKSIZE / SPA_MINBLOCKSIZE + 1);
zb->zb_psize_histogram[idx]++;
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]))
zb->zb_ditto_samevdev++;
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal != 0)
zb->zb_ditto_samevdev++;
break;
}
}
if (BP_IS_EMBEDDED(bp)) {
zcb->zcb_embedded_blocks[BPE_GET_ETYPE(bp)]++;
zcb->zcb_embedded_histogram[BPE_GET_ETYPE(bp)]
[BPE_GET_PSIZE(bp)]++;
return;
}
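/*
 * With -L (leak detection disabled) there is nothing to claim, so stop
 * once the statistics have been updated.
 */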
if (dump_opt['L'])
return;
if (BP_GET_DEDUP(bp)) {
ddt_t *ddt;
ddt_entry_t *dde;
ddt = ddt_select(zcb->zcb_spa, bp);
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_FALSE);
if (dde == NULL) {
refcnt = 0;
} else {
ddt_phys_t *ddp = ddt_phys_select(dde, bp);
ddt_phys_decref(ddp);
refcnt = ddp->ddp_refcnt;
if (ddt_phys_total_refcnt(dde) == 0)
ddt_remove(ddt, dde);
}
ddt_exit(ddt);
}
VERIFY3U(zio_wait(zio_claim(NULL, zcb->zcb_spa,
refcnt ? 0 : spa_first_txg(zcb->zcb_spa),
bp, NULL, NULL, ZIO_FLAG_CANFAIL)), ==, 0);
}
static void
zdb_blkptr_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
int ioerr = zio->io_error;
zdb_cb_t *zcb = zio->io_private;
zbookmark_phys_t *zb = &zio->io_bookmark;
abd_free(zio->io_abd);
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_ios--;
cv_broadcast(&spa->spa_scrub_io_cv);
if (ioerr && !(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
char blkbuf[BP_SPRINTF_LEN];
zcb->zcb_haderrors = 1;
zcb->zcb_errors[ioerr]++;
if (dump_opt['b'] >= 2)
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
else
blkbuf[0] = '\0';
(void) printf("zdb_blkptr_cb: "
"Got error %d reading "
"<%llu, %llu, %lld, %llx> %s -- skipping\n",
ioerr,
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(u_longlong_t)zb->zb_level,
(u_longlong_t)zb->zb_blkid,
blkbuf);
}
mutex_exit(&spa->spa_scrub_lock);
}
static int
zdb_blkptr_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
zdb_cb_t *zcb = arg;
dmu_object_type_t type;
boolean_t is_metadata;
if (bp == NULL)
return (0);
if (dump_opt['b'] >= 5 && bp->blk_birth > 0) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("objset %llu object %llu "
"level %lld offset 0x%llx %s\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(u_longlong_t)blkid2offset(dnp, bp, zb),
blkbuf);
}
if (BP_IS_HOLE(bp))
return (0);
type = BP_GET_TYPE(bp);
zdb_count_block(zcb, zilog, bp,
(type & DMU_OT_NEWTYPE) ? ZDB_OT_OTHER : type);
is_metadata = (BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type));
if (!BP_IS_EMBEDDED(bp) &&
(dump_opt['c'] > 1 || (dump_opt['c'] && is_metadata))) {
size_t size = BP_GET_PSIZE(bp);
abd_t *abd = abd_alloc(size, B_FALSE);
int flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCRUB | ZIO_FLAG_RAW;
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
flags |= ZIO_FLAG_SPECULATIVE;
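/*
 * Throttle verification reads to max_inflight outstanding I/Os, using
 * the scrub condvar for wakeups from zdb_blkptr_done().
 */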
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_ios > max_inflight)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_ios++;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(NULL, spa, bp, abd, size,
zdb_blkptr_done, zcb, ZIO_PRIORITY_ASYNC_READ, flags, zb));
}
zcb->zcb_readfails = 0;
/* only call gethrtime() every 100 blocks */
static int iters;
if (++iters > 100)
iters = 0;
else
return (0);
if (dump_opt['b'] < 5 && gethrtime() > zcb->zcb_lastprint + NANOSEC) {
uint64_t now = gethrtime();
char buf[10];
uint64_t bytes = zcb->zcb_type[ZB_TOTAL][ZDB_OT_TOTAL].zb_asize;
int kb_per_sec =
1 + bytes / (1 + ((now - zcb->zcb_start) / 1000 / 1000));
int sec_remaining =
(zcb->zcb_totalasize - bytes) / 1024 / kb_per_sec;
/* make sure nicenum has enough space */
CTASSERT(sizeof (buf) >= NN_NUMBUF_SZ);
zfs_nicebytes(bytes, buf, sizeof (buf));
(void) fprintf(stderr,
"\r%5s completed (%4dMB/s) "
"estimated time remaining: %uhr %02umin %02usec ",
buf, kb_per_sec / 1024,
sec_remaining / 60 / 60,
sec_remaining / 60 % 60,
sec_remaining % 60);
zcb->zcb_lastprint = now;
}
return (0);
}
static void
zdb_leak(void *arg, uint64_t start, uint64_t size)
{
vdev_t *vd = arg;
(void) printf("leaked space: vdev %llu, offset 0x%llx, size %llu\n",
(u_longlong_t)vd->vdev_id, (u_longlong_t)start, (u_longlong_t)size);
}
static metaslab_ops_t zdb_metaslab_ops = {
NULL /* alloc */
};
+/* ARGSUSED */
+static void
+claim_segment_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
+ uint64_t size, void *arg)
+{
+ /*
+ * This callback was called through a remap from
+ * a device being removed. Therefore, the vdev that
+ * this callback is applied to is a concrete
+ * vdev.
+ */
+ ASSERT(vdev_is_concrete(vd));
+
+ VERIFY0(metaslab_claim_impl(vd, offset, size,
+ spa_first_txg(vd->vdev_spa)));
+}
+
+static void
+claim_segment_cb(void *arg, uint64_t offset, uint64_t size)
+{
+ vdev_t *vd = arg;
+
+ vdev_indirect_ops.vdev_op_remap(vd, offset, size,
+ claim_segment_impl_cb, NULL);
+}
+
+/*
+ * After accounting for all allocated blocks that are directly referenced,
+ * we might have missed a reference to a block from a partially complete
+ * (and thus unused) indirect mapping object. We perform a secondary pass
+ * through the metaslabs we have already mapped and claim the destination
+ * blocks.
+ */
+static void
+zdb_claim_removing(spa_t *spa, zdb_cb_t *zcb)
+{
+ if (spa->spa_vdev_removal == NULL)
+ return;
+
+ spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
+
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+ vdev_t *vd = svr->svr_vdev;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+
+ for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
+ metaslab_t *msp = vd->vdev_ms[msi];
+
+ if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
+ break;
+
+ ASSERT0(range_tree_space(svr->svr_allocd_segs));
+
+ if (msp->ms_sm != NULL) {
+ VERIFY0(space_map_load(msp->ms_sm,
+ svr->svr_allocd_segs, SM_ALLOC));
+
+ /*
+ * Clear everything past what has been synced,
+ * because we have not allocated mappings for it yet.
+ */
+ range_tree_clear(svr->svr_allocd_segs,
+ vdev_indirect_mapping_max_offset(vim),
+ msp->ms_sm->sm_start + msp->ms_sm->sm_size -
+ vdev_indirect_mapping_max_offset(vim));
+ }
+
+ zcb->zcb_removing_size +=
+ range_tree_space(svr->svr_allocd_segs);
+ range_tree_vacate(svr->svr_allocd_segs, claim_segment_cb, vd);
+ }
+
+ spa_config_exit(spa, SCL_CONFIG, FTAG);
+}
+
+/*
+ * vm_idxp is an in-out parameter which (for indirect vdevs) is the
+ * index in vim_entries that has the first entry in this metaslab. On
+ * return, it will be set to the first entry after this metaslab.
+ */
+static void
+zdb_leak_init_ms(metaslab_t *msp, uint64_t *vim_idxp)
+{
+ metaslab_group_t *mg = msp->ms_group;
+ vdev_t *vd = mg->mg_vd;
+ vdev_t *rvd = vd->vdev_spa->spa_root_vdev;
+
+ mutex_enter(&msp->ms_lock);
+ metaslab_unload(msp);
+
+ /*
+ * We don't want to spend the CPU manipulating the size-ordered
+ * tree, so clear the range_tree ops.
+ */
+ msp->ms_tree->rt_ops = NULL;
+
+ (void) fprintf(stderr,
+ "\rloading vdev %llu of %llu, metaslab %llu of %llu ...",
+ (longlong_t)vd->vdev_id,
+ (longlong_t)rvd->vdev_children,
+ (longlong_t)msp->ms_id,
+ (longlong_t)vd->vdev_ms_count);
+
+ /*
+ * For leak detection, we overload the metaslab ms_tree to
+ * contain allocated segments instead of free segments. As a
+ * result, we can't use the normal metaslab_load/unload
+ * interfaces.
+ */
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ for (; *vim_idxp < vdev_indirect_mapping_num_entries(vim);
+ (*vim_idxp)++) {
+ vdev_indirect_mapping_entry_phys_t *vimep =
+ &vim->vim_entries[*vim_idxp];
+ uint64_t ent_offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
+ uint64_t ent_len = DVA_GET_ASIZE(&vimep->vimep_dst);
+ ASSERT3U(ent_offset, >=, msp->ms_start);
+ if (ent_offset >= msp->ms_start + msp->ms_size)
+ break;
+
+ /*
+ * Mappings do not cross metaslab boundaries,
+ * because we create them by walking the metaslabs.
+ */
+ ASSERT3U(ent_offset + ent_len, <=,
+ msp->ms_start + msp->ms_size);
+ range_tree_add(msp->ms_tree, ent_offset, ent_len);
+ }
+ } else if (msp->ms_sm != NULL) {
+ VERIFY0(space_map_load(msp->ms_sm, msp->ms_tree, SM_ALLOC));
+ }
+
+ if (!msp->ms_loaded) {
+ msp->ms_loaded = B_TRUE;
+ }
+ mutex_exit(&msp->ms_lock);
+}
+
+/* ARGSUSED */
+static int
+increment_indirect_mapping_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
+{
+ zdb_cb_t *zcb = arg;
+ spa_t *spa = zcb->zcb_spa;
+ vdev_t *vd;
+ const dva_t *dva = &bp->blk_dva[0];
+
+ ASSERT(!dump_opt['L']);
+ ASSERT3U(BP_GET_NDVAS(bp), ==, 1);
+
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ vd = vdev_lookup_top(zcb->zcb_spa, DVA_GET_VDEV(dva));
+ ASSERT3P(vd, !=, NULL);
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
+ ASSERT3P(zcb->zcb_vd_obsolete_counts[vd->vdev_id], !=, NULL);
+
+ vdev_indirect_mapping_increment_obsolete_count(
+ vd->vdev_indirect_mapping,
+ DVA_GET_OFFSET(dva), DVA_GET_ASIZE(dva),
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
+
+ return (0);
+}
+
+static uint32_t *
+zdb_load_obsolete_counts(vdev_t *vd)
+{
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ spa_t *spa = vd->vdev_spa;
+ spa_condensing_indirect_phys_t *scip =
+ &spa->spa_condensing_indirect_phys;
+ uint32_t *counts;
+
+ EQUIV(vdev_obsolete_sm_object(vd) != 0, vd->vdev_obsolete_sm != NULL);
+ counts = vdev_indirect_mapping_load_obsolete_counts(vim);
+ if (vd->vdev_obsolete_sm != NULL) {
+ vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
+ vd->vdev_obsolete_sm);
+ }
+ if (scip->scip_vdev == vd->vdev_id &&
+ scip->scip_prev_obsolete_sm_object != 0) {
+ space_map_t *prev_obsolete_sm = NULL;
+ VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
+ scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
+ space_map_update(prev_obsolete_sm);
+ vdev_indirect_mapping_load_obsolete_spacemap(vim, counts,
+ prev_obsolete_sm);
+ space_map_close(prev_obsolete_sm);
+ }
+ return (counts);
+}
+
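/*
 * Walk the on-disk DDT, accumulating the space saved by dedup and
 * counting auto-ditto copies, and pre-load each entry into the in-core
 * tree so that zdb_count_block() can decrement its refcounts as the
 * traversal encounters the deduplicated blocks.
 */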
static void
zdb_ddt_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
ddt_bookmark_t ddb;
ddt_entry_t dde;
int error;
int p;
bzero(&ddb, sizeof (ddb));
while ((error = ddt_walk(spa, &ddb, &dde)) == 0) {
blkptr_t blk;
ddt_phys_t *ddp = dde.dde_phys;
if (ddb.ddb_class == DDT_CLASS_UNIQUE)
return;
ASSERT(ddt_phys_total_refcnt(&dde) > 1);
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0)
continue;
ddt_bp_create(ddb.ddb_checksum,
&dde.dde_key, ddp, &blk);
if (p == DDT_PHYS_DITTO) {
zdb_count_block(zcb, NULL, &blk, ZDB_OT_DITTO);
} else {
zcb->zcb_dedup_asize +=
BP_GET_ASIZE(&blk) * (ddp->ddp_refcnt - 1);
zcb->zcb_dedup_blocks++;
}
}
if (!dump_opt['L']) {
ddt_t *ddt = spa->spa_ddt[ddb.ddb_checksum];
ddt_enter(ddt);
VERIFY(ddt_lookup(ddt, &blk, B_TRUE) != NULL);
ddt_exit(ddt);
}
}
ASSERT(error == ENOENT);
}
static void
zdb_leak_init(spa_t *spa, zdb_cb_t *zcb)
{
zcb->zcb_spa = spa;
- uint64_t c, m;
+ uint64_t c;
if (!dump_opt['L']) {
+ dsl_pool_t *dp = spa->spa_dsl_pool;
vdev_t *rvd = spa->spa_root_vdev;
/*
* We are going to be changing the meaning of the metaslab's
* ms_tree. Ensure that the allocator doesn't try to
* use the tree.
*/
spa->spa_normal_class->mc_ops = &zdb_metaslab_ops;
spa->spa_log_class->mc_ops = &zdb_metaslab_ops;
+ zcb->zcb_vd_obsolete_counts =
+ umem_zalloc(rvd->vdev_children * sizeof (uint32_t *),
+ UMEM_NOFAIL);
+
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
- ASSERTV(metaslab_group_t *mg = vd->vdev_mg);
- for (m = 0; m < vd->vdev_ms_count; m++) {
- metaslab_t *msp = vd->vdev_ms[m];
- ASSERT3P(msp->ms_group, ==, mg);
- mutex_enter(&msp->ms_lock);
- metaslab_unload(msp);
+ uint64_t vim_idx = 0;
+
+ ASSERT3U(c, ==, vd->vdev_id);
+
+ /*
+ * Note: we don't check for mapping leaks on
+ * removing vdevs because their ms_tree's are
+ * used to look for leaks in allocated space.
+ */
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ zcb->zcb_vd_obsolete_counts[c] =
+ zdb_load_obsolete_counts(vd);
/*
- * For leak detection, we overload the metaslab
- * ms_tree to contain allocated segments
- * instead of free segments. As a result,
- * we can't use the normal metaslab_load/unload
- * interfaces.
+ * Normally, indirect vdevs don't have any
+ * metaslabs. We want to set them up for
+ * zio_claim().
*/
- if (msp->ms_sm != NULL) {
- (void) fprintf(stderr,
- "\rloading space map for "
- "vdev %llu of %llu, "
- "metaslab %llu of %llu ...",
- (longlong_t)c,
- (longlong_t)rvd->vdev_children,
- (longlong_t)m,
- (longlong_t)vd->vdev_ms_count);
-
- /*
- * We don't want to spend the CPU
- * manipulating the size-ordered
- * tree, so clear the range_tree
- * ops.
- */
- msp->ms_tree->rt_ops = NULL;
- VERIFY0(space_map_load(msp->ms_sm,
- msp->ms_tree, SM_ALLOC));
-
- if (!msp->ms_loaded)
- msp->ms_loaded = B_TRUE;
- }
- mutex_exit(&msp->ms_lock);
+ VERIFY0(vdev_metaslab_init(vd, 0));
+ }
+
+ for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
+ zdb_leak_init_ms(vd->vdev_ms[m], &vim_idx);
+ }
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ ASSERT3U(vim_idx, ==,
+ vdev_indirect_mapping_num_entries(
+ vd->vdev_indirect_mapping));
}
}
(void) fprintf(stderr, "\n");
+
+ if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
+ ASSERT(spa_feature_is_enabled(spa,
+ SPA_FEATURE_DEVICE_REMOVAL));
+ (void) bpobj_iterate_nofree(&dp->dp_obsolete_bpobj,
+ increment_indirect_mapping_cb, zcb, NULL);
+ }
}
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
zdb_ddt_leak_init(spa, zcb);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
-static void
-zdb_leak_fini(spa_t *spa)
+static boolean_t
+zdb_check_for_obsolete_leaks(vdev_t *vd, zdb_cb_t *zcb)
{
+ boolean_t leaks = B_FALSE;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ uint64_t total_leaked = 0;
+
+ ASSERT(vim != NULL);
+
+ for (uint64_t i = 0; i < vdev_indirect_mapping_num_entries(vim); i++) {
+ vdev_indirect_mapping_entry_phys_t *vimep =
+ &vim->vim_entries[i];
+ uint64_t obsolete_bytes = 0;
+ uint64_t offset = DVA_MAPPING_GET_SRC_OFFSET(vimep);
+ metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+
+ /*
+ * This is not very efficient but it's easy to
+ * verify correctness.
+ */
+ for (uint64_t inner_offset = 0;
+ inner_offset < DVA_GET_ASIZE(&vimep->vimep_dst);
+ inner_offset += 1 << vd->vdev_ashift) {
+ if (range_tree_contains(msp->ms_tree,
+ offset + inner_offset, 1 << vd->vdev_ashift)) {
+ obsolete_bytes += 1 << vd->vdev_ashift;
+ }
+ }
+
+ int64_t bytes_leaked = obsolete_bytes -
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id][i];
+ ASSERT3U(DVA_GET_ASIZE(&vimep->vimep_dst), >=,
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id][i]);
+ if (bytes_leaked != 0 &&
+ (vdev_obsolete_counts_are_precise(vd) ||
+ dump_opt['d'] >= 5)) {
+ (void) printf("obsolete indirect mapping count "
+ "mismatch on %llu:%llx:%llx : %llx bytes leaked\n",
+ (u_longlong_t)vd->vdev_id,
+ (u_longlong_t)DVA_MAPPING_GET_SRC_OFFSET(vimep),
+ (u_longlong_t)DVA_GET_ASIZE(&vimep->vimep_dst),
+ (u_longlong_t)bytes_leaked);
+ }
+ total_leaked += ABS(bytes_leaked);
+ }
+
+ if (!vdev_obsolete_counts_are_precise(vd) && total_leaked > 0) {
+ int pct_leaked = total_leaked * 100 /
+ vdev_indirect_mapping_bytes_mapped(vim);
+ (void) printf("cannot verify obsolete indirect mapping "
+ "counts of vdev %llu because precise feature was not "
+ "enabled when it was removed: %d%% (%llx bytes) of mapping"
+ "unreferenced\n",
+ (u_longlong_t)vd->vdev_id, pct_leaked,
+ (u_longlong_t)total_leaked);
+ } else if (total_leaked > 0) {
+ (void) printf("obsolete indirect mapping count mismatch "
+ "for vdev %llu -- %llx total bytes mismatched\n",
+ (u_longlong_t)vd->vdev_id,
+ (u_longlong_t)total_leaked);
+ leaks |= B_TRUE;
+ }
+
+ vdev_indirect_mapping_free_obsolete_counts(vim,
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id]);
+ zcb->zcb_vd_obsolete_counts[vd->vdev_id] = NULL;
+
+ return (leaks);
+}
+
+static boolean_t
+zdb_leak_fini(spa_t *spa, zdb_cb_t *zcb)
+{
+ boolean_t leaks = B_FALSE;
if (!dump_opt['L']) {
vdev_t *rvd = spa->spa_root_vdev;
for (unsigned c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
ASSERTV(metaslab_group_t *mg = vd->vdev_mg);
- for (unsigned m = 0; m < vd->vdev_ms_count; m++) {
+
+ if (zcb->zcb_vd_obsolete_counts[c] != NULL) {
+ leaks |= zdb_check_for_obsolete_leaks(vd, zcb);
+ }
+
+ for (uint64_t m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
ASSERT3P(mg, ==, msp->ms_group);
- mutex_enter(&msp->ms_lock);
/*
* The ms_tree has been overloaded to
* contain allocated segments. Now that we
* finished traversing all blocks, any
* block that remains in the ms_tree
* represents an allocated block that we
* did not claim during the traversal.
* Claimed blocks would have been removed
- * from the ms_tree.
+ * from the ms_tree. For indirect vdevs,
+ * space remaining in the tree represents
+ * parts of the mapping that are not
+ * referenced, which is not a bug.
*/
- range_tree_vacate(msp->ms_tree, zdb_leak, vd);
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ range_tree_vacate(msp->ms_tree,
+ NULL, NULL);
+ } else {
+ range_tree_vacate(msp->ms_tree,
+ zdb_leak, vd);
+ }
if (msp->ms_loaded)
msp->ms_loaded = B_FALSE;
-
- mutex_exit(&msp->ms_lock);
}
}
+
+ umem_free(zcb->zcb_vd_obsolete_counts,
+ rvd->vdev_children * sizeof (uint32_t *));
+ zcb->zcb_vd_obsolete_counts = NULL;
}
+ return (leaks);
}
/* ARGSUSED */
static int
count_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zdb_cb_t *zcb = arg;
if (dump_opt['b'] >= 5) {
char blkbuf[BP_SPRINTF_LEN];
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("[%s] %s\n",
"deferred free", blkbuf);
}
zdb_count_block(zcb, NULL, bp, ZDB_OT_DEFERRED);
return (0);
}
static int
dump_block_stats(spa_t *spa)
{
zdb_cb_t zcb;
zdb_blkstats_t *zb, *tzb;
uint64_t norm_alloc, norm_space, total_alloc, total_found;
int flags = TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT | TRAVERSE_HARD;
boolean_t leaks = B_FALSE;
int e, c, err;
bp_embedded_type_t i;
bzero(&zcb, sizeof (zcb));
(void) printf("\nTraversing all blocks %s%s%s%s%s...\n\n",
(dump_opt['c'] || !dump_opt['L']) ? "to verify " : "",
(dump_opt['c'] == 1) ? "metadata " : "",
dump_opt['c'] ? "checksums " : "",
(dump_opt['c'] && !dump_opt['L']) ? "and verify " : "",
!dump_opt['L'] ? "nothing leaked " : "");
/*
* Load all space maps as SM_ALLOC maps, then traverse the pool
* claiming each block we discover. If the pool is perfectly
* consistent, the space maps will be empty when we're done.
* Anything left over is a leak; any block we can't claim (because
* it's not part of any space map) is a double allocation,
* reference to a freed block, or an unclaimed log block.
*/
bzero(&zcb, sizeof (zdb_cb_t));
zdb_leak_init(spa, &zcb);
/*
* If there's a deferred-free bplist, process that first.
*/
(void) bpobj_iterate_nofree(&spa->spa_deferred_bpobj,
count_block_cb, &zcb, NULL);
+
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_iterate_nofree(&spa->spa_dsl_pool->dp_free_bpobj,
count_block_cb, &zcb, NULL);
}
+
+ zdb_claim_removing(spa, &zcb);
+
if (spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
VERIFY3U(0, ==, bptree_iterate(spa->spa_meta_objset,
spa->spa_dsl_pool->dp_bptree_obj, B_FALSE, count_block_cb,
&zcb, NULL));
}
if (dump_opt['c'] > 1)
flags |= TRAVERSE_PREFETCH_DATA;
zcb.zcb_totalasize = metaslab_class_get_alloc(spa_normal_class(spa));
zcb.zcb_start = zcb.zcb_lastprint = gethrtime();
err = traverse_pool(spa, 0, flags, zdb_blkptr_cb, &zcb);
/*
* If we've traversed the data blocks then we need to wait for those
* I/Os to complete. We leverage "The Godfather" zio to wait on
* all async I/Os to complete.
*/
if (dump_opt['c']) {
for (c = 0; c < max_ncpus; c++) {
(void) zio_wait(spa->spa_async_zio_root[c]);
spa->spa_async_zio_root[c] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
}
/*
* Done after zio_wait() since zcb_haderrors is modified in
* zdb_blkptr_done()
*/
zcb.zcb_haderrors |= err;
if (zcb.zcb_haderrors) {
(void) printf("\nError counts:\n\n");
(void) printf("\t%5s %s\n", "errno", "count");
for (e = 0; e < 256; e++) {
if (zcb.zcb_errors[e] != 0) {
(void) printf("\t%5d %llu\n",
e, (u_longlong_t)zcb.zcb_errors[e]);
}
}
}
/*
* Report any leaked segments.
*/
- zdb_leak_fini(spa);
+ leaks |= zdb_leak_fini(spa, &zcb);
tzb = &zcb.zcb_type[ZB_TOTAL][ZDB_OT_TOTAL];
norm_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
norm_space = metaslab_class_get_space(spa_normal_class(spa));
total_alloc = norm_alloc + metaslab_class_get_alloc(spa_log_class(spa));
- total_found = tzb->zb_asize - zcb.zcb_dedup_asize;
+ total_found = tzb->zb_asize - zcb.zcb_dedup_asize +
+ zcb.zcb_removing_size;
if (total_found == total_alloc) {
if (!dump_opt['L'])
(void) printf("\n\tNo leaks (block sum matches space"
" maps exactly)\n");
} else {
(void) printf("block traversal size %llu != alloc %llu "
"(%s %lld)\n",
(u_longlong_t)total_found,
(u_longlong_t)total_alloc,
(dump_opt['L']) ? "unreachable" : "leaked",
(longlong_t)(total_alloc - total_found));
leaks = B_TRUE;
}
if (tzb->zb_count == 0)
return (2);
(void) printf("\n");
(void) printf("\tbp count: %10llu\n",
(u_longlong_t)tzb->zb_count);
(void) printf("\tganged count: %10llu\n",
(longlong_t)tzb->zb_gangs);
(void) printf("\tbp logical: %10llu avg: %6llu\n",
(u_longlong_t)tzb->zb_lsize,
(u_longlong_t)(tzb->zb_lsize / tzb->zb_count));
(void) printf("\tbp physical: %10llu avg:"
" %6llu compression: %6.2f\n",
(u_longlong_t)tzb->zb_psize,
(u_longlong_t)(tzb->zb_psize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_psize);
(void) printf("\tbp allocated: %10llu avg:"
" %6llu compression: %6.2f\n",
(u_longlong_t)tzb->zb_asize,
(u_longlong_t)(tzb->zb_asize / tzb->zb_count),
(double)tzb->zb_lsize / tzb->zb_asize);
(void) printf("\tbp deduped: %10llu ref>1:"
" %6llu deduplication: %6.2f\n",
(u_longlong_t)zcb.zcb_dedup_asize,
(u_longlong_t)zcb.zcb_dedup_blocks,
(double)zcb.zcb_dedup_asize / tzb->zb_asize + 1.0);
(void) printf("\tSPA allocated: %10llu used: %5.2f%%\n",
(u_longlong_t)norm_alloc, 100.0 * norm_alloc / norm_space);
for (i = 0; i < NUM_BP_EMBEDDED_TYPES; i++) {
if (zcb.zcb_embedded_blocks[i] == 0)
continue;
(void) printf("\n");
(void) printf("\tadditional, non-pointer bps of type %u: "
"%10llu\n",
i, (u_longlong_t)zcb.zcb_embedded_blocks[i]);
if (dump_opt['b'] >= 3) {
(void) printf("\t number of (compressed) bytes: "
"number of bps\n");
dump_histogram(zcb.zcb_embedded_histogram[i],
sizeof (zcb.zcb_embedded_histogram[i]) /
sizeof (zcb.zcb_embedded_histogram[i][0]), 0);
}
}
if (tzb->zb_ditto_samevdev != 0) {
(void) printf("\tDittoed blocks on same vdev: %llu\n",
(longlong_t)tzb->zb_ditto_samevdev);
}
+ for (uint64_t v = 0; v < spa->spa_root_vdev->vdev_children; v++) {
+ vdev_t *vd = spa->spa_root_vdev->vdev_child[v];
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+
+ if (vim == NULL) {
+ continue;
+ }
+
+ char mem[32];
+ zdb_nicenum(vdev_indirect_mapping_size(vim),
+ mem, sizeof (mem));
+
+ (void) printf("\tindirect vdev id %llu has %llu segments "
+ "(%s in memory)\n",
+ (longlong_t)vd->vdev_id,
+ (longlong_t)vdev_indirect_mapping_num_entries(vim), mem);
+ }
+
if (dump_opt['b'] >= 2) {
int l, t, level;
(void) printf("\nBlocks\tLSIZE\tPSIZE\tASIZE"
"\t avg\t comp\t%%Total\tType\n");
for (t = 0; t <= ZDB_OT_TOTAL; t++) {
char csize[32], lsize[32], psize[32], asize[32];
char avg[32], gang[32];
const char *typename;
/* make sure nicenum has enough space */
CTASSERT(sizeof (csize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (lsize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (psize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (asize) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (avg) >= NN_NUMBUF_SZ);
CTASSERT(sizeof (gang) >= NN_NUMBUF_SZ);
if (t < DMU_OT_NUMTYPES)
typename = dmu_ot[t].ot_name;
else
typename = zdb_ot_extname[t - DMU_OT_NUMTYPES];
if (zcb.zcb_type[ZB_TOTAL][t].zb_asize == 0) {
(void) printf("%6s\t%5s\t%5s\t%5s"
"\t%5s\t%5s\t%6s\t%s\n",
"-",
"-",
"-",
"-",
"-",
"-",
"-",
typename);
continue;
}
for (l = ZB_TOTAL - 1; l >= -1; l--) {
level = (l == -1 ? ZB_TOTAL : l);
zb = &zcb.zcb_type[level][t];
if (zb->zb_asize == 0)
continue;
if (dump_opt['b'] < 3 && level != ZB_TOTAL)
continue;
if (level == 0 && zb->zb_asize ==
zcb.zcb_type[ZB_TOTAL][t].zb_asize)
continue;
zdb_nicenum(zb->zb_count, csize,
sizeof (csize));
zdb_nicenum(zb->zb_lsize, lsize,
sizeof (lsize));
zdb_nicenum(zb->zb_psize, psize,
sizeof (psize));
zdb_nicenum(zb->zb_asize, asize,
sizeof (asize));
zdb_nicenum(zb->zb_asize / zb->zb_count, avg,
sizeof (avg));
zdb_nicenum(zb->zb_gangs, gang, sizeof (gang));
(void) printf("%6s\t%5s\t%5s\t%5s\t%5s"
"\t%5.2f\t%6.2f\t",
csize, lsize, psize, asize, avg,
(double)zb->zb_lsize / zb->zb_psize,
100.0 * zb->zb_asize / tzb->zb_asize);
if (level == ZB_TOTAL)
(void) printf("%s\n", typename);
else
(void) printf(" L%d %s\n",
level, typename);
if (dump_opt['b'] >= 3 && zb->zb_gangs > 0) {
(void) printf("\t number of ganged "
"blocks: %s\n", gang);
}
if (dump_opt['b'] >= 4) {
(void) printf("psize "
"(in 512-byte sectors): "
"number of blocks\n");
dump_histogram(zb->zb_psize_histogram,
PSIZE_HISTO_SIZE, 0);
}
}
}
}
(void) printf("\n");
if (leaks)
return (2);
if (zcb.zcb_haderrors)
return (3);
return (0);
}
typedef struct zdb_ddt_entry {
ddt_key_t zdde_key;
uint64_t zdde_ref_blocks;
uint64_t zdde_ref_lsize;
uint64_t zdde_ref_psize;
uint64_t zdde_ref_dsize;
avl_node_t zdde_node;
} zdb_ddt_entry_t;
/* ARGSUSED */
static int
zdb_ddt_add_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
avl_tree_t *t = arg;
avl_index_t where;
zdb_ddt_entry_t *zdde, zdde_search;
if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
return (0);
if (dump_opt['S'] > 1 && zb->zb_level == ZB_ROOT_LEVEL) {
(void) printf("traversing objset %llu, %llu objects, "
"%lu blocks so far\n",
(u_longlong_t)zb->zb_objset,
(u_longlong_t)BP_GET_FILL(bp),
avl_numnodes(t));
}
if (BP_IS_HOLE(bp) || BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_OFF ||
BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
return (0);
ddt_key_fill(&zdde_search.zdde_key, bp);
zdde = avl_find(t, &zdde_search, &where);
if (zdde == NULL) {
zdde = umem_zalloc(sizeof (*zdde), UMEM_NOFAIL);
zdde->zdde_key = zdde_search.zdde_key;
avl_insert(t, zdde, where);
}
zdde->zdde_ref_blocks += 1;
zdde->zdde_ref_lsize += BP_GET_LSIZE(bp);
zdde->zdde_ref_psize += BP_GET_PSIZE(bp);
zdde->zdde_ref_dsize += bp_get_dsize_sync(spa, bp);
return (0);
}
static void
dump_simulated_ddt(spa_t *spa)
{
avl_tree_t t;
void *cookie = NULL;
zdb_ddt_entry_t *zdde;
ddt_histogram_t ddh_total;
ddt_stat_t dds_total;
bzero(&ddh_total, sizeof (ddh_total));
bzero(&dds_total, sizeof (dds_total));
avl_create(&t, ddt_entry_compare,
sizeof (zdb_ddt_entry_t), offsetof(zdb_ddt_entry_t, zdde_node));
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
(void) traverse_pool(spa, 0, TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, zdb_ddt_add_cb, &t);
spa_config_exit(spa, SCL_CONFIG, FTAG);
while ((zdde = avl_destroy_nodes(&t, &cookie)) != NULL) {
ddt_stat_t dds;
uint64_t refcnt = zdde->zdde_ref_blocks;
ASSERT(refcnt != 0);
dds.dds_blocks = zdde->zdde_ref_blocks / refcnt;
dds.dds_lsize = zdde->zdde_ref_lsize / refcnt;
dds.dds_psize = zdde->zdde_ref_psize / refcnt;
dds.dds_dsize = zdde->zdde_ref_dsize / refcnt;
dds.dds_ref_blocks = zdde->zdde_ref_blocks;
dds.dds_ref_lsize = zdde->zdde_ref_lsize;
dds.dds_ref_psize = zdde->zdde_ref_psize;
dds.dds_ref_dsize = zdde->zdde_ref_dsize;
ddt_stat_add(&ddh_total.ddh_stat[highbit64(refcnt) - 1],
&dds, 0);
umem_free(zdde, sizeof (*zdde));
}
avl_destroy(&t);
ddt_histogram_stat(&dds_total, &ddh_total);
(void) printf("Simulated DDT histogram:\n");
zpool_dump_ddt(&dds_total, &ddh_total);
dump_dedup_ratio(&dds_total);
}
+static int
+verify_device_removal_feature_counts(spa_t *spa)
+{
+ uint64_t dr_feature_refcount = 0;
+ uint64_t oc_feature_refcount = 0;
+ uint64_t indirect_vdev_count = 0;
+ uint64_t precise_vdev_count = 0;
+ uint64_t obsolete_counts_object_count = 0;
+ uint64_t obsolete_sm_count = 0;
+ uint64_t obsolete_counts_count = 0;
+ uint64_t scip_count = 0;
+ uint64_t obsolete_bpobj_count = 0;
+ int ret = 0;
+
+ spa_condensing_indirect_phys_t *scip =
+ &spa->spa_condensing_indirect_phys;
+ if (scip->scip_next_mapping_object != 0) {
+ vdev_t *vd = spa->spa_root_vdev->vdev_child[scip->scip_vdev];
+ ASSERT(scip->scip_prev_obsolete_sm_object != 0);
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+
+ (void) printf("Condensing indirect vdev %llu: new mapping "
+ "object %llu, prev obsolete sm %llu\n",
+ (u_longlong_t)scip->scip_vdev,
+ (u_longlong_t)scip->scip_next_mapping_object,
+ (u_longlong_t)scip->scip_prev_obsolete_sm_object);
+ if (scip->scip_prev_obsolete_sm_object != 0) {
+ space_map_t *prev_obsolete_sm = NULL;
+ VERIFY0(space_map_open(&prev_obsolete_sm,
+ spa->spa_meta_objset,
+ scip->scip_prev_obsolete_sm_object,
+ 0, vd->vdev_asize, 0));
+ space_map_update(prev_obsolete_sm);
+ dump_spacemap(spa->spa_meta_objset, prev_obsolete_sm);
+ (void) printf("\n");
+ space_map_close(prev_obsolete_sm);
+ }
+
+ scip_count += 2;
+ }
+
+ for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
+ vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+
+ if (vic->vic_mapping_object != 0) {
+ ASSERT(vd->vdev_ops == &vdev_indirect_ops ||
+ vd->vdev_removing);
+ indirect_vdev_count++;
+
+ if (vd->vdev_indirect_mapping->vim_havecounts) {
+ obsolete_counts_count++;
+ }
+ }
+ if (vdev_obsolete_counts_are_precise(vd)) {
+ ASSERT(vic->vic_mapping_object != 0);
+ precise_vdev_count++;
+ }
+ if (vdev_obsolete_sm_object(vd) != 0) {
+ ASSERT(vic->vic_mapping_object != 0);
+ obsolete_sm_count++;
+ }
+ }
+
+ (void) feature_get_refcount(spa,
+ &spa_feature_table[SPA_FEATURE_DEVICE_REMOVAL],
+ &dr_feature_refcount);
+ (void) feature_get_refcount(spa,
+ &spa_feature_table[SPA_FEATURE_OBSOLETE_COUNTS],
+ &oc_feature_refcount);
+
+ if (dr_feature_refcount != indirect_vdev_count) {
+ ret = 1;
+ (void) printf("Number of indirect vdevs (%llu) " \
+ "does not match feature count (%llu)\n",
+ (u_longlong_t)indirect_vdev_count,
+ (u_longlong_t)dr_feature_refcount);
+ } else {
+ (void) printf("Verified device_removal feature refcount " \
+ "of %llu is correct\n",
+ (u_longlong_t)dr_feature_refcount);
+ }
+
+ if (zap_contains(spa_meta_objset(spa), DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_OBSOLETE_BPOBJ) == 0) {
+ obsolete_bpobj_count++;
+ }
+
+
+ obsolete_counts_object_count = precise_vdev_count;
+ obsolete_counts_object_count += obsolete_sm_count;
+ obsolete_counts_object_count += obsolete_counts_count;
+ obsolete_counts_object_count += scip_count;
+ obsolete_counts_object_count += obsolete_bpobj_count;
+ obsolete_counts_object_count += remap_deadlist_count;
+
+ if (oc_feature_refcount != obsolete_counts_object_count) {
+ ret = 1;
+ (void) printf("Number of obsolete counts objects (%llu) " \
+ "does not match feature count (%llu)\n",
+ (u_longlong_t)obsolete_counts_object_count,
+ (u_longlong_t)oc_feature_refcount);
+ (void) printf("pv:%llu os:%llu oc:%llu sc:%llu "
+ "ob:%llu rd:%llu\n",
+ (u_longlong_t)precise_vdev_count,
+ (u_longlong_t)obsolete_sm_count,
+ (u_longlong_t)obsolete_counts_count,
+ (u_longlong_t)scip_count,
+ (u_longlong_t)obsolete_bpobj_count,
+ (u_longlong_t)remap_deadlist_count);
+ } else {
+ (void) printf("Verified indirect_refcount feature refcount " \
+ "of %llu is correct\n",
+ (u_longlong_t)oc_feature_refcount);
+ }
+ return (ret);
+}
+
static void
dump_zpool(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
int rc = 0;
if (dump_opt['S']) {
dump_simulated_ddt(spa);
return;
}
if (!dump_opt['e'] && dump_opt['C'] > 1) {
(void) printf("\nCached configuration:\n");
dump_nvlist(spa->spa_config, 8);
}
if (dump_opt['C'])
dump_config(spa);
if (dump_opt['u'])
dump_uberblock(&spa->spa_uberblock, "\nUberblock:\n", "\n");
if (dump_opt['D'])
dump_all_ddts(spa);
if (dump_opt['d'] > 2 || dump_opt['m'])
dump_metaslabs(spa);
if (dump_opt['M'])
dump_metaslab_groups(spa);
if (dump_opt['d'] || dump_opt['i']) {
spa_feature_t f;
dump_dir(dp->dp_meta_objset);
if (dump_opt['d'] >= 3) {
+ dsl_pool_t *dp = spa->spa_dsl_pool;
dump_full_bpobj(&spa->spa_deferred_bpobj,
"Deferred frees", 0);
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
- dump_full_bpobj(
- &spa->spa_dsl_pool->dp_free_bpobj,
+ dump_full_bpobj(&dp->dp_free_bpobj,
"Pool snapshot frees", 0);
}
+ if (bpobj_is_open(&dp->dp_obsolete_bpobj)) {
+ ASSERT(spa_feature_is_enabled(spa,
+ SPA_FEATURE_DEVICE_REMOVAL));
+ dump_full_bpobj(&dp->dp_obsolete_bpobj,
+ "Pool obsolete blocks", 0);
+ }
if (spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dump_bptree(spa->spa_meta_objset,
- spa->spa_dsl_pool->dp_bptree_obj,
+ dp->dp_bptree_obj,
"Pool dataset frees");
}
dump_dtl(spa->spa_root_vdev, 0);
}
(void) dmu_objset_find(spa_name(spa), dump_one_dir,
NULL, DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
for (f = 0; f < SPA_FEATURES; f++) {
uint64_t refcount;
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET) ||
!spa_feature_is_enabled(spa, f)) {
ASSERT0(dataset_feature_count[f]);
continue;
}
if (feature_get_refcount(spa, &spa_feature_table[f],
&refcount) == ENOTSUP)
continue;
if (dataset_feature_count[f] != refcount) {
(void) printf("%s feature refcount mismatch: "
"%lld datasets != %lld refcount\n",
spa_feature_table[f].fi_uname,
(longlong_t)dataset_feature_count[f],
(longlong_t)refcount);
rc = 2;
} else {
(void) printf("Verified %s feature refcount "
"of %llu is correct\n",
spa_feature_table[f].fi_uname,
(longlong_t)refcount);
}
}
+
+ if (rc == 0) {
+ rc = verify_device_removal_feature_counts(spa);
+ }
}
if (rc == 0 && (dump_opt['b'] || dump_opt['c']))
rc = dump_block_stats(spa);
if (rc == 0)
rc = verify_spacemap_refcounts(spa);
if (dump_opt['s'])
show_pool_stats(spa);
if (dump_opt['h'])
dump_history(spa);
if (rc != 0) {
dump_debug_buffer();
exit(rc);
}
}
#define ZDB_FLAG_CHECKSUM 0x0001
#define ZDB_FLAG_DECOMPRESS 0x0002
#define ZDB_FLAG_BSWAP 0x0004
#define ZDB_FLAG_GBH 0x0008
#define ZDB_FLAG_INDIRECT 0x0010
#define ZDB_FLAG_PHYS 0x0020
#define ZDB_FLAG_RAW 0x0040
#define ZDB_FLAG_PRINT_BLKPTR 0x0080
static int flagbits[256];
static void
zdb_print_blkptr(blkptr_t *bp, int flags)
{
char blkbuf[BP_SPRINTF_LEN];
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array((void *)bp, sizeof (blkptr_t));
snprintf_blkptr(blkbuf, sizeof (blkbuf), bp);
(void) printf("%s\n", blkbuf);
}
static void
zdb_dump_indirect(blkptr_t *bp, int nbps, int flags)
{
int i;
for (i = 0; i < nbps; i++)
zdb_print_blkptr(&bp[i], flags);
}
static void
zdb_dump_gbh(void *buf, int flags)
{
zdb_dump_indirect((blkptr_t *)buf, SPA_GBH_NBLKPTRS, flags);
}
static void
zdb_dump_block_raw(void *buf, uint64_t size, int flags)
{
if (flags & ZDB_FLAG_BSWAP)
byteswap_uint64_array(buf, size);
VERIFY(write(fileno(stdout), buf, size) == size);
}
static void
zdb_dump_block(char *label, void *buf, uint64_t size, int flags)
{
uint64_t *d = (uint64_t *)buf;
unsigned nwords = size / sizeof (uint64_t);
int do_bswap = !!(flags & ZDB_FLAG_BSWAP);
unsigned i, j;
const char *hdr;
char *c;
if (do_bswap)
hdr = " 7 6 5 4 3 2 1 0 f e d c b a 9 8";
else
hdr = " 0 1 2 3 4 5 6 7 8 9 a b c d e f";
(void) printf("\n%s\n%6s %s 0123456789abcdef\n", label, "", hdr);
#ifdef _LITTLE_ENDIAN
/* correct the endianness */
do_bswap = !do_bswap;
#endif
for (i = 0; i < nwords; i += 2) {
(void) printf("%06llx: %016llx %016llx ",
(u_longlong_t)(i * sizeof (uint64_t)),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i]) : d[i]),
(u_longlong_t)(do_bswap ? BSWAP_64(d[i + 1]) : d[i + 1]));
c = (char *)&d[i];
for (j = 0; j < 2 * sizeof (uint64_t); j++)
(void) printf("%c", isprint(c[j]) ? c[j] : '.');
(void) printf("\n");
}
}
/*
* There are two acceptable formats:
* leaf_name - For example: c1t0d0 or /tmp/ztest.0a
* child[.child]* - For example: 0.1.1
*
* The second form can be used to specify arbitrary vdevs anywhere
* in the hierarchy. For example, in a pool with a mirror of
* RAID-Zs, you can specify either RAID-Z vdev with 0.0 or 0.1 .
*/
static vdev_t *
zdb_vdev_lookup(vdev_t *vdev, const char *path)
{
char *s, *p, *q;
unsigned i;
if (vdev == NULL)
return (NULL);
/* First, assume the x.x.x.x format */
i = strtoul(path, &s, 10);
if (s == path || (s && *s != '.' && *s != '\0'))
goto name;
if (i >= vdev->vdev_children)
return (NULL);
vdev = vdev->vdev_child[i];
if (s && *s == '\0')
return (vdev);
return (zdb_vdev_lookup(vdev, s+1));
name:
for (i = 0; i < vdev->vdev_children; i++) {
vdev_t *vc = vdev->vdev_child[i];
if (vc->vdev_path == NULL) {
vc = zdb_vdev_lookup(vc, path);
if (vc == NULL)
continue;
else
return (vc);
}
p = strrchr(vc->vdev_path, '/');
p = p ? p + 1 : vc->vdev_path;
q = &vc->vdev_path[strlen(vc->vdev_path) - 2];
if (strcmp(vc->vdev_path, path) == 0)
return (vc);
if (strcmp(p, path) == 0)
return (vc);
if (strcmp(q, "s0") == 0 && strncmp(p, path, q - p) == 0)
return (vc);
}
return (NULL);
}
/*
* Read a block from a pool and print it out. The syntax of the
* block descriptor is:
*
* pool:vdev_specifier:offset:size[:flags]
*
* pool - The name of the pool you wish to read from
* vdev_specifier - Which vdev (see comment for zdb_vdev_lookup)
* offset - offset, in hex, in bytes
* size - Amount of data to read, in hex, in bytes
* flags - A string of characters specifying options
* b: Decode a blkptr at given offset within block
* *c: Calculate and display checksums
* d: Decompress data before dumping
* e: Byteswap data before dumping
* g: Display data as a gang block header
* i: Display as an indirect block
* p: Do I/O to physical offset
* r: Dump raw data to stdout
*
* * = not yet implemented
*/
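/*
 * A minimal example, assuming a hypothetical pool named "tank": the
 * command below reads 0x1000 bytes at offset 0x400000 of top-level
 * vdev 0 and dumps them raw to stdout:
 *
 *	zdb -R tank:0:400000:1000:r
 */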
static void
zdb_read_block(char *thing, spa_t *spa)
{
blkptr_t blk, *bp = &blk;
dva_t *dva = bp->blk_dva;
int flags = 0;
uint64_t offset = 0, size = 0, psize = 0, lsize = 0, blkptr_offset = 0;
zio_t *zio;
vdev_t *vd;
abd_t *pabd;
void *lbuf, *buf;
const char *s, *vdev;
char *p, *dup, *flagstr;
int i, error;
boolean_t borrowed = B_FALSE;
dup = strdup(thing);
s = strtok(dup, ":");
vdev = s ? s : "";
s = strtok(NULL, ":");
offset = strtoull(s ? s : "", NULL, 16);
s = strtok(NULL, ":");
size = strtoull(s ? s : "", NULL, 16);
s = strtok(NULL, ":");
if (s)
flagstr = strdup(s);
else
flagstr = strdup("");
s = NULL;
if (size == 0)
s = "size must not be zero";
if (!IS_P2ALIGNED(size, DEV_BSIZE))
s = "size must be a multiple of sector size";
if (!IS_P2ALIGNED(offset, DEV_BSIZE))
s = "offset must be a multiple of sector size";
if (s) {
(void) printf("Invalid block specifier: %s - %s\n", thing, s);
free(flagstr);
free(dup);
return;
}
for (s = strtok(flagstr, ":"); s; s = strtok(NULL, ":")) {
for (i = 0; flagstr[i]; i++) {
int bit = flagbits[(uchar_t)flagstr[i]];
if (bit == 0) {
(void) printf("***Invalid flag: %c\n",
flagstr[i]);
continue;
}
flags |= bit;
/* If it's not something with an argument, keep going */
if ((bit & (ZDB_FLAG_CHECKSUM |
ZDB_FLAG_PRINT_BLKPTR)) == 0)
continue;
p = &flagstr[i + 1];
if (bit == ZDB_FLAG_PRINT_BLKPTR) {
blkptr_offset = strtoull(p, &p, 16);
i = p - &flagstr[i + 1];
}
if (*p != ':' && *p != '\0') {
(void) printf("***Invalid flag arg: '%s'\n", s);
free(flagstr);
free(dup);
return;
}
}
}
free(flagstr);
vd = zdb_vdev_lookup(spa->spa_root_vdev, vdev);
if (vd == NULL) {
(void) printf("***Invalid vdev: %s\n", vdev);
free(dup);
return;
} else {
if (vd->vdev_path)
(void) fprintf(stderr, "Found vdev: %s\n",
vd->vdev_path);
else
(void) fprintf(stderr, "Found vdev type: %s\n",
vd->vdev_ops->vdev_op_type);
}
psize = size;
lsize = size;
pabd = abd_alloc_for_io(SPA_MAXBLOCKSIZE, B_FALSE);
lbuf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
BP_ZERO(bp);
DVA_SET_VDEV(&dva[0], vd->vdev_id);
DVA_SET_OFFSET(&dva[0], offset);
DVA_SET_GANG(&dva[0], !!(flags & ZDB_FLAG_GBH));
DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, psize));
BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
BP_SET_LSIZE(bp, lsize);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
BP_SET_TYPE(bp, DMU_OT_NONE);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
zio = zio_root(spa, NULL, NULL, 0);
if (vd == vd->vdev_top) {
/*
* Treat this as a normal block read.
*/
zio_nowait(zio_read(zio, spa, bp, pabd, psize, NULL, NULL,
ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL));
} else {
/*
* Treat this as a vdev child I/O.
*/
zio_nowait(zio_vdev_child_io(zio, bp, vd, offset, pabd,
psize, ZIO_TYPE_READ, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE |
ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY |
- ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW, NULL, NULL));
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_RAW | ZIO_FLAG_OPTIONAL,
+ NULL, NULL));
}
error = zio_wait(zio);
spa_config_exit(spa, SCL_STATE, FTAG);
if (error) {
(void) printf("Read of %s failed, error: %d\n", thing, error);
goto out;
}
if (flags & ZDB_FLAG_DECOMPRESS) {
/*
* We don't know how the data was compressed, so just try
* every decompress function at every inflated blocksize.
*/
enum zio_compress c;
void *lbuf2 = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
/*
* XXX - On the one hand, with SPA_MAXBLOCKSIZE at 16MB,
* this could take a while and we should let the user know
* we are not stuck. On the other hand, printing progress
* info gets old after a while. What to do?
*/
for (lsize = psize + SPA_MINBLOCKSIZE;
lsize <= SPA_MAXBLOCKSIZE; lsize += SPA_MINBLOCKSIZE) {
for (c = 0; c < ZIO_COMPRESS_FUNCTIONS; c++) {
/*
* ZLE can easily "decompress" a stream that was not compressed
* with ZLE, so provide an option to disable it.
*/
if (c == ZIO_COMPRESS_ZLE &&
getenv("ZDB_NO_ZLE"))
continue;
(void) fprintf(stderr,
"Trying %05llx -> %05llx (%s)\n",
(u_longlong_t)psize, (u_longlong_t)lsize,
zio_compress_table[c].ci_name);
/*
* We randomize lbuf2, and decompress to both
* lbuf and lbuf2. This way, we will know if
* decompression filled the buffer exactly to lsize.
*/
VERIFY0(random_get_pseudo_bytes(lbuf2, lsize));
if (zio_decompress_data(c, pabd,
lbuf, psize, lsize) == 0 &&
zio_decompress_data(c, pabd,
lbuf2, psize, lsize) == 0 &&
bcmp(lbuf, lbuf2, lsize) == 0)
break;
}
if (c != ZIO_COMPRESS_FUNCTIONS)
break;
}
umem_free(lbuf2, SPA_MAXBLOCKSIZE);
if (lsize > SPA_MAXBLOCKSIZE) {
(void) printf("Decompress of %s failed\n", thing);
goto out;
}
buf = lbuf;
size = lsize;
} else {
size = psize;
buf = abd_borrow_buf_copy(pabd, size);
borrowed = B_TRUE;
}
if (flags & ZDB_FLAG_PRINT_BLKPTR)
zdb_print_blkptr((blkptr_t *)(void *)
((uintptr_t)buf + (uintptr_t)blkptr_offset), flags);
else if (flags & ZDB_FLAG_RAW)
zdb_dump_block_raw(buf, size, flags);
else if (flags & ZDB_FLAG_INDIRECT)
zdb_dump_indirect((blkptr_t *)buf, size / sizeof (blkptr_t),
flags);
else if (flags & ZDB_FLAG_GBH)
zdb_dump_gbh(buf, flags);
else
zdb_dump_block(thing, buf, size, flags);
if (borrowed)
abd_return_buf_copy(pabd, buf, size);
out:
abd_free(pabd);
umem_free(lbuf, SPA_MAXBLOCKSIZE);
free(dup);
}
static void
zdb_embedded_block(char *thing)
{
blkptr_t bp;
unsigned long long *words = (void *)&bp;
char *buf;
int err;
buf = umem_alloc(SPA_MAXBLOCKSIZE, UMEM_NOFAIL);
bzero(&bp, sizeof (bp));
err = sscanf(thing, "%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx:"
"%llx:%llx:%llx:%llx:%llx:%llx:%llx:%llx",
words + 0, words + 1, words + 2, words + 3,
words + 4, words + 5, words + 6, words + 7,
words + 8, words + 9, words + 10, words + 11,
words + 12, words + 13, words + 14, words + 15);
if (err != 16) {
(void) printf("invalid input format\n");
exit(1);
}
ASSERT3U(BPE_GET_LSIZE(&bp), <=, SPA_MAXBLOCKSIZE);
err = decode_embedded_bp(&bp, buf, BPE_GET_LSIZE(&bp));
if (err != 0) {
(void) printf("decode failed: %u\n", err);
exit(1);
}
zdb_dump_block_raw(buf, BPE_GET_LSIZE(&bp), 0);
umem_free(buf, SPA_MAXBLOCKSIZE);
}
int
main(int argc, char **argv)
{
int c;
struct rlimit rl = { 1024, 1024 };
spa_t *spa = NULL;
objset_t *os = NULL;
int dump_all = 1;
int verbose = 0;
int error = 0;
char **searchdirs = NULL;
int nsearch = 0;
char *target, *target_pool;
nvlist_t *policy = NULL;
uint64_t max_txg = UINT64_MAX;
int flags = ZFS_IMPORT_MISSING_LOG;
int rewind = ZPOOL_NEVER_REWIND;
char *spa_config_path_env;
boolean_t target_is_spa = B_TRUE;
(void) setrlimit(RLIMIT_NOFILE, &rl);
(void) enable_extended_FILE_stdio(-1, -1);
dprintf_setup(&argc, argv);
/*
* If the SPA_CONFIG_PATH environment variable is set, it overrides the
* default spa_config_path setting. If the -U flag is specified, it
* overrides this environment variable setting once again.
*/
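/*
 * For example (hypothetical cache file paths), either invocation below
 * reads the pool configuration from a non-default cache file; the -U
 * flag takes precedence over the environment variable:
 *
 *	SPA_CONFIG_PATH=/tmp/zpool.cache zdb -C tank
 *	zdb -U /tmp/zpool.cache -C tank
 */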
spa_config_path_env = getenv("SPA_CONFIG_PATH");
if (spa_config_path_env != NULL)
spa_config_path = spa_config_path_env;
while ((c = getopt(argc, argv,
"AbcCdDeEFGhiI:lLmMo:Op:PqRsSt:uU:vVx:X")) != -1) {
switch (c) {
case 'b':
case 'c':
case 'C':
case 'd':
case 'D':
case 'E':
case 'G':
case 'h':
case 'i':
case 'l':
case 'm':
case 'M':
case 'O':
case 'R':
case 's':
case 'S':
case 'u':
dump_opt[c]++;
dump_all = 0;
break;
case 'A':
case 'e':
case 'F':
case 'L':
case 'P':
case 'q':
case 'X':
dump_opt[c]++;
break;
/* NB: Sort single match options below. */
case 'I':
max_inflight = strtoull(optarg, NULL, 0);
if (max_inflight == 0) {
(void) fprintf(stderr, "maximum number "
"of inflight I/Os must be greater "
"than 0\n");
usage();
}
break;
case 'o':
error = set_global_var(optarg);
if (error != 0)
usage();
break;
case 'p':
if (searchdirs == NULL) {
searchdirs = umem_alloc(sizeof (char *),
UMEM_NOFAIL);
} else {
char **tmp = umem_alloc((nsearch + 1) *
sizeof (char *), UMEM_NOFAIL);
bcopy(searchdirs, tmp, nsearch *
sizeof (char *));
umem_free(searchdirs,
nsearch * sizeof (char *));
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 't':
max_txg = strtoull(optarg, NULL, 0);
if (max_txg < TXG_INITIAL) {
(void) fprintf(stderr, "incorrect txg "
"specified: %s\n", optarg);
usage();
}
break;
case 'U':
spa_config_path = optarg;
if (spa_config_path[0] != '/') {
(void) fprintf(stderr,
"cachefile must be an absolute path "
"(i.e. start with a slash)\n");
usage();
}
break;
case 'v':
verbose++;
break;
case 'V':
flags = ZFS_IMPORT_VERBATIM;
break;
case 'x':
vn_dumpdir = optarg;
break;
default:
usage();
break;
}
}
if (!dump_opt['e'] && searchdirs != NULL) {
(void) fprintf(stderr, "-p option requires use of -e\n");
usage();
}
#if defined(_LP64)
/*
* ZDB does not typically re-read blocks; therefore limit the ARC
* to 256 MB, which can be used entirely for metadata.
*/
zfs_arc_max = zfs_arc_meta_limit = 256 * 1024 * 1024;
#endif
/*
* "zdb -c" uses checksum-verifying scrub i/os which are async reads.
* "zdb -b" uses traversal prefetch which uses async reads.
* For good performance, let several of them be active at once.
*/
zfs_vdev_async_read_max_active = 10;
/*
* Disable reference tracking for better performance.
*/
reference_tracking_enable = B_FALSE;
kernel_init(FREAD);
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s", libzfs_error_init(errno));
return (1);
}
if (dump_all)
verbose = MAX(verbose, 1);
for (c = 0; c < 256; c++) {
if (dump_all && strchr("AeEFlLOPRSX", c) == NULL)
dump_opt[c] = 1;
if (dump_opt[c])
dump_opt[c] += verbose;
}
aok = (dump_opt['A'] == 1) || (dump_opt['A'] > 2);
zfs_recover = (dump_opt['A'] > 1);
argc -= optind;
argv += optind;
if (argc < 2 && dump_opt['R'])
usage();
if (dump_opt['E']) {
if (argc != 1)
usage();
zdb_embedded_block(argv[0]);
return (0);
}
if (argc < 1) {
if (!dump_opt['e'] && dump_opt['C']) {
dump_cachefile(spa_config_path);
return (0);
}
usage();
}
if (dump_opt['l'])
return (dump_label(argv[0]));
if (dump_opt['O']) {
if (argc != 2)
usage();
dump_opt['v'] = verbose + 3;
return (dump_path(argv[0], argv[1]));
}
if (dump_opt['X'] || dump_opt['F'])
rewind = ZPOOL_DO_REWIND |
(dump_opt['X'] ? ZPOOL_EXTREME_REWIND : 0);
if (nvlist_alloc(&policy, NV_UNIQUE_NAME_TYPE, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, max_txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind) != 0)
fatal("internal error: %s", strerror(ENOMEM));
error = 0;
target = argv[0];
if (strpbrk(target, "/@") != NULL) {
size_t targetlen;
target_pool = strdup(target);
*strpbrk(target_pool, "/@") = '\0';
target_is_spa = B_FALSE;
targetlen = strlen(target);
if (targetlen && target[targetlen - 1] == '/')
target[targetlen - 1] = '\0';
} else {
target_pool = target;
}
if (dump_opt['e']) {
importargs_t args = { 0 };
nvlist_t *cfg = NULL;
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_TRUE;
error = zpool_tryimport(g_zfs, target_pool, &cfg, &args);
if (error == 0) {
if (nvlist_add_nvlist(cfg,
ZPOOL_REWIND_POLICY, policy) != 0) {
fatal("can't open '%s': %s",
target, strerror(ENOMEM));
}
/*
* Disable the activity check to allow examination of
* active pools.
*/
if (dump_opt['C'] > 1) {
(void) printf("\nConfiguration for import:\n");
dump_nvlist(cfg, 8);
}
error = spa_import(target_pool, cfg, NULL,
flags | ZFS_IMPORT_SKIP_MMP);
}
}
if (target_pool != target)
free(target_pool);
if (error == 0) {
if (target_is_spa || dump_opt['R']) {
/*
* Disable the activity check to allow examination of
* active pools.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL) {
spa->spa_import_flags |= ZFS_IMPORT_SKIP_MMP;
}
mutex_exit(&spa_namespace_lock);
error = spa_open_rewind(target, &spa, FTAG, policy,
NULL);
if (error) {
/*
* If we're missing the log device then
* try opening the pool after clearing the
* log state.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(target)) != NULL &&
spa->spa_log_state == SPA_LOG_MISSING) {
spa->spa_log_state = SPA_LOG_CLEAR;
error = 0;
}
mutex_exit(&spa_namespace_lock);
if (!error) {
error = spa_open_rewind(target, &spa,
FTAG, policy, NULL);
}
}
} else {
error = open_objset(target, DMU_OST_ANY, FTAG, &os);
if (error == 0)
spa = dmu_objset_spa(os);
}
}
nvlist_free(policy);
if (error)
fatal("can't open '%s': %s", target, strerror(error));
/*
* Set the pool failure mode to panic in order to prevent the pool
* from suspending. A suspended I/O will have no way to resume and
* can prevent the zdb(8) command from terminating as expected.
*/
if (spa != NULL)
spa->spa_failmode = ZIO_FAILURE_MODE_PANIC;
argv++;
argc--;
if (!dump_opt['R']) {
if (argc > 0) {
zopt_objects = argc;
zopt_object = calloc(zopt_objects, sizeof (uint64_t));
for (unsigned i = 0; i < zopt_objects; i++) {
errno = 0;
zopt_object[i] = strtoull(argv[i], NULL, 0);
if (zopt_object[i] == 0 && errno != 0)
fatal("bad number %s: %s",
argv[i], strerror(errno));
}
}
if (os != NULL) {
dump_dir(os);
} else if (zopt_objects > 0 && !dump_opt['m']) {
dump_dir(spa->spa_meta_objset);
} else {
dump_zpool(spa);
}
} else {
flagbits['b'] = ZDB_FLAG_PRINT_BLKPTR;
flagbits['c'] = ZDB_FLAG_CHECKSUM;
flagbits['d'] = ZDB_FLAG_DECOMPRESS;
flagbits['e'] = ZDB_FLAG_BSWAP;
flagbits['g'] = ZDB_FLAG_GBH;
flagbits['i'] = ZDB_FLAG_INDIRECT;
flagbits['p'] = ZDB_FLAG_PHYS;
flagbits['r'] = ZDB_FLAG_RAW;
for (int i = 0; i < argc; i++)
zdb_read_block(argv[i], spa);
}
if (os != NULL)
close_objset(os, FTAG);
else
spa_close(spa, FTAG);
fuid_table_destroy();
dump_debug_buffer();
libzfs_fini(g_zfs);
kernel_fini();
return (0);
}
diff --git a/cmd/zfs/zfs_main.c b/cmd/zfs/zfs_main.c
index 31376080bb67..45d9a661b0d5 100644
--- a/cmd/zfs/zfs_main.c
+++ b/cmd/zfs/zfs_main.c
@@ -1,7961 +1,7984 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright 2012 Milan Jurik. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2018 Datto Inc.
*/
#include <assert.h>
#include <ctype.h>
#include <errno.h>
#include <getopt.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <libnvpair.h>
#include <locale.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <fcntl.h>
#include <zone.h>
#include <grp.h>
#include <pwd.h>
#include <signal.h>
#include <sys/debug.h>
#include <sys/list.h>
#include <sys/mkdev.h>
#include <sys/mntent.h>
#include <sys/mnttab.h>
#include <sys/mount.h>
#include <sys/stat.h>
#include <sys/fs/zfs.h>
#include <sys/systeminfo.h>
#include <sys/types.h>
#include <time.h>
#include <sys/zfs_project.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include <zfs_prop.h>
#include <zfs_deleg.h>
#include <libuutil.h>
#ifdef HAVE_IDMAP
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include "zfs_iter.h"
#include "zfs_util.h"
#include "zfs_comutil.h"
#include "libzfs_impl.h"
#include "zfs_projectutil.h"
libzfs_handle_t *g_zfs;
static FILE *mnttab_file;
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static int zfs_do_clone(int argc, char **argv);
static int zfs_do_create(int argc, char **argv);
static int zfs_do_destroy(int argc, char **argv);
static int zfs_do_get(int argc, char **argv);
static int zfs_do_inherit(int argc, char **argv);
static int zfs_do_list(int argc, char **argv);
static int zfs_do_mount(int argc, char **argv);
static int zfs_do_rename(int argc, char **argv);
static int zfs_do_rollback(int argc, char **argv);
static int zfs_do_set(int argc, char **argv);
static int zfs_do_upgrade(int argc, char **argv);
static int zfs_do_snapshot(int argc, char **argv);
static int zfs_do_unmount(int argc, char **argv);
static int zfs_do_share(int argc, char **argv);
static int zfs_do_unshare(int argc, char **argv);
static int zfs_do_send(int argc, char **argv);
static int zfs_do_receive(int argc, char **argv);
static int zfs_do_promote(int argc, char **argv);
static int zfs_do_userspace(int argc, char **argv);
static int zfs_do_allow(int argc, char **argv);
static int zfs_do_unallow(int argc, char **argv);
static int zfs_do_hold(int argc, char **argv);
static int zfs_do_holds(int argc, char **argv);
static int zfs_do_release(int argc, char **argv);
static int zfs_do_diff(int argc, char **argv);
static int zfs_do_bookmark(int argc, char **argv);
static int zfs_do_channel_program(int argc, char **argv);
+static int zfs_do_remap(int argc, char **argv);
static int zfs_do_load_key(int argc, char **argv);
static int zfs_do_unload_key(int argc, char **argv);
static int zfs_do_change_key(int argc, char **argv);
static int zfs_do_project(int argc, char **argv);
/*
* Enable a reasonable set of defaults for libumem debugging on DEBUG builds.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_CLONE,
HELP_CREATE,
HELP_DESTROY,
HELP_GET,
HELP_INHERIT,
HELP_UPGRADE,
HELP_LIST,
HELP_MOUNT,
HELP_PROMOTE,
HELP_RECEIVE,
HELP_RENAME,
HELP_ROLLBACK,
HELP_SEND,
HELP_SET,
HELP_SHARE,
HELP_SNAPSHOT,
HELP_UNMOUNT,
HELP_UNSHARE,
HELP_ALLOW,
HELP_UNALLOW,
HELP_USERSPACE,
HELP_GROUPSPACE,
HELP_PROJECTSPACE,
HELP_PROJECT,
HELP_HOLD,
HELP_HOLDS,
HELP_RELEASE,
HELP_DIFF,
+ HELP_REMAP,
HELP_BOOKMARK,
HELP_CHANNEL_PROGRAM,
HELP_LOAD_KEY,
HELP_UNLOAD_KEY,
HELP_CHANGE_KEY,
} zfs_help_t;
typedef struct zfs_command {
const char *name;
int (*func)(int argc, char **argv);
zfs_help_t usage;
} zfs_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zfs_command_t command_table[] = {
{ "create", zfs_do_create, HELP_CREATE },
{ "destroy", zfs_do_destroy, HELP_DESTROY },
{ NULL },
{ "snapshot", zfs_do_snapshot, HELP_SNAPSHOT },
{ "rollback", zfs_do_rollback, HELP_ROLLBACK },
{ "clone", zfs_do_clone, HELP_CLONE },
{ "promote", zfs_do_promote, HELP_PROMOTE },
{ "rename", zfs_do_rename, HELP_RENAME },
{ "bookmark", zfs_do_bookmark, HELP_BOOKMARK },
{ "program", zfs_do_channel_program, HELP_CHANNEL_PROGRAM },
{ NULL },
{ "list", zfs_do_list, HELP_LIST },
{ NULL },
{ "set", zfs_do_set, HELP_SET },
{ "get", zfs_do_get, HELP_GET },
{ "inherit", zfs_do_inherit, HELP_INHERIT },
{ "upgrade", zfs_do_upgrade, HELP_UPGRADE },
{ NULL },
{ "userspace", zfs_do_userspace, HELP_USERSPACE },
{ "groupspace", zfs_do_userspace, HELP_GROUPSPACE },
{ "projectspace", zfs_do_userspace, HELP_PROJECTSPACE },
{ NULL },
{ "project", zfs_do_project, HELP_PROJECT },
{ NULL },
{ "mount", zfs_do_mount, HELP_MOUNT },
{ "unmount", zfs_do_unmount, HELP_UNMOUNT },
{ "share", zfs_do_share, HELP_SHARE },
{ "unshare", zfs_do_unshare, HELP_UNSHARE },
{ NULL },
{ "send", zfs_do_send, HELP_SEND },
{ "receive", zfs_do_receive, HELP_RECEIVE },
{ NULL },
{ "allow", zfs_do_allow, HELP_ALLOW },
{ NULL },
{ "unallow", zfs_do_unallow, HELP_UNALLOW },
{ NULL },
{ "hold", zfs_do_hold, HELP_HOLD },
{ "holds", zfs_do_holds, HELP_HOLDS },
{ "release", zfs_do_release, HELP_RELEASE },
{ "diff", zfs_do_diff, HELP_DIFF },
+ { "remap", zfs_do_remap, HELP_REMAP },
{ "load-key", zfs_do_load_key, HELP_LOAD_KEY },
{ "unload-key", zfs_do_unload_key, HELP_UNLOAD_KEY },
{ "change-key", zfs_do_change_key, HELP_CHANGE_KEY },
};
#define NCOMMAND (sizeof (command_table) / sizeof (command_table[0]))
zfs_command_t *current_command;
static const char *
get_usage(zfs_help_t idx)
{
switch (idx) {
case HELP_CLONE:
return (gettext("\tclone [-p] [-o property=value] ... "
"<snapshot> <filesystem|volume>\n"));
case HELP_CREATE:
return (gettext("\tcreate [-p] [-o property=value] ... "
"<filesystem>\n"
"\tcreate [-ps] [-b blocksize] [-o property=value] ... "
"-V <size> <volume>\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-fnpRrv] <filesystem|volume>\n"
"\tdestroy [-dnpRrv] "
"<filesystem|volume>@<snap>[%<snap>][,...]\n"
"\tdestroy <filesystem|volume>#<bookmark>\n"));
case HELP_GET:
return (gettext("\tget [-rHp] [-d max] "
"[-o \"all\" | field[,...]]\n"
"\t [-t type[,...]] [-s source[,...]]\n"
"\t <\"all\" | property[,...]> "
"[filesystem|volume|snapshot|bookmark] ...\n"));
case HELP_INHERIT:
return (gettext("\tinherit [-rS] <property> "
"<filesystem|volume|snapshot> ...\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade [-v]\n"
"\tupgrade [-r] [-V version] <-a | filesystem ...>\n"));
case HELP_LIST:
return (gettext("\tlist [-Hp] [-r|-d max] [-o property[,...]] "
"[-s property]...\n\t [-S property]... [-t type[,...]] "
"[filesystem|volume|snapshot] ...\n"));
case HELP_MOUNT:
return (gettext("\tmount\n"
"\tmount [-lvO] [-o opts] <-a | filesystem>\n"));
case HELP_PROMOTE:
return (gettext("\tpromote <clone-filesystem>\n"));
case HELP_RECEIVE:
return (gettext("\treceive [-vnsFu] "
"[-o <property>=<value>] ... [-x <property>] ...\n"
"\t <filesystem|volume|snapshot>\n"
"\treceive [-vnsFu] [-o <property>=<value>] ... "
"[-x <property>] ... \n"
"\t [-d | -e] <filesystem>\n"
"\treceive -A <filesystem|volume>\n"));
case HELP_RENAME:
return (gettext("\trename [-f] <filesystem|volume|snapshot> "
"<filesystem|volume|snapshot>\n"
"\trename [-f] -p <filesystem|volume> <filesystem|volume>\n"
"\trename -r <snapshot> <snapshot>\n"));
case HELP_ROLLBACK:
return (gettext("\trollback [-rRf] <snapshot>\n"));
case HELP_SEND:
return (gettext("\tsend [-DnPpRvLecwb] [-[i|I] snapshot] "
"<snapshot>\n"
"\tsend [-nvPLecw] [-i snapshot|bookmark] "
"<filesystem|volume|snapshot>\n"
"\tsend [-nvPe] -t <receive_resume_token>\n"));
case HELP_SET:
return (gettext("\tset <property=value> ... "
"<filesystem|volume|snapshot> ...\n"));
case HELP_SHARE:
return (gettext("\tshare [-l] <-a [nfs|smb] | filesystem>\n"));
case HELP_SNAPSHOT:
return (gettext("\tsnapshot [-r] [-o property=value] ... "
"<filesystem|volume>@<snap> ...\n"));
case HELP_UNMOUNT:
return (gettext("\tunmount [-f] "
"<-a | filesystem|mountpoint>\n"));
case HELP_UNSHARE:
return (gettext("\tunshare "
"<-a [nfs|smb] | filesystem|mountpoint>\n"));
case HELP_ALLOW:
return (gettext("\tallow <filesystem|volume>\n"
"\tallow [-ldug] "
"<\"everyone\"|user|group>[,...] <perm|@setname>[,...]\n"
"\t <filesystem|volume>\n"
"\tallow [-ld] -e <perm|@setname>[,...] "
"<filesystem|volume>\n"
"\tallow -c <perm|@setname>[,...] <filesystem|volume>\n"
"\tallow -s @setname <perm|@setname>[,...] "
"<filesystem|volume>\n"));
case HELP_UNALLOW:
return (gettext("\tunallow [-rldug] "
"<\"everyone\"|user|group>[,...]\n"
"\t [<perm|@setname>[,...]] <filesystem|volume>\n"
"\tunallow [-rld] -e [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -c [<perm|@setname>[,...]] "
"<filesystem|volume>\n"
"\tunallow [-r] -s @setname [<perm|@setname>[,...]] "
"<filesystem|volume>\n"));
case HELP_USERSPACE:
return (gettext("\tuserspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot>\n"));
case HELP_GROUPSPACE:
return (gettext("\tgroupspace [-Hinp] [-o field[,...]] "
"[-s field] ...\n"
"\t [-S field] ... [-t type[,...]] "
"<filesystem|snapshot>\n"));
case HELP_PROJECTSPACE:
return (gettext("\tprojectspace [-Hp] [-o field[,...]] "
"[-s field] ... \n"
"\t [-S field] ... <filesystem|snapshot>\n"));
case HELP_PROJECT:
return (gettext("\tproject [-d|-r] <directory|file ...>\n"
"\tproject -c [-0] [-d|-r] [-p id] <directory|file ...>\n"
"\tproject -C [-k] [-r] <directory ...>\n"
"\tproject [-p id] [-r] [-s] <directory ...>\n"));
case HELP_HOLD:
return (gettext("\thold [-r] <tag> <snapshot> ...\n"));
case HELP_HOLDS:
return (gettext("\tholds [-r] <snapshot> ...\n"));
case HELP_RELEASE:
return (gettext("\trelease [-r] <tag> <snapshot> ...\n"));
case HELP_DIFF:
return (gettext("\tdiff [-FHt] <snapshot> "
"[snapshot|filesystem]\n"));
+ case HELP_REMAP:
+ return (gettext("\tremap <filesystem | volume>\n"));
case HELP_BOOKMARK:
return (gettext("\tbookmark <snapshot> <bookmark>\n"));
case HELP_CHANNEL_PROGRAM:
return (gettext("\tprogram [-jn] [-t <instruction limit>] "
"[-m <memory limit (b)>] <pool> <program file> "
"[lua args...]\n"));
case HELP_LOAD_KEY:
return (gettext("\tload-key [-rn] [-L <keylocation>] "
"<-a | filesystem|volume>\n"));
case HELP_UNLOAD_KEY:
return (gettext("\tunload-key [-r] "
"<-a | filesystem|volume>\n"));
case HELP_CHANGE_KEY:
return (gettext("\tchange-key [-l] [-o keyformat=<value>]\n"
"\t [-o keylocation=<value>] [-o pbkfd2iters=<value>]\n"
"\t <filesystem|volume>\n"
"\tchange-key -i [-l] <filesystem|volume>\n"));
}
abort();
/* NOTREACHED */
}
void
nomem(void)
{
(void) fprintf(stderr, gettext("internal error: out of memory\n"));
exit(1);
}
/*
* Utility function to guarantee malloc() success.
*/
void *
safe_malloc(size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
nomem();
return (data);
}
void *
safe_realloc(void *data, size_t size)
{
void *newp;
if ((newp = realloc(data, size)) == NULL) {
free(data);
nomem();
}
return (newp);
}
static char *
safe_strdup(char *str)
{
char *dupstr = strdup(str);
if (dupstr == NULL)
nomem();
return (dupstr);
}
/*
* Callback routine that will print out information for each of
* the properties.
*/
static int
usage_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-15s ", zfs_prop_to_name(prop));
if (zfs_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, "YES ");
if (zfs_prop_inheritable(prop))
(void) fprintf(fp, " YES ");
else
(void) fprintf(fp, " NO ");
if (zfs_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zfs_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
static void
usage(boolean_t requested)
{
int i;
boolean_t show_properties = B_FALSE;
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
(void) fprintf(fp, gettext("usage: zfs command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
(void) fprintf(fp, gettext("\nEach dataset is of the form: "
"pool/[dataset/]*dataset[@name]\n"));
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
(strcmp(current_command->name, "set") == 0 ||
strcmp(current_command->name, "get") == 0 ||
strcmp(current_command->name, "inherit") == 0 ||
strcmp(current_command->name, "list") == 0))
show_properties = B_TRUE;
if (show_properties) {
(void) fprintf(fp,
gettext("\nThe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-14s %s %s %s\n\n",
"PROPERTY", "EDIT", "INHERIT", "VALUES");
/* Iterate over all properties */
(void) zprop_iter(usage_prop_cb, fp, B_FALSE, B_TRUE,
ZFS_TYPE_DATASET);
(void) fprintf(fp, "\t%-15s ", "userused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "groupobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "projectobjused@...");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, "\t%-15s ", "userquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "userobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "groupobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "projectobjquota@...");
(void) fprintf(fp, "YES NO <size> | none\n");
(void) fprintf(fp, "\t%-15s ", "written@<snap>");
(void) fprintf(fp, " NO NO <size>\n");
(void) fprintf(fp, gettext("\nSizes are specified in bytes "
"with standard units such as K, M, G, etc.\n"));
(void) fprintf(fp, gettext("\nUser-defined properties can "
"be specified by using a name containing a colon (:).\n"));
(void) fprintf(fp, gettext("\nThe {user|group|project}"
"[obj]{used|quota}@ properties must be appended with\n"
"a user|group|project specifier of one of these forms:\n"
" POSIX name (eg: \"matt\")\n"
" POSIX id (eg: \"126829\")\n"
" SMB name@domain (eg: \"matt@sun\")\n"
" SMB SID (eg: \"S-1-234-567-89\")\n"));
} else {
(void) fprintf(fp,
gettext("\nFor the property list, run: %s\n"),
"zfs set|get");
(void) fprintf(fp,
gettext("\nFor the delegated permission list, run: %s\n"),
"zfs allow|unallow");
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
/*
* Take a property=value argument string and add it to the given nvlist.
* Modifies the argument in place.
*/
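/*
 * A minimal usage sketch (hypothetical caller): given a writable
 * string such as "compression=lz4", parseprop() splits it in place at
 * the '=' and adds "compression" -> "lz4" to the given nvlist:
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	char arg[] = "compression=lz4";
 *	if (!parseprop(props, arg))
 *		return (1);
 */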
static boolean_t
parseprop(nvlist_t *props, char *propname)
{
char *propval;
if ((propval = strchr(propname, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for property=value argument\n"));
return (B_FALSE);
}
*propval = '\0';
propval++;
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_string(props, propname, propval) != 0)
nomem();
return (B_TRUE);
}
/*
* Take a property name argument and add it to the given nvlist.
* Modifies the argument in place.
*/
static boolean_t
parsepropname(nvlist_t *props, char *propname)
{
if (strchr(propname, '=') != NULL) {
(void) fprintf(stderr, gettext("invalid character "
"'=' in property argument\n"));
return (B_FALSE);
}
if (nvlist_exists(props, propname)) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (B_FALSE);
}
if (nvlist_add_boolean(props, propname) != 0)
nomem();
return (B_TRUE);
}
static int
parse_depth(char *opt, int *flags)
{
char *tmp;
int depth;
depth = (int)strtol(opt, &tmp, 0);
if (*tmp) {
(void) fprintf(stderr,
gettext("%s is not an integer\n"), optarg);
usage(B_FALSE);
}
if (depth < 0) {
(void) fprintf(stderr,
gettext("Depth can not be negative.\n"));
usage(B_FALSE);
}
*flags |= (ZFS_ITER_DEPTH_LIMIT|ZFS_ITER_RECURSE);
return (depth);
}
#define PROGRESS_DELAY 2 /* seconds */
static char *pt_reverse = "\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b\b";
static time_t pt_begin;
static char *pt_header = NULL;
static boolean_t pt_shown;
static void
start_progress_timer(void)
{
pt_begin = time(NULL) + PROGRESS_DELAY;
pt_shown = B_FALSE;
}
static void
set_progress_header(char *header)
{
assert(pt_header == NULL);
pt_header = safe_strdup(header);
if (pt_shown) {
(void) printf("%s: ", header);
(void) fflush(stdout);
}
}
static void
update_progress(char *update)
{
if (!pt_shown && time(NULL) > pt_begin) {
int len = strlen(update);
(void) printf("%s: %s%*.*s", pt_header, update, len, len,
pt_reverse);
(void) fflush(stdout);
pt_shown = B_TRUE;
} else if (pt_shown) {
int len = strlen(update);
(void) printf("%s%*.*s", update, len, len, pt_reverse);
(void) fflush(stdout);
}
}
static void
finish_progress(char *done)
{
if (pt_shown) {
(void) printf("%s\n", done);
(void) fflush(stdout);
}
free(pt_header);
pt_header = NULL;
}
static int
zfs_mount_and_share(libzfs_handle_t *hdl, const char *dataset, zfs_type_t type)
{
zfs_handle_t *zhp = NULL;
int ret = 0;
zhp = zfs_open(hdl, dataset, type);
if (zhp == NULL)
return (1);
/*
* Volumes may neither be mounted nor shared. Potentially in the
* future, filesystems detected on these volumes could be mounted.
*/
if (zfs_get_type(zhp) == ZFS_TYPE_VOLUME) {
zfs_close(zhp);
return (0);
}
/*
* Mount and/or share the new filesystem as appropriate. We provide a
* verbose error message to let the user know that their filesystem was
* in fact created, even if we failed to mount or share it.
*
* If the user doesn't want the dataset automatically mounted, then
* skip the mount/share step.
*/
if (zfs_prop_valid_for_type(ZFS_PROP_CANMOUNT, type, B_FALSE) &&
zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) == ZFS_CANMOUNT_ON) {
if (geteuid() != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but it may only be "
"mounted by root\n"));
ret = 1;
} else if (zfs_mount(zhp, NULL, 0) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not mounted\n"));
ret = 1;
} else if (zfs_share(zhp) != 0) {
(void) fprintf(stderr, gettext("filesystem "
"successfully created, but not shared\n"));
ret = 1;
}
}
zfs_close(zhp);
return (ret);
}
/*
* zfs clone [-p] [-o prop=value] ... <snap> <fs | vol>
*
* Given an existing dataset, create a writable copy whose initial contents
* are the same as the source. The newly created dataset maintains a
* dependency on the original; the original cannot be destroyed so long as
* the clone exists.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*/
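/*
 * Example invocations (hypothetical dataset names):
 *
 *	zfs clone tank/fs@today tank/fs-copy
 *	zfs clone -p -o compression=lz4 tank/fs@today tank/clones/fs-copy
 */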
static int
zfs_do_clone(int argc, char **argv)
{
zfs_handle_t *zhp = NULL;
boolean_t parents = B_FALSE;
nvlist_t *props;
int ret = 0;
int c;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "o:p")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
case 'p':
parents = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
goto usage;
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto usage;
}
/* open the source dataset */
if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL) {
nvlist_free(props);
return (1);
}
if (parents && zfs_name_valid(argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
/*
* Now create the ancestors of the target dataset. If the
* target already exists and '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) {
zfs_close(zhp);
nvlist_free(props);
return (0);
}
if (zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
nvlist_free(props);
return (1);
}
}
/* pass to libzfs */
ret = zfs_clone(zhp, argv[1], props);
/* create the mountpoint if necessary */
if (ret == 0) {
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
ret = zfs_mount_and_share(g_zfs, argv[1], ZFS_TYPE_DATASET);
}
zfs_close(zhp);
nvlist_free(props);
return (!!ret);
usage:
ASSERT3P(zhp, ==, NULL);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
/*
* zfs create [-p] [-o prop=value] ... fs
* zfs create [-ps] [-b blocksize] [-o prop=value] ... -V vol size
*
* Create a new dataset. This command can be used to create filesystems
* and volumes. Snapshot creation is handled by 'zfs snapshot'.
* For volumes, the user must specify a size to be used.
*
* The '-s' flag applies only to volumes, and indicates that we should not try
* to set the reservation for this volume. By default we set a reservation
* equal to the size for any volume. For pools with SPA_VERSION >=
* SPA_VERSION_REFRESERVATION, we set a refreservation instead.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*/
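/*
 * Example invocations (hypothetical dataset names):
 *
 *	zfs create -p -o mountpoint=/export/home tank/home
 *	zfs create -s -V 10G tank/vol
 */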
static int
zfs_do_create(int argc, char **argv)
{
zfs_type_t type = ZFS_TYPE_FILESYSTEM;
uint64_t volsize = 0;
int c;
boolean_t noreserve = B_FALSE;
boolean_t bflag = B_FALSE;
boolean_t parents = B_FALSE;
int ret = 1;
nvlist_t *props;
uint64_t intval;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":V:b:so:p")) != -1) {
switch (c) {
case 'V':
type = ZFS_TYPE_VOLUME;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), intval) != 0)
nomem();
volsize = intval;
break;
case 'p':
parents = B_TRUE;
break;
case 'b':
bflag = B_TRUE;
if (zfs_nicestrtonum(g_zfs, optarg, &intval) != 0) {
(void) fprintf(stderr, gettext("bad volume "
"block size '%s': %s\n"), optarg,
libzfs_error_description(g_zfs));
goto error;
}
if (nvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
intval) != 0)
nomem();
break;
case 'o':
if (!parseprop(props, optarg))
goto error;
break;
case 's':
noreserve = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing size "
"argument\n"));
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
if ((bflag || noreserve) && type != ZFS_TYPE_VOLUME) {
(void) fprintf(stderr, gettext("'-s' and '-b' can only be "
"used when creating a volume\n"));
goto badusage;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing %s argument\n"),
zfs_type_to_name(type));
goto badusage;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
goto badusage;
}
if (type == ZFS_TYPE_VOLUME && !noreserve) {
zpool_handle_t *zpool_handle;
nvlist_t *real_props = NULL;
uint64_t spa_version;
char *p;
zfs_prop_t resv_prop;
char *strval;
char msg[1024];
if ((p = strchr(argv[0], '/')) != NULL)
*p = '\0';
zpool_handle = zpool_open(g_zfs, argv[0]);
if (p != NULL)
*p = '/';
if (zpool_handle == NULL)
goto error;
spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
if (spa_version >= SPA_VERSION_REFRESERVATION)
resv_prop = ZFS_PROP_REFRESERVATION;
else
resv_prop = ZFS_PROP_RESERVATION;
(void) snprintf(msg, sizeof (msg),
gettext("cannot create '%s'"), argv[0]);
if (props && (real_props = zfs_valid_proplist(g_zfs, type,
props, 0, NULL, zpool_handle, B_TRUE, msg)) == NULL) {
zpool_close(zpool_handle);
goto error;
}
zpool_close(zpool_handle);
volsize = zvol_volsize_to_reservation(volsize, real_props);
nvlist_free(real_props);
if (nvlist_lookup_string(props, zfs_prop_to_name(resv_prop),
&strval) != 0) {
if (nvlist_add_uint64(props,
zfs_prop_to_name(resv_prop), volsize) != 0) {
nvlist_free(props);
nomem();
}
}
}
if (parents && zfs_name_valid(argv[0], type)) {
/*
* Now create the ancestors of the target dataset. If the target
* already exists and '-p' option was used we should not
* complain.
*/
if (zfs_dataset_exists(g_zfs, argv[0], type)) {
ret = 0;
goto error;
}
if (zfs_create_ancestors(g_zfs, argv[0]) != 0)
goto error;
}
/* pass to libzfs */
if (zfs_create(g_zfs, argv[0], type, props) != 0)
goto error;
if (log_history) {
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
ret = zfs_mount_and_share(g_zfs, argv[0], ZFS_TYPE_DATASET);
error:
nvlist_free(props);
return (ret);
badusage:
nvlist_free(props);
usage(B_FALSE);
return (2);
}
/*
* zfs destroy [-rRf] <fs, vol>
* zfs destroy [-rRd] <snap>
*
* -r Recursively destroy all children
* -R Recursively destroy all dependents, including clones
* -f Force unmounting of any dependents
* -d If we can't destroy now, mark for deferred destruction
*
* Destroys the given dataset. By default, it will unmount any filesystems,
* and refuse to destroy a dataset that has any dependents. A dependent can
* either be a child, or a clone of a child.
*/
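/*
 * Example invocations (hypothetical dataset names):
 *
 *	zfs destroy -r tank/home
 *	zfs destroy -d tank/home@old-snap
 */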
typedef struct destroy_cbdata {
boolean_t cb_first;
boolean_t cb_force;
boolean_t cb_recurse;
boolean_t cb_error;
boolean_t cb_doclones;
zfs_handle_t *cb_target;
boolean_t cb_defer_destroy;
boolean_t cb_verbose;
boolean_t cb_parsable;
boolean_t cb_dryrun;
nvlist_t *cb_nvl;
nvlist_t *cb_batchedsnaps;
/* first snap in contiguous run */
char *cb_firstsnap;
/* previous snap in contiguous run */
char *cb_prevsnap;
int64_t cb_snapused;
char *cb_snapspec;
char *cb_bookmark;
uint64_t cb_snap_count;
} destroy_cbdata_t;
/*
* Check for any dependents based on the '-r' or '-R' flags.
*/
static int
destroy_check_dependent(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cbp = data;
const char *tname = zfs_get_name(cbp->cb_target);
const char *name = zfs_get_name(zhp);
if (strncmp(tname, name, strlen(tname)) == 0 &&
(name[strlen(tname)] == '/' || name[strlen(tname)] == '@')) {
/*
* This is a direct descendant, not a clone somewhere else in
* the hierarchy.
*/
if (cbp->cb_recurse)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has children\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-r' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
} else {
/*
* This is a clone. We only want to report this if the '-r'
* wasn't specified, or the target is a snapshot.
*/
if (!cbp->cb_recurse &&
zfs_get_type(cbp->cb_target) != ZFS_TYPE_SNAPSHOT)
goto out;
if (cbp->cb_first) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"%s has dependent clones\n"),
zfs_get_name(cbp->cb_target),
zfs_type_to_name(zfs_get_type(cbp->cb_target)));
(void) fprintf(stderr, gettext("use '-R' to destroy "
"the following datasets:\n"));
cbp->cb_first = B_FALSE;
cbp->cb_error = B_TRUE;
cbp->cb_dryrun = B_TRUE;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
}
out:
zfs_close(zhp);
return (0);
}
static int
destroy_batched(destroy_cbdata_t *cb)
{
int error = zfs_destroy_snaps_nvl(g_zfs,
cb->cb_batchedsnaps, B_FALSE);
fnvlist_free(cb->cb_batchedsnaps);
cb->cb_batchedsnaps = fnvlist_alloc();
return (error);
}
static int
destroy_callback(zfs_handle_t *zhp, void *data)
{
destroy_cbdata_t *cb = data;
const char *name = zfs_get_name(zhp);
int error;
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
/*
* Ignore pools (which we've already flagged as an error before getting
* here).
*/
if (strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
zfs_close(zhp);
return (0);
}
if (cb->cb_dryrun) {
zfs_close(zhp);
return (0);
}
/*
* We batch up all contiguous snapshots (even of different
* filesystems) and destroy them with one ioctl. We can't
* simply do all snap deletions and then all fs deletions,
* because we must delete a clone before its origin.
*/
if (zfs_get_type(zhp) == ZFS_TYPE_SNAPSHOT) {
cb->cb_snap_count++;
fnvlist_add_boolean(cb->cb_batchedsnaps, name);
if (cb->cb_snap_count % 10 == 0 && cb->cb_defer_destroy)
error = destroy_batched(cb);
} else {
error = destroy_batched(cb);
if (error != 0 ||
zfs_unmount(zhp, NULL, cb->cb_force ? MS_FORCE : 0) != 0 ||
zfs_destroy(zhp, cb->cb_defer_destroy) != 0) {
zfs_close(zhp);
return (-1);
}
}
zfs_close(zhp);
return (0);
}
static int
destroy_print_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
const char *name = zfs_get_name(zhp);
int err = 0;
if (nvlist_exists(cb->cb_nvl, name)) {
if (cb->cb_firstsnap == NULL)
cb->cb_firstsnap = strdup(name);
if (cb->cb_prevsnap != NULL)
free(cb->cb_prevsnap);
/* this snap continues the current range */
cb->cb_prevsnap = strdup(name);
if (cb->cb_firstsnap == NULL || cb->cb_prevsnap == NULL)
nomem();
if (cb->cb_verbose) {
if (cb->cb_parsable) {
(void) printf("destroy\t%s\n", name);
} else if (cb->cb_dryrun) {
(void) printf(gettext("would destroy %s\n"),
name);
} else {
(void) printf(gettext("will destroy %s\n"),
name);
}
}
} else if (cb->cb_firstsnap != NULL) {
/* end of this range */
uint64_t used = 0;
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
zfs_close(zhp);
return (err);
}
static int
destroy_print_snapshots(zfs_handle_t *fs_zhp, destroy_cbdata_t *cb)
{
int err;
assert(cb->cb_firstsnap == NULL);
assert(cb->cb_prevsnap == NULL);
err = zfs_iter_snapshots_sorted(fs_zhp, destroy_print_cb, cb);
if (cb->cb_firstsnap != NULL) {
uint64_t used = 0;
if (err == 0) {
err = lzc_snaprange_space(cb->cb_firstsnap,
cb->cb_prevsnap, &used);
}
cb->cb_snapused += used;
free(cb->cb_firstsnap);
cb->cb_firstsnap = NULL;
free(cb->cb_prevsnap);
cb->cb_prevsnap = NULL;
}
return (err);
}
static int
snapshot_to_nvl_cb(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
/* Check for clones. */
if (!cb->cb_doclones && !cb->cb_defer_destroy) {
cb->cb_target = zhp;
cb->cb_first = B_TRUE;
err = zfs_iter_dependents(zhp, B_TRUE,
destroy_check_dependent, cb);
}
if (err == 0) {
if (nvlist_add_boolean(cb->cb_nvl, zfs_get_name(zhp)))
nomem();
}
zfs_close(zhp);
return (err);
}
static int
gather_snapshots(zfs_handle_t *zhp, void *arg)
{
destroy_cbdata_t *cb = arg;
int err = 0;
err = zfs_iter_snapspec(zhp, cb->cb_snapspec, snapshot_to_nvl_cb, cb);
if (err == ENOENT)
err = 0;
if (err != 0)
goto out;
if (cb->cb_verbose) {
err = destroy_print_snapshots(zhp, cb);
if (err != 0)
goto out;
}
if (cb->cb_recurse)
err = zfs_iter_filesystems(zhp, gather_snapshots, cb);
out:
zfs_close(zhp);
return (err);
}
static int
destroy_clones(destroy_cbdata_t *cb)
{
nvpair_t *pair;
for (pair = nvlist_next_nvpair(cb->cb_nvl, NULL);
pair != NULL;
pair = nvlist_next_nvpair(cb->cb_nvl, pair)) {
zfs_handle_t *zhp = zfs_open(g_zfs, nvpair_name(pair),
ZFS_TYPE_SNAPSHOT);
if (zhp != NULL) {
boolean_t defer = cb->cb_defer_destroy;
int err;
/*
* We can't defer destroy non-snapshots, so set it to
* false while destroying the clones.
*/
cb->cb_defer_destroy = B_FALSE;
err = zfs_iter_dependents(zhp, B_FALSE,
destroy_callback, cb);
cb->cb_defer_destroy = defer;
zfs_close(zhp);
if (err != 0)
return (err);
}
}
return (0);
}
static int
zfs_do_destroy(int argc, char **argv)
{
destroy_cbdata_t cb = { 0 };
int rv = 0;
int err = 0;
int c;
zfs_handle_t *zhp = NULL;
char *at, *pound;
zfs_type_t type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, "vpndfrR")) != -1) {
switch (c) {
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'p':
cb.cb_verbose = B_TRUE;
cb.cb_parsable = B_TRUE;
break;
case 'n':
cb.cb_dryrun = B_TRUE;
break;
case 'd':
cb.cb_defer_destroy = B_TRUE;
type = ZFS_TYPE_SNAPSHOT;
break;
case 'f':
cb.cb_force = B_TRUE;
break;
case 'r':
cb.cb_recurse = B_TRUE;
break;
case 'R':
cb.cb_recurse = B_TRUE;
cb.cb_doclones = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc == 0) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
at = strchr(argv[0], '@');
pound = strchr(argv[0], '#');
if (at != NULL) {
/* Build the list of snaps to destroy in cb_nvl. */
cb.cb_nvl = fnvlist_alloc();
*at = '\0';
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(cb.cb_nvl);
return (1);
}
cb.cb_snapspec = at + 1;
if (gather_snapshots(zfs_handle_dup(zhp), &cb) != 0 ||
cb.cb_error) {
rv = 1;
goto out;
}
if (nvlist_empty(cb.cb_nvl)) {
(void) fprintf(stderr, gettext("could not find any "
"snapshots to destroy; check snapshot names.\n"));
rv = 1;
goto out;
}
if (cb.cb_verbose) {
char buf[16];
zfs_nicebytes(cb.cb_snapused, buf, sizeof (buf));
if (cb.cb_parsable) {
(void) printf("reclaim\t%llu\n",
(u_longlong_t)cb.cb_snapused);
} else if (cb.cb_dryrun) {
(void) printf(gettext("would reclaim %s\n"),
buf);
} else {
(void) printf(gettext("will reclaim %s\n"),
buf);
}
}
if (!cb.cb_dryrun) {
if (cb.cb_doclones) {
cb.cb_batchedsnaps = fnvlist_alloc();
err = destroy_clones(&cb);
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, B_FALSE);
}
if (err != 0) {
rv = 1;
goto out;
}
}
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs, cb.cb_nvl,
cb.cb_defer_destroy);
}
}
if (err != 0)
rv = 1;
} else if (pound != NULL) {
int err;
nvlist_t *nvl;
if (cb.cb_dryrun) {
(void) fprintf(stderr,
"dryrun is not supported with bookmark\n");
return (-1);
}
if (cb.cb_defer_destroy) {
(void) fprintf(stderr,
"defer destroy is not supported with bookmark\n");
return (-1);
}
if (cb.cb_recurse) {
(void) fprintf(stderr,
"recursive is not supported with bookmark\n");
return (-1);
}
if (!zfs_bookmark_exists(argv[0])) {
(void) fprintf(stderr, gettext("bookmark '%s' "
"does not exist.\n"), argv[0]);
return (1);
}
nvl = fnvlist_alloc();
fnvlist_add_boolean(nvl, argv[0]);
err = lzc_destroy_bookmarks(nvl, NULL);
if (err != 0) {
(void) zfs_standard_error(g_zfs, err,
"cannot destroy bookmark");
}
nvlist_free(nvl);
return (err);
} else {
/* Open the given dataset */
if ((zhp = zfs_open(g_zfs, argv[0], type)) == NULL)
return (1);
cb.cb_target = zhp;
/*
* Perform an explicit check for pools before going any further.
*/
if (!cb.cb_recurse && strchr(zfs_get_name(zhp), '/') == NULL &&
zfs_get_type(zhp) == ZFS_TYPE_FILESYSTEM) {
(void) fprintf(stderr, gettext("cannot destroy '%s': "
"operation does not apply to pools\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zfs destroy -r "
"%s' to destroy all datasets in the pool\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use 'zpool destroy %s' "
"to destroy the pool itself\n"), zfs_get_name(zhp));
rv = 1;
goto out;
}
/*
* Check for any dependents and/or clones.
*/
cb.cb_first = B_TRUE;
if (!cb.cb_doclones &&
zfs_iter_dependents(zhp, B_TRUE, destroy_check_dependent,
&cb) != 0) {
rv = 1;
goto out;
}
if (cb.cb_error) {
rv = 1;
goto out;
}
cb.cb_batchedsnaps = fnvlist_alloc();
if (zfs_iter_dependents(zhp, B_FALSE, destroy_callback,
&cb) != 0) {
rv = 1;
goto out;
}
/*
* Do the real thing. The callback will close the
* handle regardless of whether it succeeds or not.
*/
err = destroy_callback(zhp, &cb);
zhp = NULL;
if (err == 0) {
err = zfs_destroy_snaps_nvl(g_zfs,
cb.cb_batchedsnaps, cb.cb_defer_destroy);
}
if (err != 0)
rv = 1;
}
out:
fnvlist_free(cb.cb_batchedsnaps);
fnvlist_free(cb.cb_nvl);
if (zhp != NULL)
zfs_close(zhp);
return (rv);
}
static boolean_t
is_recvd_column(zprop_get_cbdata_t *cbp)
{
int i;
zfs_get_column_t col;
for (i = 0; i < ZFS_GET_NCOLS &&
(col = cbp->cb_columns[i]) != GET_COL_NONE; i++)
if (col == GET_COL_RECVD)
return (B_TRUE);
return (B_FALSE);
}
/*
* zfs get [-rHp] [-d max] [-o all | field[,field]...] [-s source[,source]...]
* [-t type[,...]] < all | property[,property]... > < fs | snap | vol > ...
*
* -r recurse over any child datasets
* -d Limit recursion to the given depth.
* -H scripted mode. Headers are stripped, and fields are separated
* by tabs instead of spaces.
* -o Set of fields to display. One of "name,property,value,
* received,source". Default is "name,property,value,source".
* "all" is an alias for all five.
* -s Set of sources to allow. One of
* "local,default,inherited,received,temporary,none". Default is
* all six.
* -t Set of types to display. One of
* "filesystem,volume,snapshot,bookmark,all". Default is all types.
* -p Display values in parsable (literal) format.
*
* Prints properties for the given datasets. The user can control which
* columns to display as well as which property types to allow.
*/
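/*
* Example invocations (illustrative only; pool and dataset names are
* hypothetical):
*
* zfs get compression tank/home
* zfs get -r -d 1 -o name,value -s local,received all tank
* zfs get -Hp used,available tank/data
*/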
/*
* Invoked to display the properties for a single dataset.
*/
static int
get_callback(zfs_handle_t *zhp, void *data)
{
char buf[ZFS_MAXPROPLEN];
char rbuf[ZFS_MAXPROPLEN];
zprop_source_t sourcetype;
char source[ZFS_MAX_DATASET_NAME_LEN];
zprop_get_cbdata_t *cbp = data;
nvlist_t *user_props = zfs_get_user_props(zhp);
zprop_list_t *pl = cbp->cb_proplist;
nvlist_t *propval;
char *strval;
char *sourceval;
boolean_t received = is_recvd_column(cbp);
for (; pl != NULL; pl = pl->pl_next) {
char *recvdval = NULL;
/*
* Skip the special fake placeholder. This will also skip over
* the name property when 'all' is specified.
*/
if (pl->pl_prop == ZFS_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, pl->pl_prop, buf,
sizeof (buf), &sourcetype, source,
sizeof (source),
cbp->cb_literal) != 0) {
if (pl->pl_all)
continue;
if (!zfs_prop_valid_for_type(pl->pl_prop,
ZFS_TYPE_DATASET, B_FALSE)) {
(void) fprintf(stderr,
gettext("No such property '%s'\n"),
zfs_prop_to_name(pl->pl_prop));
continue;
}
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
if (received && (zfs_prop_get_recvd(zhp,
zfs_prop_to_name(pl->pl_prop), rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
zfs_prop_to_name(pl->pl_prop),
buf, sourcetype, source, recvdval);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else if (zfs_prop_written(pl->pl_user_prop)) {
sourcetype = ZPROP_SRC_LOCAL;
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
buf, sizeof (buf), cbp->cb_literal) != 0) {
sourcetype = ZPROP_SRC_NONE;
(void) strlcpy(buf, "-", sizeof (buf));
}
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, buf, sourcetype, source, NULL);
} else {
if (nvlist_lookup_nvlist(user_props,
pl->pl_user_prop, &propval) != 0) {
if (pl->pl_all)
continue;
sourcetype = ZPROP_SRC_NONE;
strval = "-";
} else {
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &strval) == 0);
verify(nvlist_lookup_string(propval,
ZPROP_SOURCE, &sourceval) == 0);
if (strcmp(sourceval,
zfs_get_name(zhp)) == 0) {
sourcetype = ZPROP_SRC_LOCAL;
} else if (strcmp(sourceval,
ZPROP_SOURCE_VAL_RECVD) == 0) {
sourcetype = ZPROP_SRC_RECEIVED;
} else {
sourcetype = ZPROP_SRC_INHERITED;
(void) strlcpy(source,
sourceval, sizeof (source));
}
}
if (received && (zfs_prop_get_recvd(zhp,
pl->pl_user_prop, rbuf, sizeof (rbuf),
cbp->cb_literal) == 0))
recvdval = rbuf;
zprop_print_one_property(zfs_get_name(zhp), cbp,
pl->pl_user_prop, strval, sourcetype,
source, recvdval);
}
}
return (0);
}
static int
zfs_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
int i, c, flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
int types = ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK;
char *value, *fields;
int ret = 0;
int limit = 0;
zprop_list_t fake_name = { 0 };
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_DATASET;
/* check options */
while ((c = getopt(argc, argv, ":d:o:s:rt:Hp")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'o':
/*
* Process the set of columns to display. We zero out
* the structure to give us a blank slate.
*/
bzero(&cb.cb_columns, sizeof (cb.cb_columns));
i = 0;
while (*optarg != '\0') {
static char *col_subopts[] =
{ "name", "property", "value", "received",
"source", "all", NULL };
if (i == ZFS_GET_NCOLS) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
switch (getsubopt(&optarg, col_subopts,
&value)) {
case 0:
cb.cb_columns[i++] = GET_COL_NAME;
break;
case 1:
cb.cb_columns[i++] = GET_COL_PROPERTY;
break;
case 2:
cb.cb_columns[i++] = GET_COL_VALUE;
break;
case 3:
cb.cb_columns[i++] = GET_COL_RECVD;
flags |= ZFS_ITER_RECVD_PROPS;
break;
case 4:
cb.cb_columns[i++] = GET_COL_SOURCE;
break;
case 5:
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_RECVD;
cb.cb_columns[4] = GET_COL_SOURCE;
flags |= ZFS_ITER_RECVD_PROPS;
i = ZFS_GET_NCOLS;
break;
default:
(void) fprintf(stderr,
gettext("invalid column name "
"'%s'\n"), value);
usage(B_FALSE);
}
}
break;
case 's':
cb.cb_sources = 0;
while (*optarg != '\0') {
static char *source_subopts[] = {
"local", "default", "inherited",
"received", "temporary", "none",
NULL };
switch (getsubopt(&optarg, source_subopts,
&value)) {
case 0:
cb.cb_sources |= ZPROP_SRC_LOCAL;
break;
case 1:
cb.cb_sources |= ZPROP_SRC_DEFAULT;
break;
case 2:
cb.cb_sources |= ZPROP_SRC_INHERITED;
break;
case 3:
cb.cb_sources |= ZPROP_SRC_RECEIVED;
break;
case 4:
cb.cb_sources |= ZPROP_SRC_TEMPORARY;
break;
case 5:
cb.cb_sources |= ZPROP_SRC_NONE;
break;
default:
(void) fprintf(stderr,
gettext("invalid source "
"'%s'\n"), value);
usage(B_FALSE);
}
}
break;
case 't':
types = 0;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
while (*optarg != '\0') {
static char *type_subopts[] = { "filesystem",
"volume", "snapshot", "bookmark",
"all", NULL };
switch (getsubopt(&optarg, type_subopts,
&value)) {
case 0:
types |= ZFS_TYPE_FILESYSTEM;
break;
case 1:
types |= ZFS_TYPE_VOLUME;
break;
case 2:
types |= ZFS_TYPE_SNAPSHOT;
break;
case 3:
types |= ZFS_TYPE_BOOKMARK;
break;
case 4:
types = ZFS_TYPE_DATASET |
ZFS_TYPE_BOOKMARK;
break;
default:
(void) fprintf(stderr,
gettext("invalid type '%s'\n"),
value);
usage(B_FALSE);
}
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
fields = argv[0];
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
argc--;
argv++;
/*
* As part of zfs_expand_proplist(), we keep track of the maximum column
* width for each property. For the 'NAME' (and 'SOURCE') columns, we
* need to know the maximum name length. However, the user likely did
* not specify 'name' as one of the properties to fetch, so we need to
* make sure we always include at least this property for
* print_get_headers() to work properly.
*/
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZFS_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
cb.cb_first = B_TRUE;
/* run for each object */
ret = zfs_for_each(argc, argv, flags, types, NULL,
&cb.cb_proplist, limit, get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
return (ret);
}
/*
* inherit [-rS] <property> <fs|vol> ...
*
* -r Recurse over all children
* -S Revert to received value, if any
*
* For each dataset specified on the command line, inherit the given property
* from its parent. Inheriting a property at the pool level will cause it to
* use the default value. The '-r' flag will recurse over all children, and is
* useful for reverting a property across an entire hierarchy, discarding any
* local modifications made on individual datasets.
*/
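/*
* Example invocations (illustrative only; dataset names are hypothetical):
*
* zfs inherit -r compression tank/home
* zfs inherit -S sharenfs tank/export
*/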
typedef struct inherit_cbdata {
const char *cb_propname;
boolean_t cb_received;
} inherit_cbdata_t;
static int
inherit_recurse_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
zfs_prop_t prop = zfs_name_to_prop(cb->cb_propname);
/*
* If we're doing it recursively, then ignore properties that
* are not valid for this type of dataset.
*/
if (prop != ZPROP_INVAL &&
!zfs_prop_valid_for_type(prop, zfs_get_type(zhp), B_FALSE))
return (0);
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
inherit_cb(zfs_handle_t *zhp, void *data)
{
inherit_cbdata_t *cb = data;
return (zfs_prop_inherit(zhp, cb->cb_propname, cb->cb_received) != 0);
}
static int
zfs_do_inherit(int argc, char **argv)
{
int c;
zfs_prop_t prop;
inherit_cbdata_t cb = { 0 };
char *propname;
int ret = 0;
int flags = 0;
boolean_t received = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "rS")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'S':
received = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
propname = argv[0];
argc--;
argv++;
if ((prop = zfs_name_to_prop(propname)) != ZPROP_INVAL) {
if (zfs_prop_readonly(prop)) {
(void) fprintf(stderr, gettext(
"%s property is read-only\n"),
propname);
return (1);
}
if (!zfs_prop_inheritable(prop) && !received) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be inherited\n"), propname);
if (prop == ZFS_PROP_QUOTA ||
prop == ZFS_PROP_RESERVATION ||
prop == ZFS_PROP_REFQUOTA ||
prop == ZFS_PROP_REFRESERVATION) {
(void) fprintf(stderr, gettext("use 'zfs set "
"%s=none' to clear\n"), propname);
(void) fprintf(stderr, gettext("use 'zfs "
"inherit -S %s' to revert to received "
"value\n"), propname);
}
return (1);
}
if (received && (prop == ZFS_PROP_VOLSIZE ||
prop == ZFS_PROP_VERSION)) {
(void) fprintf(stderr, gettext("'%s' property cannot "
"be reverted to a received value\n"), propname);
return (1);
}
} else if (!zfs_prop_user(propname)) {
(void) fprintf(stderr, gettext("invalid property '%s'\n"),
propname);
usage(B_FALSE);
}
cb.cb_propname = propname;
cb.cb_received = received;
if (flags & ZFS_ITER_RECURSE) {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_recurse_cb, &cb);
} else {
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_DATASET,
NULL, NULL, 0, inherit_cb, &cb);
}
return (ret);
}
typedef struct upgrade_cbdata {
uint64_t cb_numupgraded;
uint64_t cb_numsamegraded;
uint64_t cb_numfailed;
uint64_t cb_version;
boolean_t cb_newer;
boolean_t cb_foundone;
char cb_lastfs[ZFS_MAX_DATASET_NAME_LEN];
} upgrade_cbdata_t;
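/*
* Compare the pool-name component (everything before the first '/' or '@')
* of 'name' against that of the open dataset handle. Used by
* upgrade_set_callback() to notice when 'zfs upgrade -a' crosses from one
* pool into another.
*/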
static int
same_pool(zfs_handle_t *zhp, const char *name)
{
int len1 = strcspn(name, "/@");
const char *zhname = zfs_get_name(zhp);
int len2 = strcspn(zhname, "/@");
if (len1 != len2)
return (B_FALSE);
return (strncmp(name, zhname, len1) == 0);
}
static int
upgrade_list_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
/* list if it's old/new */
if ((!cb->cb_newer && version < ZPL_VERSION) ||
(cb->cb_newer && version > ZPL_VERSION)) {
char *str;
if (cb->cb_newer) {
str = gettext("The following filesystems are "
"formatted using a newer software version and\n"
"cannot be accessed on the current system.\n\n");
} else {
str = gettext("The following filesystems are "
"out of date, and can be upgraded. After being\n"
"upgraded, these filesystems (and any 'zfs send' "
"streams generated from\n"
"subsequent snapshots) will no longer be "
"accessible by older software versions.\n\n");
}
if (!cb->cb_foundone) {
(void) puts(str);
(void) printf(gettext("VER FILESYSTEM\n"));
(void) printf(gettext("--- ------------\n"));
cb->cb_foundone = B_TRUE;
}
(void) printf("%2u %s\n", version, zfs_get_name(zhp));
}
return (0);
}
static int
upgrade_set_callback(zfs_handle_t *zhp, void *data)
{
upgrade_cbdata_t *cb = data;
int version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int needed_spa_version;
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
needed_spa_version = zfs_spa_version_map(cb->cb_version);
if (needed_spa_version < 0)
return (-1);
if (spa_version < needed_spa_version) {
/* can't upgrade */
(void) printf(gettext("%s: can not be "
"upgraded; the pool version needs to first "
"be upgraded\nto version %d\n\n"),
zfs_get_name(zhp), needed_spa_version);
cb->cb_numfailed++;
return (0);
}
/* upgrade */
if (version < cb->cb_version) {
char verstr[16];
(void) snprintf(verstr, sizeof (verstr),
"%llu", (u_longlong_t)cb->cb_version);
if (cb->cb_lastfs[0] && !same_pool(zhp, cb->cb_lastfs)) {
/*
* If they did "zfs upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (zfs_prop_set(zhp, "version", verstr) == 0)
cb->cb_numupgraded++;
else
cb->cb_numfailed++;
(void) strcpy(cb->cb_lastfs, zfs_get_name(zhp));
} else if (version > cb->cb_version) {
/* can't downgrade */
(void) printf(gettext("%s: can not be downgraded; "
"it is already at version %u\n"),
zfs_get_name(zhp), version);
cb->cb_numfailed++;
} else {
cb->cb_numsamegraded++;
}
return (0);
}
/*
* zfs upgrade
* zfs upgrade -v
* zfs upgrade [-r] [-V <version>] <-a | filesystem>
*/
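/*
* Example invocations (illustrative only; dataset names are hypothetical):
*
* zfs upgrade -v
* zfs upgrade -r -V 5 tank/home
* zfs upgrade -a
*/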
static int
zfs_do_upgrade(int argc, char **argv)
{
boolean_t all = B_FALSE;
boolean_t showversions = B_FALSE;
int ret = 0;
upgrade_cbdata_t cb = { 0 };
signed char c;
int flags = ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "rvV:a")) != -1) {
switch (c) {
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
if (zfs_prop_string_to_index(ZFS_PROP_VERSION,
optarg, &cb.cb_version) != 0) {
(void) fprintf(stderr,
gettext("invalid version %s\n"), optarg);
usage(B_FALSE);
}
break;
case 'a':
all = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if ((!all && !argc) && ((flags & ZFS_ITER_RECURSE) || cb.cb_version))
usage(B_FALSE);
if (showversions && (flags & ZFS_ITER_RECURSE || all ||
cb.cb_version || argc))
usage(B_FALSE);
if ((all || argc) && (showversions))
usage(B_FALSE);
if (all && argc)
usage(B_FALSE);
if (showversions) {
/* Show info on available versions. */
(void) printf(gettext("The following filesystem versions are "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS filesystem version\n"));
(void) printf(gettext(" 2 Enhanced directory entries\n"));
(void) printf(gettext(" 3 Case insensitive and filesystem "
"user identifier (FUID)\n"));
(void) printf(gettext(" 4 userquota, groupquota "
"properties\n"));
(void) printf(gettext(" 5 System attributes\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf("see the ZFS Administration Guide.\n\n");
ret = 0;
} else if (argc || all) {
/* Upgrade filesystems */
if (cb.cb_version == 0)
cb.cb_version = ZPL_VERSION;
ret = zfs_for_each(argc, argv, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_set_callback, &cb);
(void) printf(gettext("%llu filesystems upgraded\n"),
(u_longlong_t)cb.cb_numupgraded);
if (cb.cb_numsamegraded) {
(void) printf(gettext("%llu filesystems already at "
"this version\n"),
(u_longlong_t)cb.cb_numsamegraded);
}
if (cb.cb_numfailed != 0)
ret = 1;
} else {
/* List old-version filesystems */
boolean_t found;
(void) printf(gettext("This system is currently running "
"ZFS filesystem version %llu.\n\n"), ZPL_VERSION);
flags |= ZFS_ITER_RECURSE;
ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
found = cb.cb_foundone;
cb.cb_foundone = B_FALSE;
cb.cb_newer = B_TRUE;
ret = zfs_for_each(0, NULL, flags, ZFS_TYPE_FILESYSTEM,
NULL, NULL, 0, upgrade_list_callback, &cb);
if (!cb.cb_foundone && !found) {
(void) printf(gettext("All filesystems are "
"formatted with the current version.\n"));
}
}
return (ret);
}
/*
* zfs userspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]] filesystem | snapshot
* zfs groupspace [-Hinp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] [-t type[,...]] filesystem | snapshot
* zfs projectspace [-Hp] [-o field[,...]] [-s field [-s field]...]
* [-S field [-S field]...] filesystem | snapshot
*
* -H Scripted mode; elide headers and separate columns by tabs.
* -i Translate SID to POSIX ID.
* -n Print numeric ID instead of user/group name.
* -o Control which fields to display.
* -p Use exact (parsable) numeric output.
* -s Specify sort columns, descending order.
* -S Specify sort columns, ascending order.
* -t Control which object types to display.
*
* Displays space consumed by, and quotas on, each user in the specified
* filesystem or snapshot.
*/
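/*
* Example invocations (illustrative only; dataset names are hypothetical):
*
* zfs userspace tank/home
* zfs userspace -Hp -o name,used -s used tank/home
* zfs groupspace -t posixgroup tank/home
* zfs projectspace tank/projects
*/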
/* us_field_types, us_field_hdr and us_field_names should be kept in sync */
enum us_field_types {
USFIELD_TYPE,
USFIELD_NAME,
USFIELD_USED,
USFIELD_QUOTA,
USFIELD_OBJUSED,
USFIELD_OBJQUOTA
};
static char *us_field_hdr[] = { "TYPE", "NAME", "USED", "QUOTA",
"OBJUSED", "OBJQUOTA" };
static char *us_field_names[] = { "type", "name", "used", "quota",
"objused", "objquota" };
#define USFIELD_LAST (sizeof (us_field_names) / sizeof (char *))
#define USTYPE_PSX_GRP (1 << 0)
#define USTYPE_PSX_USR (1 << 1)
#define USTYPE_SMB_GRP (1 << 2)
#define USTYPE_SMB_USR (1 << 3)
#define USTYPE_PROJ (1 << 4)
#define USTYPE_ALL \
(USTYPE_PSX_GRP | USTYPE_PSX_USR | USTYPE_SMB_GRP | USTYPE_SMB_USR | \
USTYPE_PROJ)
static int us_type_bits[] = {
USTYPE_PSX_GRP,
USTYPE_PSX_USR,
USTYPE_SMB_GRP,
USTYPE_SMB_USR,
USTYPE_ALL
};
static char *us_type_names[] = { "posixgroup", "posixuser", "smbgroup",
"smbuser", "all" };
typedef struct us_node {
nvlist_t *usn_nvl;
uu_avl_node_t usn_avlnode;
uu_list_node_t usn_listnode;
} us_node_t;
typedef struct us_cbdata {
nvlist_t **cb_nvlp;
uu_avl_pool_t *cb_avl_pool;
uu_avl_t *cb_avl;
boolean_t cb_numname;
boolean_t cb_nicenum;
boolean_t cb_sid2posix;
zfs_userquota_prop_t cb_prop;
zfs_sort_column_t *cb_sortcol;
size_t cb_width[USFIELD_LAST];
} us_cbdata_t;
static boolean_t us_populated = B_FALSE;
typedef struct {
zfs_sort_column_t *si_sortcol;
boolean_t si_numname;
} us_sort_info_t;
static int
us_field_index(char *field)
{
int i;
for (i = 0; i < USFIELD_LAST; i++) {
if (strcmp(field, us_field_names[i]) == 0)
return (i);
}
return (-1);
}
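/*
* Sort comparator for us_node_t entries. Walks the configured sort columns
* in order (type, name, used or quota), and finally falls back to the
* "smbentity" flag so that SMB and POSIX entries sharing a type/name
* combination remain distinct during SID to POSIX ID translation.
*/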
static int
us_compare(const void *larg, const void *rarg, void *unused)
{
const us_node_t *l = larg;
const us_node_t *r = rarg;
us_sort_info_t *si = (us_sort_info_t *)unused;
zfs_sort_column_t *sortcol = si->si_sortcol;
boolean_t numname = si->si_numname;
nvlist_t *lnvl = l->usn_nvl;
nvlist_t *rnvl = r->usn_nvl;
int rc = 0;
boolean_t lvb, rvb;
for (; sortcol != NULL; sortcol = sortcol->sc_next) {
char *lvstr = "";
char *rvstr = "";
uint32_t lv32 = 0;
uint32_t rv32 = 0;
uint64_t lv64 = 0;
uint64_t rv64 = 0;
zfs_prop_t prop = sortcol->sc_prop;
const char *propname = NULL;
boolean_t reverse = sortcol->sc_reverse;
switch (prop) {
case ZFS_PROP_TYPE:
propname = "type";
(void) nvlist_lookup_uint32(lnvl, propname, &lv32);
(void) nvlist_lookup_uint32(rnvl, propname, &rv32);
if (rv32 != lv32)
rc = (rv32 < lv32) ? 1 : -1;
break;
case ZFS_PROP_NAME:
propname = "name";
if (numname) {
compare_nums:
(void) nvlist_lookup_uint64(lnvl, propname,
&lv64);
(void) nvlist_lookup_uint64(rnvl, propname,
&rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
} else {
if ((nvlist_lookup_string(lnvl, propname,
&lvstr) == ENOENT) ||
(nvlist_lookup_string(rnvl, propname,
&rvstr) == ENOENT)) {
goto compare_nums;
}
rc = strcmp(lvstr, rvstr);
}
break;
case ZFS_PROP_USED:
case ZFS_PROP_QUOTA:
if (!us_populated)
break;
if (prop == ZFS_PROP_USED)
propname = "used";
else
propname = "quota";
(void) nvlist_lookup_uint64(lnvl, propname, &lv64);
(void) nvlist_lookup_uint64(rnvl, propname, &rv64);
if (rv64 != lv64)
rc = (rv64 < lv64) ? 1 : -1;
break;
default:
break;
}
if (rc != 0) {
if (rc < 0)
return (reverse ? 1 : -1);
else
return (reverse ? -1 : 1);
}
}
/*
* If entries still seem to be the same, check if they are of the same
* type (smbentity is added only if we are doing SID to POSIX ID
* translation where we can have duplicate type/name combinations).
*/
if (nvlist_lookup_boolean_value(lnvl, "smbentity", &lvb) == 0 &&
nvlist_lookup_boolean_value(rnvl, "smbentity", &rvb) == 0 &&
lvb != rvb)
return (lvb < rvb ? -1 : 1);
return (0);
}
static boolean_t
zfs_prop_is_user(unsigned p)
{
return (p == ZFS_PROP_USERUSED || p == ZFS_PROP_USERQUOTA ||
p == ZFS_PROP_USEROBJUSED || p == ZFS_PROP_USEROBJQUOTA);
}
static boolean_t
zfs_prop_is_group(unsigned p)
{
return (p == ZFS_PROP_GROUPUSED || p == ZFS_PROP_GROUPQUOTA ||
p == ZFS_PROP_GROUPOBJUSED || p == ZFS_PROP_GROUPOBJQUOTA);
}
static boolean_t
zfs_prop_is_project(unsigned p)
{
return (p == ZFS_PROP_PROJECTUSED || p == ZFS_PROP_PROJECTQUOTA ||
p == ZFS_PROP_PROJECTOBJUSED || p == ZFS_PROP_PROJECTOBJQUOTA);
}
static inline const char *
us_type2str(unsigned field_type)
{
switch (field_type) {
case USTYPE_PSX_USR:
return ("POSIX User");
case USTYPE_PSX_GRP:
return ("POSIX Group");
case USTYPE_SMB_USR:
return ("SMB User");
case USTYPE_SMB_GRP:
return ("SMB Group");
case USTYPE_PROJ:
return ("Project");
default:
return ("Undefined");
}
}
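/*
* zfs_userspace() callback. Accumulates a single { domain, rid, space }
* tuple into the AVL tree, resolving SMB SIDs or POSIX IDs to names as
* requested and updating the column widths later used by print_us().
*/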
static int
userspace_cb(void *arg, const char *domain, uid_t rid, uint64_t space)
{
us_cbdata_t *cb = (us_cbdata_t *)arg;
zfs_userquota_prop_t prop = cb->cb_prop;
char *name = NULL;
char *propname;
char sizebuf[32];
us_node_t *node;
uu_avl_pool_t *avl_pool = cb->cb_avl_pool;
uu_avl_t *avl = cb->cb_avl;
uu_avl_index_t idx;
nvlist_t *props;
us_node_t *n;
zfs_sort_column_t *sortcol = cb->cb_sortcol;
unsigned type = 0;
const char *typestr;
size_t namelen;
size_t typelen;
size_t sizelen;
int typeidx, nameidx, sizeidx;
us_sort_info_t sortinfo = { sortcol, cb->cb_numname };
boolean_t smbentity = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
node = safe_malloc(sizeof (us_node_t));
uu_avl_node_init(node, &node->usn_avlnode, avl_pool);
node->usn_nvl = props;
if (domain != NULL && domain[0] != '\0') {
#ifdef HAVE_IDMAP
/* SMB */
char sid[MAXNAMELEN + 32];
uid_t id;
uint64_t classes;
int err;
directory_error_t e;
smbentity = B_TRUE;
(void) snprintf(sid, sizeof (sid), "%s-%u", domain, rid);
if (prop == ZFS_PROP_GROUPUSED || prop == ZFS_PROP_GROUPQUOTA) {
type = USTYPE_SMB_GRP;
err = sid_to_id(sid, B_FALSE, &id);
} else {
type = USTYPE_SMB_USR;
err = sid_to_id(sid, B_TRUE, &id);
}
if (err == 0) {
rid = id;
if (!cb->cb_sid2posix) {
e = directory_name_from_sid(NULL, sid, &name,
&classes);
if (e != NULL)
directory_error_free(e);
if (name == NULL)
name = sid;
}
}
#else
nvlist_free(props);
free(node);
return (-1);
#endif /* HAVE_IDMAP */
}
if (cb->cb_sid2posix || domain == NULL || domain[0] == '\0') {
/* POSIX or -i */
if (zfs_prop_is_group(prop)) {
type = USTYPE_PSX_GRP;
if (!cb->cb_numname) {
struct group *g;
if ((g = getgrgid(rid)) != NULL)
name = g->gr_name;
}
} else if (zfs_prop_is_user(prop)) {
type = USTYPE_PSX_USR;
if (!cb->cb_numname) {
struct passwd *p;
if ((p = getpwuid(rid)) != NULL)
name = p->pw_name;
}
} else {
type = USTYPE_PROJ;
}
}
/*
* Make sure that the type/name combination is unique when doing
* SID to POSIX ID translation (hence changing the type from SMB to
* POSIX).
*/
if (cb->cb_sid2posix &&
nvlist_add_boolean_value(props, "smbentity", smbentity) != 0)
nomem();
/* Calculate/update width of TYPE field */
typestr = us_type2str(type);
typelen = strlen(gettext(typestr));
typeidx = us_field_index("type");
if (typelen > cb->cb_width[typeidx])
cb->cb_width[typeidx] = typelen;
if (nvlist_add_uint32(props, "type", type) != 0)
nomem();
/* Calculate/update width of NAME field */
if ((cb->cb_numname && cb->cb_sid2posix) || name == NULL) {
if (nvlist_add_uint64(props, "name", rid) != 0)
nomem();
namelen = snprintf(NULL, 0, "%u", rid);
} else {
if (nvlist_add_string(props, "name", name) != 0)
nomem();
namelen = strlen(name);
}
nameidx = us_field_index("name");
if (nameidx >= 0 && namelen > cb->cb_width[nameidx])
cb->cb_width[nameidx] = namelen;
/*
* Check if this type/name combination is in the list and update it;
* otherwise add new node to the list.
*/
if ((n = uu_avl_find(avl, node, &sortinfo, &idx)) == NULL) {
uu_avl_insert(avl, node, idx);
} else {
nvlist_free(props);
free(node);
node = n;
props = node->usn_nvl;
}
/* Calculate/update width of USED/QUOTA fields */
if (cb->cb_nicenum) {
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTUSED ||
prop == ZFS_PROP_PROJECTQUOTA) {
zfs_nicebytes(space, sizebuf, sizeof (sizebuf));
} else {
zfs_nicenum(space, sizebuf, sizeof (sizebuf));
}
} else {
(void) snprintf(sizebuf, sizeof (sizebuf), "%llu",
(u_longlong_t)space);
}
sizelen = strlen(sizebuf);
if (prop == ZFS_PROP_USERUSED || prop == ZFS_PROP_GROUPUSED ||
prop == ZFS_PROP_PROJECTUSED) {
propname = "used";
if (!nvlist_exists(props, "quota"))
(void) nvlist_add_uint64(props, "quota", 0);
} else if (prop == ZFS_PROP_USERQUOTA || prop == ZFS_PROP_GROUPQUOTA ||
prop == ZFS_PROP_PROJECTQUOTA) {
propname = "quota";
if (!nvlist_exists(props, "used"))
(void) nvlist_add_uint64(props, "used", 0);
} else if (prop == ZFS_PROP_USEROBJUSED ||
prop == ZFS_PROP_GROUPOBJUSED || prop == ZFS_PROP_PROJECTOBJUSED) {
propname = "objused";
if (!nvlist_exists(props, "objquota"))
(void) nvlist_add_uint64(props, "objquota", 0);
} else if (prop == ZFS_PROP_USEROBJQUOTA ||
prop == ZFS_PROP_GROUPOBJQUOTA ||
prop == ZFS_PROP_PROJECTOBJQUOTA) {
propname = "objquota";
if (!nvlist_exists(props, "objused"))
(void) nvlist_add_uint64(props, "objused", 0);
} else {
return (-1);
}
sizeidx = us_field_index(propname);
if (sizeidx >= 0 && sizelen > cb->cb_width[sizeidx])
cb->cb_width[sizeidx] = sizelen;
if (nvlist_add_uint64(props, propname, space) != 0)
nomem();
return (0);
}
static void
print_us_node(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, us_node_t *node)
{
nvlist_t *nvl = node->usn_nvl;
char valstr[MAXNAMELEN];
boolean_t first = B_TRUE;
int cfield = 0;
int field;
uint32_t ustype;
/* Check type */
(void) nvlist_lookup_uint32(nvl, "type", &ustype);
if (!(ustype & types))
return;
while ((field = fields[cfield]) != USFIELD_LAST) {
nvpair_t *nvp = NULL;
data_type_t type;
uint32_t val32;
uint64_t val64;
char *strval = "-";
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
if (strcmp(nvpair_name(nvp),
us_field_names[field]) == 0)
break;
}
type = nvp == NULL ? DATA_TYPE_UNKNOWN : nvpair_type(nvp);
switch (type) {
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &val32);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &val64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &strval);
break;
case DATA_TYPE_UNKNOWN:
break;
default:
(void) fprintf(stderr, "invalid data type\n");
}
switch (field) {
case USFIELD_TYPE:
if (type == DATA_TYPE_UINT32)
strval = (char *)us_type2str(val32);
break;
case USFIELD_NAME:
if (type == DATA_TYPE_UINT64) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
}
break;
case USFIELD_USED:
case USFIELD_QUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_QUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicebytes(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
case USFIELD_OBJUSED:
case USFIELD_OBJQUOTA:
if (type == DATA_TYPE_UINT64) {
if (parsable) {
(void) sprintf(valstr, "%llu",
(u_longlong_t)val64);
strval = valstr;
} else if (field == USFIELD_OBJQUOTA &&
val64 == 0) {
strval = "none";
} else {
zfs_nicenum(val64, valstr,
sizeof (valstr));
strval = valstr;
}
}
break;
}
if (!first) {
if (scripted)
(void) printf("\t");
else
(void) printf(" ");
}
if (scripted)
(void) printf("%s", strval);
else if (field == USFIELD_TYPE || field == USFIELD_NAME)
(void) printf("%-*s", (int)width[field], strval);
else
(void) printf("%*s", (int)width[field], strval);
first = B_FALSE;
cfield++;
}
(void) printf("\n");
}
static void
print_us(boolean_t scripted, boolean_t parsable, int *fields, int types,
size_t *width, boolean_t rmnode, uu_avl_t *avl)
{
us_node_t *node;
const char *col;
int cfield = 0;
int field;
if (!scripted) {
boolean_t first = B_TRUE;
while ((field = fields[cfield]) != USFIELD_LAST) {
col = gettext(us_field_hdr[field]);
if (field == USFIELD_TYPE || field == USFIELD_NAME) {
(void) printf(first ? "%-*s" : " %-*s",
(int)width[field], col);
} else {
(void) printf(first ? "%*s" : " %*s",
(int)width[field], col);
}
first = B_FALSE;
cfield++;
}
(void) printf("\n");
}
for (node = uu_avl_first(avl); node; node = uu_avl_next(avl, node)) {
print_us_node(scripted, parsable, fields, types, width, node);
if (rmnode)
nvlist_free(node->usn_nvl);
}
}
static int
zfs_do_userspace(int argc, char **argv)
{
zfs_handle_t *zhp;
zfs_userquota_prop_t p;
uu_avl_pool_t *avl_pool;
uu_avl_t *avl_tree;
uu_avl_walk_t *walk;
char *delim;
char deffields[] = "type,name,used,quota,objused,objquota";
char *ofield = NULL;
char *tfield = NULL;
int cfield = 0;
int fields[256];
int i;
boolean_t scripted = B_FALSE;
boolean_t prtnum = B_FALSE;
boolean_t parsable = B_FALSE;
boolean_t sid2posix = B_FALSE;
int ret = 0;
int c;
zfs_sort_column_t *sortcol = NULL;
int types = USTYPE_PSX_USR | USTYPE_SMB_USR;
us_cbdata_t cb;
us_node_t *node;
us_node_t *rmnode;
uu_list_pool_t *listpool;
uu_list_t *list;
uu_avl_index_t idx = 0;
uu_list_index_t idx2 = 0;
if (argc < 2)
usage(B_FALSE);
if (strcmp(argv[0], "groupspace") == 0) {
/* Toggle default group types */
types = USTYPE_PSX_GRP | USTYPE_SMB_GRP;
} else if (strcmp(argv[0], "projectspace") == 0) {
types = USTYPE_PROJ;
prtnum = B_TRUE;
}
while ((c = getopt(argc, argv, "nHpo:s:S:t:i")) != -1) {
switch (c) {
case 'n':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'n'\n"));
usage(B_FALSE);
}
prtnum = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'o':
ofield = optarg;
break;
case 's':
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
c == 's' ? B_FALSE : B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid field '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 't'\n"));
usage(B_FALSE);
}
tfield = optarg;
break;
case 'i':
if (types == USTYPE_PROJ) {
(void) fprintf(stderr,
gettext("invalid option 'i'\n"));
usage(B_FALSE);
}
sid2posix = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* Use default output fields if not specified using -o */
if (ofield == NULL)
ofield = deffields;
do {
if ((delim = strchr(ofield, ',')) != NULL)
*delim = '\0';
if ((fields[cfield++] = us_field_index(ofield)) == -1) {
(void) fprintf(stderr, gettext("invalid type '%s' "
"for -o option\n"), ofield);
return (-1);
}
if (delim != NULL)
ofield = delim + 1;
} while (delim != NULL);
fields[cfield] = USFIELD_LAST;
/* Override output types (-t option) */
if (tfield != NULL) {
types = 0;
do {
boolean_t found = B_FALSE;
if ((delim = strchr(tfield, ',')) != NULL)
*delim = '\0';
for (i = 0; i < sizeof (us_type_bits) / sizeof (int);
i++) {
if (strcmp(tfield, us_type_names[i]) == 0) {
found = B_TRUE;
types |= us_type_bits[i];
break;
}
}
if (!found) {
(void) fprintf(stderr, gettext("invalid type "
"'%s' for -t option\n"), tfield);
return (-1);
}
if (delim != NULL)
tfield = delim + 1;
} while (delim != NULL);
}
if ((zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET)) == NULL)
return (1);
if ((avl_pool = uu_avl_pool_create("us_avl_pool", sizeof (us_node_t),
offsetof(us_node_t, usn_avlnode), us_compare, UU_DEFAULT)) == NULL)
nomem();
if ((avl_tree = uu_avl_create(avl_pool, NULL, UU_DEFAULT)) == NULL)
nomem();
/* Always add default sorting columns */
(void) zfs_add_sort_column(&sortcol, "type", B_FALSE);
(void) zfs_add_sort_column(&sortcol, "name", B_FALSE);
cb.cb_sortcol = sortcol;
cb.cb_numname = prtnum;
cb.cb_nicenum = !parsable;
cb.cb_avl_pool = avl_pool;
cb.cb_avl = avl_tree;
cb.cb_sid2posix = sid2posix;
for (i = 0; i < USFIELD_LAST; i++)
cb.cb_width[i] = strlen(gettext(us_field_hdr[i]));
for (p = 0; p < ZFS_NUM_USERQUOTA_PROPS; p++) {
if ((zfs_prop_is_user(p) &&
!(types & (USTYPE_PSX_USR | USTYPE_SMB_USR))) ||
(zfs_prop_is_group(p) &&
!(types & (USTYPE_PSX_GRP | USTYPE_SMB_GRP))) ||
(zfs_prop_is_project(p) && types != USTYPE_PROJ))
continue;
cb.cb_prop = p;
if ((ret = zfs_userspace(zhp, p, userspace_cb, &cb)) != 0)
return (ret);
}
/* Sort the list */
if ((node = uu_avl_first(avl_tree)) == NULL)
return (0);
us_populated = B_TRUE;
listpool = uu_list_pool_create("tmplist", sizeof (us_node_t),
offsetof(us_node_t, usn_listnode), NULL, UU_DEFAULT);
list = uu_list_create(listpool, NULL, UU_DEFAULT);
uu_list_node_init(node, &node->usn_listnode, listpool);
while (node != NULL) {
rmnode = node;
node = uu_avl_next(avl_tree, node);
uu_avl_remove(avl_tree, rmnode);
if (uu_list_find(list, rmnode, NULL, &idx2) == NULL)
uu_list_insert(list, rmnode, idx2);
}
for (node = uu_list_first(list); node != NULL;
node = uu_list_next(list, node)) {
us_sort_info_t sortinfo = { sortcol, cb.cb_numname };
if (uu_avl_find(avl_tree, node, &sortinfo, &idx) == NULL)
uu_avl_insert(avl_tree, node, idx);
}
uu_list_destroy(list);
uu_list_pool_destroy(listpool);
/* Print and free node nvlist memory */
print_us(scripted, parsable, fields, types, cb.cb_width, B_TRUE,
cb.cb_avl);
zfs_free_sort_columns(sortcol);
/* Clean up the AVL tree */
if ((walk = uu_avl_walk_start(cb.cb_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
uu_avl_remove(cb.cb_avl, node);
free(node);
}
uu_avl_walk_end(walk);
uu_avl_destroy(avl_tree);
uu_avl_pool_destroy(avl_pool);
return (ret);
}
/*
* list [-Hp][-r|-d max] [-o property[,...]] [-s property] ... [-S property]
* [-t type[,...]] [filesystem|volume|snapshot] ...
*
* -H Scripted mode; elide headers and separate columns by tabs
* -p Display values in parsable (literal) format.
* -r Recurse over all children
* -d Limit recursion by depth.
* -o Control which fields to display.
* -s Specify sort columns, descending order.
* -S Specify sort columns, ascending order.
* -t Control which object types to display.
*
* When given no arguments, list all filesystems in the system.
* Otherwise, list the specified datasets, optionally recursing down them if
* '-r' is specified.
*/
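/*
* Example invocations (illustrative only; dataset names are hypothetical):
*
* zfs list
* zfs list -r -t snapshot -o name,used tank/home
* zfs list -Hp -o name -s name -d 1 tank
*/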
typedef struct list_cbdata {
boolean_t cb_first;
boolean_t cb_literal;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZFS_MAXPROPLEN];
const char *header;
int i;
boolean_t first = B_TRUE;
boolean_t right_justify;
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
(void) printf(" ");
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_INVAL) {
header = zfs_prop_column_name(pl->pl_prop);
right_justify = zfs_prop_align_right(pl->pl_prop);
} else {
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) printf("%s", header);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, header);
else
(void) printf("%-*s", (int)pl->pl_width, header);
}
(void) printf("\n");
}
/*
* Given a dataset and a list of fields, print out all the properties according
* to the described layout.
*/
static void
print_dataset(zfs_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZFS_MAXPROPLEN];
nvlist_t *userprops = zfs_get_user_props(zhp);
nvlist_t *propval;
char *propstr;
boolean_t right_justify;
for (; pl != NULL; pl = pl->pl_next) {
if (!first) {
if (cb->cb_scripted)
(void) printf("\t");
else
(void) printf(" ");
} else {
first = B_FALSE;
}
if (pl->pl_prop == ZFS_PROP_NAME) {
(void) strlcpy(property, zfs_get_name(zhp),
sizeof (property));
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (pl->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, pl->pl_prop, property,
sizeof (property), NULL, NULL, 0,
cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zfs_prop_align_right(pl->pl_prop);
} else if (zfs_prop_userquota(pl->pl_user_prop)) {
if (zfs_prop_get_userquota(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else if (zfs_prop_written(pl->pl_user_prop)) {
if (zfs_prop_get_written(zhp, pl->pl_user_prop,
property, sizeof (property), cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = B_TRUE;
} else {
if (nvlist_lookup_nvlist(userprops,
pl->pl_user_prop, &propval) != 0)
propstr = "-";
else
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &propstr) == 0);
right_justify = B_FALSE;
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) printf("%s", propstr);
else if (right_justify)
(void) printf("%*s", (int)pl->pl_width, propstr);
else
(void) printf("%-*s", (int)pl->pl_width, propstr);
}
(void) printf("\n");
}
/*
* Generic callback function to list a dataset or snapshot.
*/
static int
list_callback(zfs_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
if (cbp->cb_first) {
if (!cbp->cb_scripted)
print_header(cbp);
cbp->cb_first = B_FALSE;
}
print_dataset(zhp, cbp);
return (0);
}
static int
zfs_do_list(int argc, char **argv)
{
int c;
static char default_fields[] =
"name,used,available,referenced,mountpoint";
int types = ZFS_TYPE_DATASET;
boolean_t types_specified = B_FALSE;
char *fields = NULL;
list_cbdata_t cb = { 0 };
char *value;
int limit = 0;
int ret = 0;
zfs_sort_column_t *sortcol = NULL;
int flags = ZFS_ITER_PROP_LISTSNAPS | ZFS_ITER_ARGS_CAN_BE_PATHS;
/* check options */
while ((c = getopt(argc, argv, "HS:d:o:prs:t:")) != -1) {
switch (c) {
case 'o':
fields = optarg;
break;
case 'p':
cb.cb_literal = B_TRUE;
flags |= ZFS_ITER_LITERAL_PROPS;
break;
case 'd':
limit = parse_depth(optarg, &flags);
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 's':
if (zfs_add_sort_column(&sortcol, optarg,
B_FALSE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 'S':
if (zfs_add_sort_column(&sortcol, optarg,
B_TRUE) != 0) {
(void) fprintf(stderr,
gettext("invalid property '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case 't':
types = 0;
types_specified = B_TRUE;
flags &= ~ZFS_ITER_PROP_LISTSNAPS;
while (*optarg != '\0') {
static char *type_subopts[] = { "filesystem",
"volume", "snapshot", "snap", "bookmark",
"all", NULL };
switch (getsubopt(&optarg, type_subopts,
&value)) {
case 0:
types |= ZFS_TYPE_FILESYSTEM;
break;
case 1:
types |= ZFS_TYPE_VOLUME;
break;
case 2:
case 3:
types |= ZFS_TYPE_SNAPSHOT;
break;
case 4:
types |= ZFS_TYPE_BOOKMARK;
break;
case 5:
types = ZFS_TYPE_DATASET |
ZFS_TYPE_BOOKMARK;
break;
default:
(void) fprintf(stderr,
gettext("invalid type '%s'\n"),
value);
usage(B_FALSE);
}
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (fields == NULL)
fields = default_fields;
/*
* If we are only going to list snapshot names and sort by name,
* then we can use the faster version.
*/
if (strcmp(fields, "name") == 0 && zfs_sort_only_by_name(sortcol))
flags |= ZFS_ITER_SIMPLE;
/*
* If "-o space" and no types were specified, don't display snapshots.
*/
if (strcmp(fields, "space") == 0 && types_specified == B_FALSE)
types &= ~ZFS_TYPE_SNAPSHOT;
/*
* If the user specifies '-o all', zprop_get_list() doesn't
* normally include the name of the dataset. For 'zfs list', we always
* want this property to be first.
*/
if (zprop_get_list(g_zfs, fields, &cb.cb_proplist, ZFS_TYPE_DATASET)
!= 0)
usage(B_FALSE);
cb.cb_first = B_TRUE;
ret = zfs_for_each(argc, argv, flags, types, sortcol, &cb.cb_proplist,
limit, list_callback, &cb);
zprop_free_list(cb.cb_proplist);
zfs_free_sort_columns(sortcol);
if (ret == 0 && cb.cb_first && !cb.cb_scripted)
(void) fprintf(stderr, gettext("no datasets available\n"));
return (ret);
}
/*
* zfs rename [-f] <fs | snap | vol> <fs | snap | vol>
* zfs rename [-f] -p <fs | vol> <fs | vol>
* zfs rename -r <snap> <snap>
*
* Renames the given dataset to another of the same type.
*
* The '-p' flag creates all the non-existing ancestors of the target first.
*/
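/*
* Example invocations (illustrative only; dataset names are hypothetical):
*
* zfs rename tank/home/old tank/home/new
* zfs rename -p tank/vol tank/new/parent/vol
* zfs rename -r tank@snap-old tank@snap-new
*/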
/* ARGSUSED */
static int
zfs_do_rename(int argc, char **argv)
{
zfs_handle_t *zhp;
int c;
int ret = 0;
boolean_t recurse = B_FALSE;
boolean_t parents = B_FALSE;
boolean_t force_unmount = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "prf")) != -1) {
switch (c) {
case 'p':
parents = B_TRUE;
break;
case 'r':
recurse = B_TRUE;
break;
case 'f':
force_unmount = B_TRUE;
break;
case '?':
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing source dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing target dataset "
"argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (recurse && parents) {
(void) fprintf(stderr, gettext("-p and -r options are mutually "
"exclusive\n"));
usage(B_FALSE);
}
if (recurse && strchr(argv[0], '@') == NULL) {
(void) fprintf(stderr, gettext("source dataset for recursive "
"rename must be a snapshot\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[0], parents ? ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME : ZFS_TYPE_DATASET)) == NULL)
return (1);
/* If we were asked and the name looks good, try to create ancestors. */
if (parents && zfs_name_valid(argv[1], zfs_get_type(zhp)) &&
zfs_create_ancestors(g_zfs, argv[1]) != 0) {
zfs_close(zhp);
return (1);
}
ret = (zfs_rename(zhp, argv[1], recurse, force_unmount) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs promote <fs>
*
* Promotes the given clone fs to be the parent
*/
/* ARGSUSED */
static int
zfs_do_promote(int argc, char **argv)
{
zfs_handle_t *zhp;
int ret = 0;
/* check options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
/* check number of arguments */
if (argc < 2) {
(void) fprintf(stderr, gettext("missing clone filesystem"
" argument\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[1], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (1);
ret = (zfs_promote(zhp) != 0);
zfs_close(zhp);
return (ret);
}
/*
* zfs rollback [-rRf] <snapshot>
*
* -r Delete any intervening snapshots before doing rollback
* -R Delete any snapshots and their clones
* -f Ignored for backwards compatibility
*
* Given a filesystem, rollback to a specific snapshot, discarding any changes
* since then and making it the active dataset. If more recent snapshots exist,
* the command will complain unless the '-r' flag is given.
*/
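/*
* Example invocations (illustrative only; dataset names are hypothetical):
*
* zfs rollback tank/home@yesterday
* zfs rollback -r tank/home@last-week
*/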
typedef struct rollback_cbdata {
uint64_t cb_create;
boolean_t cb_first;
int cb_doclones;
char *cb_target;
int cb_error;
boolean_t cb_recurse;
} rollback_cbdata_t;
static int
rollback_check_dependent(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
if (cbp->cb_first && cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot rollback to "
"'%s': clones of previous snapshots exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-R' to "
"force deletion of the following clones and "
"dependents:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
(void) fprintf(stderr, "%s\n", zfs_get_name(zhp));
zfs_close(zhp);
return (0);
}
/*
* Report any snapshots or bookmarks more recent than the one specified.
* Used when '-r' is not specified: the offenders are listed and the rollback
* is refused. With '-r', newer snapshots do not cause an error here, but any
* clones of them are reported via rollback_check_dependent(). With '-R'
* ('cb_doclones') the check is skipped entirely.
*/
static int
rollback_check(zfs_handle_t *zhp, void *data)
{
rollback_cbdata_t *cbp = data;
if (cbp->cb_doclones) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
if (cbp->cb_first && !cbp->cb_recurse) {
(void) fprintf(stderr, gettext("cannot "
"rollback to '%s': more recent snapshots "
"or bookmarks exist\n"),
cbp->cb_target);
(void) fprintf(stderr, gettext("use '-r' to "
"force deletion of the following "
"snapshots and bookmarks:\n"));
cbp->cb_first = 0;
cbp->cb_error = 1;
}
if (cbp->cb_recurse) {
if (zfs_iter_dependents(zhp, B_TRUE,
rollback_check_dependent, cbp) != 0) {
zfs_close(zhp);
return (-1);
}
} else {
(void) fprintf(stderr, "%s\n",
zfs_get_name(zhp));
}
}
zfs_close(zhp);
return (0);
}
static int
zfs_do_rollback(int argc, char **argv)
{
int ret = 0;
int c;
boolean_t force = B_FALSE;
rollback_cbdata_t cb = { 0 };
zfs_handle_t *zhp, *snap;
char parentname[ZFS_MAX_DATASET_NAME_LEN];
char *delim;
/* check options */
while ((c = getopt(argc, argv, "rRf")) != -1) {
switch (c) {
case 'r':
cb.cb_recurse = 1;
break;
case 'R':
cb.cb_recurse = 1;
cb.cb_doclones = 1;
break;
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/* open the snapshot */
if ((snap = zfs_open(g_zfs, argv[0], ZFS_TYPE_SNAPSHOT)) == NULL)
return (1);
/* open the parent dataset */
(void) strlcpy(parentname, argv[0], sizeof (parentname));
verify((delim = strrchr(parentname, '@')) != NULL);
*delim = '\0';
if ((zhp = zfs_open(g_zfs, parentname, ZFS_TYPE_DATASET)) == NULL) {
zfs_close(snap);
return (1);
}
/*
* Check for more recent snapshots and/or clones based on the presence
* of '-r' and '-R'.
*/
cb.cb_target = argv[0];
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
cb.cb_first = B_TRUE;
cb.cb_error = 0;
if ((ret = zfs_iter_snapshots(zhp, B_FALSE, rollback_check, &cb)) != 0)
goto out;
if ((ret = zfs_iter_bookmarks(zhp, rollback_check, &cb)) != 0)
goto out;
if ((ret = cb.cb_error) != 0)
goto out;
/*
* Rollback parent to the given snapshot.
*/
ret = zfs_rollback(zhp, snap, force);
out:
zfs_close(snap);
zfs_close(zhp);
if (ret == 0)
return (0);
else
return (1);
}
/*
* zfs set property=value ... { fs | snap | vol } ...
*
* Sets the given properties for all datasets specified on the command line.
*/
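/*
* Example invocations (illustrative only; dataset names are hypothetical):
*
* zfs set compression=lz4 tank/home
* zfs set quota=10G atime=off tank/home tank/export
*/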
static int
set_callback(zfs_handle_t *zhp, void *data)
{
nvlist_t *props = data;
if (zfs_prop_set_list(zhp, props) != 0) {
switch (libzfs_errno(g_zfs)) {
case EZFS_MOUNTFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to remount filesystem\n"));
break;
case EZFS_SHARENFSFAILED:
(void) fprintf(stderr, gettext("property may be set "
"but unable to reshare filesystem\n"));
break;
}
return (1);
}
return (0);
}
static int
zfs_do_set(int argc, char **argv)
{
nvlist_t *props = NULL;
int ds_start = -1; /* argv idx of first dataset arg */
int ret = 0;
int i;
/* check for options */
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
/* check number of arguments */
if (argc < 2) {
(void) fprintf(stderr, gettext("missing arguments\n"));
usage(B_FALSE);
}
if (argc < 3) {
if (strchr(argv[1], '=') == NULL) {
(void) fprintf(stderr, gettext("missing property=value "
"argument(s)\n"));
} else {
(void) fprintf(stderr, gettext("missing dataset "
"name(s)\n"));
}
usage(B_FALSE);
}
/* validate argument order: prop=val args followed by dataset args */
for (i = 1; i < argc; i++) {
if (strchr(argv[i], '=') != NULL) {
if (ds_start > 0) {
/* out-of-order prop=val argument */
(void) fprintf(stderr, gettext("invalid "
"argument order\n"));
usage(B_FALSE);
}
} else if (ds_start < 0) {
ds_start = i;
}
}
if (ds_start < 0) {
(void) fprintf(stderr, gettext("missing dataset name(s)\n"));
usage(B_FALSE);
}
/* Populate a list of property settings */
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
for (i = 1; i < ds_start; i++) {
if (!parseprop(props, argv[i])) {
ret = -1;
goto error;
}
}
ret = zfs_for_each(argc - ds_start, argv + ds_start, 0,
ZFS_TYPE_DATASET, NULL, NULL, 0, set_callback, props);
error:
nvlist_free(props);
return (ret);
}
typedef struct snap_cbdata {
nvlist_t *sd_nvl;
boolean_t sd_recursive;
const char *sd_snapname;
} snap_cbdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snap_cbdata_t *sd = arg;
char *name;
int rv = 0;
int error;
if (sd->sd_recursive &&
zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) != 0) {
zfs_close(zhp);
return (0);
}
error = asprintf(&name, "%s@%s", zfs_get_name(zhp), sd->sd_snapname);
if (error == -1)
nomem();
fnvlist_add_boolean(sd->sd_nvl, name);
free(name);
if (sd->sd_recursive)
rv = zfs_iter_filesystems(zhp, zfs_snapshot_cb, sd);
zfs_close(zhp);
return (rv);
}
/*
* zfs snapshot [-r] [-o prop=value] ... <fs@snap>
*
* Creates a snapshot with the given name. Although this could have been
* folded into 'zfs create' (both create a new dataset object), it is a
* separate command to make the intent explicit.
*/
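/*
* Example invocations (illustrative only; dataset names are hypothetical):
*
* zfs snapshot tank/home@friday
* zfs snapshot -r -o com.example:reason=backup tank@friday
*/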
static int
zfs_do_snapshot(int argc, char **argv)
{
int ret = 0;
signed char c;
nvlist_t *props;
snap_cbdata_t sd = { 0 };
boolean_t multiple_snaps = B_FALSE;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, "ro:")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(sd.sd_nvl);
nvlist_free(props);
return (1);
}
break;
case 'r':
sd.sd_recursive = B_TRUE;
multiple_snaps = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
goto usage;
}
if (argc > 1)
multiple_snaps = B_TRUE;
for (; argc > 0; argc--, argv++) {
char *atp;
zfs_handle_t *zhp;
atp = strchr(argv[0], '@');
if (atp == NULL)
goto usage;
*atp = '\0';
sd.sd_snapname = atp + 1;
zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
goto usage;
if (zfs_snapshot_cb(zhp, &sd) != 0)
goto usage;
}
ret = zfs_snapshot_nvl(g_zfs, sd.sd_nvl, props);
nvlist_free(sd.sd_nvl);
nvlist_free(props);
if (ret != 0 && multiple_snaps)
(void) fprintf(stderr, gettext("no snapshots were created\n"));
return (ret != 0);
usage:
nvlist_free(sd.sd_nvl);
nvlist_free(props);
usage(B_FALSE);
return (-1);
}
/*
* Send a backup stream to stdout.
*/
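/*
* Example invocations (illustrative only; names are hypothetical):
*
* zfs send tank/home@friday > /backup/home-friday.zstream
* zfs send -i @thursday tank/home@friday | ssh host zfs receive pool/home
* zfs send -Rv tank@friday > /backup/tank-full.zstream
* zfs send -t <resume_token>
*/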
static int
zfs_do_send(int argc, char **argv)
{
char *fromname = NULL;
char *toname = NULL;
char *resume_token = NULL;
char *cp;
zfs_handle_t *zhp;
sendflags_t flags = { 0 };
int c, err;
nvlist_t *dbgnv = NULL;
boolean_t extraverbose = B_FALSE;
struct option long_options[] = {
{"replicate", no_argument, NULL, 'R'},
{"props", no_argument, NULL, 'p'},
{"parsable", no_argument, NULL, 'P'},
{"dedup", no_argument, NULL, 'D'},
{"verbose", no_argument, NULL, 'v'},
{"dryrun", no_argument, NULL, 'n'},
{"large-block", no_argument, NULL, 'L'},
{"embed", no_argument, NULL, 'e'},
{"resume", required_argument, NULL, 't'},
{"compressed", no_argument, NULL, 'c'},
{"raw", no_argument, NULL, 'w'},
{"backup", no_argument, NULL, 'b'},
{0, 0, 0, 0}
};
/* check options */
while ((c = getopt_long(argc, argv, ":i:I:RDpvnPLet:cwb", long_options,
NULL)) != -1) {
switch (c) {
case 'i':
if (fromname)
usage(B_FALSE);
fromname = optarg;
break;
case 'I':
if (fromname)
usage(B_FALSE);
fromname = optarg;
flags.doall = B_TRUE;
break;
case 'R':
flags.replicate = B_TRUE;
break;
case 'p':
flags.props = B_TRUE;
break;
case 'b':
flags.backup = B_TRUE;
break;
case 'P':
flags.parsable = B_TRUE;
flags.verbose = B_TRUE;
break;
case 'v':
if (flags.verbose)
extraverbose = B_TRUE;
flags.verbose = B_TRUE;
flags.progress = B_TRUE;
break;
case 'D':
flags.dedup = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'L':
flags.largeblock = B_TRUE;
break;
case 'e':
flags.embed_data = B_TRUE;
break;
case 't':
resume_token = optarg;
break;
case 'c':
flags.compress = B_TRUE;
break;
case 'w':
flags.raw = B_TRUE;
flags.compress = B_TRUE;
flags.embed_data = B_TRUE;
flags.largeblock = B_TRUE;
break;
case ':':
/*
* If a parameter was not passed, optopt contains the
* value that would normally lead us into the
* appropriate case statement. If it's greater than UINT8_MAX,
* then this must be a longopt and we should look at argv to get
* the string. Otherwise it's just the character, so we
* should use it directly.
*/
if (optopt <= UINT8_MAX) {
(void) fprintf(stderr,
gettext("missing argument for '%c' "
"option\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("missing argument for '%s' "
"option\n"), argv[optind - 1]);
}
usage(B_FALSE);
break;
case '?':
/*FALLTHROUGH*/
default:
/*
* If an invalid flag was passed, optopt contains the
* character if it was a short flag, or 0 if it was a
* longopt.
*/
if (optopt != 0) {
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
} else {
(void) fprintf(stderr,
gettext("invalid option '%s'\n"),
argv[optind - 1]);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (resume_token != NULL) {
if (fromname != NULL || flags.replicate || flags.props ||
flags.backup || flags.dedup) {
(void) fprintf(stderr,
gettext("invalid flags combined with -t\n"));
usage(B_FALSE);
}
if (argc != 0) {
(void) fprintf(stderr, gettext("no additional "
"arguments are permitted with -t\n"));
usage(B_FALSE);
}
} else {
if (argc < 1) {
(void) fprintf(stderr,
gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
if (!flags.dryrun && isatty(STDOUT_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Stream can not be written to a terminal.\n"
"You must redirect standard output.\n"));
return (1);
}
if (resume_token != NULL) {
return (zfs_send_resume(g_zfs, &flags, STDOUT_FILENO,
resume_token));
}
/*
* Special case: sending a full filesystem (no '@' in the name), or sending
* incrementally from a bookmark.
*/
if (strchr(argv[0], '@') == NULL ||
(fromname && strchr(fromname, '#') != NULL)) {
char frombuf[ZFS_MAX_DATASET_NAME_LEN];
if (flags.replicate || flags.doall || flags.props ||
flags.backup || flags.dedup ||
(strchr(argv[0], '@') == NULL &&
(flags.dryrun || flags.verbose || flags.progress))) {
(void) fprintf(stderr, gettext("Error: "
"Unsupported flag with filesystem or bookmark.\n"));
return (1);
}
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_DATASET);
if (zhp == NULL)
return (1);
if (fromname != NULL &&
(fromname[0] == '#' || fromname[0] == '@')) {
/*
* Incremental source name begins with # or @.
* Default to same fs as target.
*/
(void) strlcpy(frombuf, argv[0], sizeof (frombuf));
cp = strchr(frombuf, '@');
if (cp != NULL)
*cp = '\0';
(void) strlcat(frombuf, fromname, sizeof (frombuf));
fromname = frombuf;
}
err = zfs_send_one(zhp, fromname, STDOUT_FILENO, flags);
zfs_close(zhp);
return (err != 0);
}
cp = strchr(argv[0], '@');
*cp = '\0';
toname = cp + 1;
zhp = zfs_open(g_zfs, argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
return (1);
/*
* If they specified the full path to the snapshot, chop off
* everything except the short name of the snapshot, but special
* case if they specify the origin.
*/
if (fromname && (cp = strchr(fromname, '@')) != NULL) {
char origin[ZFS_MAX_DATASET_NAME_LEN];
zprop_source_t src;
(void) zfs_prop_get(zhp, ZFS_PROP_ORIGIN,
origin, sizeof (origin), &src, NULL, 0, B_FALSE);
if (strcmp(origin, fromname) == 0) {
fromname = NULL;
flags.fromorigin = B_TRUE;
} else {
*cp = '\0';
if (cp != fromname && strcmp(argv[0], fromname)) {
(void) fprintf(stderr,
gettext("incremental source must be "
"in same filesystem\n"));
usage(B_FALSE);
}
fromname = cp + 1;
if (strchr(fromname, '@') || strchr(fromname, '/')) {
(void) fprintf(stderr,
gettext("invalid incremental source\n"));
usage(B_FALSE);
}
}
}
if (flags.replicate && fromname == NULL)
flags.doall = B_TRUE;
err = zfs_send(zhp, fromname, toname, &flags, STDOUT_FILENO, NULL, 0,
extraverbose ? &dbgnv : NULL);
if (extraverbose && dbgnv != NULL) {
/*
* dump_nvlist prints to stdout, but that's been
* redirected to a file. Make it print to stderr
* instead.
*/
(void) dup2(STDERR_FILENO, STDOUT_FILENO);
dump_nvlist(dbgnv, 0);
nvlist_free(dbgnv);
}
zfs_close(zhp);
return (err != 0);
}
/*
* Restore a backup stream from stdin.
*/
static int
zfs_do_receive(int argc, char **argv)
{
int c, err = 0;
recvflags_t flags = { 0 };
boolean_t abort_resumable = B_FALSE;
nvlist_t *props;
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0)
nomem();
/* check options */
while ((c = getopt(argc, argv, ":o:x:denuvFsA")) != -1) {
switch (c) {
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'x':
if (!parsepropname(props, optarg)) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'd':
flags.isprefix = B_TRUE;
break;
case 'e':
flags.isprefix = B_TRUE;
flags.istail = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'u':
flags.nomount = B_TRUE;
break;
case 'v':
flags.verbose = B_TRUE;
break;
case 's':
flags.resumable = B_TRUE;
break;
case 'F':
flags.force = B_TRUE;
break;
case 'A':
abort_resumable = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (abort_resumable) {
if (flags.isprefix || flags.istail || flags.dryrun ||
flags.resumable || flags.nomount) {
(void) fprintf(stderr, gettext("invalid option\n"));
usage(B_FALSE);
}
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(namebuf, sizeof (namebuf),
"%s/%%recv", argv[0]);
if (zfs_dataset_exists(g_zfs, namebuf,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME)) {
zfs_handle_t *zhp = zfs_open(g_zfs,
namebuf, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
nvlist_free(props);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
} else {
zfs_handle_t *zhp = zfs_open(g_zfs,
argv[0], ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
if (!zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) ||
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == -1) {
(void) fprintf(stderr,
gettext("'%s' does not have any "
"resumable receive state to abort\n"),
argv[0]);
nvlist_free(props);
zfs_close(zhp);
return (1);
}
err = zfs_destroy(zhp, B_FALSE);
zfs_close(zhp);
}
nvlist_free(props);
return (err != 0);
}
if (isatty(STDIN_FILENO)) {
(void) fprintf(stderr,
gettext("Error: Backup stream can not be read "
"from a terminal.\n"
"You must redirect standard input.\n"));
nvlist_free(props);
return (1);
}
err = zfs_receive(g_zfs, argv[0], props, &flags, STDIN_FILENO, NULL);
nvlist_free(props);
return (err != 0);
}
/*
* allow/unallow stuff
*/
/* copied from zfs/sys/dsl_deleg.h */
#define ZFS_DELEG_PERM_CREATE "create"
#define ZFS_DELEG_PERM_DESTROY "destroy"
#define ZFS_DELEG_PERM_SNAPSHOT "snapshot"
#define ZFS_DELEG_PERM_ROLLBACK "rollback"
#define ZFS_DELEG_PERM_CLONE "clone"
#define ZFS_DELEG_PERM_PROMOTE "promote"
#define ZFS_DELEG_PERM_RENAME "rename"
#define ZFS_DELEG_PERM_MOUNT "mount"
#define ZFS_DELEG_PERM_SHARE "share"
#define ZFS_DELEG_PERM_SEND "send"
#define ZFS_DELEG_PERM_RECEIVE "receive"
#define ZFS_DELEG_PERM_ALLOW "allow"
#define ZFS_DELEG_PERM_USERPROP "userprop"
#define ZFS_DELEG_PERM_VSCAN "vscan" /* ??? */
#define ZFS_DELEG_PERM_USERQUOTA "userquota"
#define ZFS_DELEG_PERM_GROUPQUOTA "groupquota"
#define ZFS_DELEG_PERM_USERUSED "userused"
#define ZFS_DELEG_PERM_GROUPUSED "groupused"
#define ZFS_DELEG_PERM_USEROBJQUOTA "userobjquota"
#define ZFS_DELEG_PERM_GROUPOBJQUOTA "groupobjquota"
#define ZFS_DELEG_PERM_USEROBJUSED "userobjused"
#define ZFS_DELEG_PERM_GROUPOBJUSED "groupobjused"
#define ZFS_DELEG_PERM_HOLD "hold"
#define ZFS_DELEG_PERM_RELEASE "release"
#define ZFS_DELEG_PERM_DIFF "diff"
#define ZFS_DELEG_PERM_BOOKMARK "bookmark"
+#define ZFS_DELEG_PERM_REMAP "remap"
#define ZFS_DELEG_PERM_LOAD_KEY "load-key"
#define ZFS_DELEG_PERM_CHANGE_KEY "change-key"
#define ZFS_DELEG_PERM_PROJECTUSED "projectused"
#define ZFS_DELEG_PERM_PROJECTQUOTA "projectquota"
#define ZFS_DELEG_PERM_PROJECTOBJUSED "projectobjused"
#define ZFS_DELEG_PERM_PROJECTOBJQUOTA "projectobjquota"
#define ZFS_NUM_DELEG_NOTES ZFS_DELEG_NOTE_NONE
static zfs_deleg_perm_tab_t zfs_deleg_perm_tbl[] = {
{ ZFS_DELEG_PERM_ALLOW, ZFS_DELEG_NOTE_ALLOW },
{ ZFS_DELEG_PERM_CLONE, ZFS_DELEG_NOTE_CLONE },
{ ZFS_DELEG_PERM_CREATE, ZFS_DELEG_NOTE_CREATE },
{ ZFS_DELEG_PERM_DESTROY, ZFS_DELEG_NOTE_DESTROY },
{ ZFS_DELEG_PERM_DIFF, ZFS_DELEG_NOTE_DIFF},
{ ZFS_DELEG_PERM_HOLD, ZFS_DELEG_NOTE_HOLD },
{ ZFS_DELEG_PERM_MOUNT, ZFS_DELEG_NOTE_MOUNT },
{ ZFS_DELEG_PERM_PROMOTE, ZFS_DELEG_NOTE_PROMOTE },
{ ZFS_DELEG_PERM_RECEIVE, ZFS_DELEG_NOTE_RECEIVE },
{ ZFS_DELEG_PERM_RELEASE, ZFS_DELEG_NOTE_RELEASE },
{ ZFS_DELEG_PERM_RENAME, ZFS_DELEG_NOTE_RENAME },
{ ZFS_DELEG_PERM_ROLLBACK, ZFS_DELEG_NOTE_ROLLBACK },
{ ZFS_DELEG_PERM_SEND, ZFS_DELEG_NOTE_SEND },
{ ZFS_DELEG_PERM_SHARE, ZFS_DELEG_NOTE_SHARE },
{ ZFS_DELEG_PERM_SNAPSHOT, ZFS_DELEG_NOTE_SNAPSHOT },
{ ZFS_DELEG_PERM_BOOKMARK, ZFS_DELEG_NOTE_BOOKMARK },
+ { ZFS_DELEG_PERM_REMAP, ZFS_DELEG_NOTE_REMAP },
{ ZFS_DELEG_PERM_LOAD_KEY, ZFS_DELEG_NOTE_LOAD_KEY },
{ ZFS_DELEG_PERM_CHANGE_KEY, ZFS_DELEG_NOTE_CHANGE_KEY },
{ ZFS_DELEG_PERM_GROUPQUOTA, ZFS_DELEG_NOTE_GROUPQUOTA },
{ ZFS_DELEG_PERM_GROUPUSED, ZFS_DELEG_NOTE_GROUPUSED },
{ ZFS_DELEG_PERM_USERPROP, ZFS_DELEG_NOTE_USERPROP },
{ ZFS_DELEG_PERM_USERQUOTA, ZFS_DELEG_NOTE_USERQUOTA },
{ ZFS_DELEG_PERM_USERUSED, ZFS_DELEG_NOTE_USERUSED },
{ ZFS_DELEG_PERM_USEROBJQUOTA, ZFS_DELEG_NOTE_USEROBJQUOTA },
{ ZFS_DELEG_PERM_USEROBJUSED, ZFS_DELEG_NOTE_USEROBJUSED },
{ ZFS_DELEG_PERM_GROUPOBJQUOTA, ZFS_DELEG_NOTE_GROUPOBJQUOTA },
{ ZFS_DELEG_PERM_GROUPOBJUSED, ZFS_DELEG_NOTE_GROUPOBJUSED },
{ ZFS_DELEG_PERM_PROJECTUSED, ZFS_DELEG_NOTE_PROJECTUSED },
{ ZFS_DELEG_PERM_PROJECTQUOTA, ZFS_DELEG_NOTE_PROJECTQUOTA },
{ ZFS_DELEG_PERM_PROJECTOBJUSED, ZFS_DELEG_NOTE_PROJECTOBJUSED },
{ ZFS_DELEG_PERM_PROJECTOBJQUOTA, ZFS_DELEG_NOTE_PROJECTOBJQUOTA },
{ NULL, ZFS_DELEG_NOTE_NONE }
};
/* permission structure */
typedef struct deleg_perm {
zfs_deleg_who_type_t dp_who_type;
const char *dp_name;
boolean_t dp_local;
boolean_t dp_descend;
} deleg_perm_t;
/* delegated permission AVL node */
typedef struct deleg_perm_node {
deleg_perm_t dpn_perm;
uu_avl_node_t dpn_avl_node;
} deleg_perm_node_t;
typedef struct fs_perm fs_perm_t;
/* permissions set */
typedef struct who_perm {
zfs_deleg_who_type_t who_type;
const char *who_name; /* id */
char who_ug_name[256]; /* user/group name */
fs_perm_t *who_fsperm; /* uplink */
uu_avl_t *who_deleg_perm_avl; /* permissions */
} who_perm_t;
/* who permission AVL node */
typedef struct who_perm_node {
who_perm_t who_perm;
uu_avl_node_t who_avl_node;
} who_perm_node_t;
typedef struct fs_perm_set fs_perm_set_t;
/* fs permissions */
struct fs_perm {
const char *fsp_name;
uu_avl_t *fsp_sc_avl; /* sets,create */
uu_avl_t *fsp_uge_avl; /* user,group,everyone */
fs_perm_set_t *fsp_set; /* uplink */
};
/* fs permission list node */
typedef struct fs_perm_node {
fs_perm_t fspn_fsperm;
uu_avl_t *fspn_avl;
uu_list_node_t fspn_list_node;
} fs_perm_node_t;
/* top level structure */
struct fs_perm_set {
uu_list_pool_t *fsps_list_pool;
uu_list_t *fsps_list; /* list of fs_perms */
uu_avl_pool_t *fsps_named_set_avl_pool;
uu_avl_pool_t *fsps_who_perm_avl_pool;
uu_avl_pool_t *fsps_deleg_perm_avl_pool;
};
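/*
* Classify a delegated permission note as a "subcommand" or an "other"
* (property-like) permission for display purposes.
*/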
static inline const char *
deleg_perm_type(zfs_deleg_note_t note)
{
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
case ZFS_DELEG_NOTE_GROUPUSED:
case ZFS_DELEG_NOTE_USERPROP:
case ZFS_DELEG_NOTE_USERQUOTA:
case ZFS_DELEG_NOTE_USERUSED:
case ZFS_DELEG_NOTE_USEROBJQUOTA:
case ZFS_DELEG_NOTE_USEROBJUSED:
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
case ZFS_DELEG_NOTE_GROUPOBJUSED:
case ZFS_DELEG_NOTE_PROJECTUSED:
case ZFS_DELEG_NOTE_PROJECTQUOTA:
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
/* other */
return (gettext("other"));
default:
return (gettext("subcommand"));
}
}
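/*
* Map a who type to a sort weight so that entries print in the order:
* permission sets, create time, user, group, everyone.
*/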
static int
who_type2weight(zfs_deleg_who_type_t who_type)
{
int res;
switch (who_type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
res = 0;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
res = 1;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
res = 2;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
res = 3;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
res = 4;
break;
default:
res = -1;
}
return (res);
}
/* ARGSUSED */
static int
who_perm_compare(const void *larg, const void *rarg, void *unused)
{
const who_perm_node_t *l = larg;
const who_perm_node_t *r = rarg;
zfs_deleg_who_type_t ltype = l->who_perm.who_type;
zfs_deleg_who_type_t rtype = r->who_perm.who_type;
int lweight = who_type2weight(ltype);
int rweight = who_type2weight(rtype);
int res = lweight - rweight;
if (res == 0)
res = strncmp(l->who_perm.who_name, r->who_perm.who_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
/* ARGSUSED */
static int
deleg_perm_compare(const void *larg, const void *rarg, void *unused)
{
const deleg_perm_node_t *l = larg;
const deleg_perm_node_t *r = rarg;
int res = strncmp(l->dpn_perm.dp_name, r->dpn_perm.dp_name,
ZFS_MAX_DELEG_NAME-1);
if (res == 0)
return (0);
if (res > 0)
return (1);
else
return (-1);
}
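/*
* Initialize the top-level fs_perm_set_t: create the list and AVL pools
* used to track delegated permissions per filesystem.
*/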
static inline void
fs_perm_set_init(fs_perm_set_t *fspset)
{
bzero(fspset, sizeof (fs_perm_set_t));
if ((fspset->fsps_list_pool = uu_list_pool_create("fsps_list_pool",
sizeof (fs_perm_node_t), offsetof(fs_perm_node_t, fspn_list_node),
NULL, UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_list = uu_list_create(fspset->fsps_list_pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_named_set_avl_pool = uu_avl_pool_create(
"named_set_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_who_perm_avl_pool = uu_avl_pool_create(
"who_perm_avl_pool", sizeof (who_perm_node_t), offsetof(
who_perm_node_t, who_avl_node), who_perm_compare,
UU_DEFAULT)) == NULL)
nomem();
if ((fspset->fsps_deleg_perm_avl_pool = uu_avl_pool_create(
"deleg_perm_avl_pool", sizeof (deleg_perm_node_t), offsetof(
deleg_perm_node_t, dpn_avl_node), deleg_perm_compare, UU_DEFAULT))
== NULL)
nomem();
}
static inline void fs_perm_fini(fs_perm_t *);
static inline void who_perm_fini(who_perm_t *);
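/*
* Release everything held by a fs_perm_set_t: finalize and free each
* fs_perm node, then destroy the AVL pools.
*/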
static inline void
fs_perm_set_fini(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = uu_list_first(fspset->fsps_list);
while (node != NULL) {
fs_perm_node_t *next_node =
uu_list_next(fspset->fsps_list, node);
fs_perm_t *fsperm = &node->fspn_fsperm;
fs_perm_fini(fsperm);
uu_list_remove(fspset->fsps_list, node);
free(node);
node = next_node;
}
uu_avl_pool_destroy(fspset->fsps_named_set_avl_pool);
uu_avl_pool_destroy(fspset->fsps_who_perm_avl_pool);
uu_avl_pool_destroy(fspset->fsps_deleg_perm_avl_pool);
}
static inline void
deleg_perm_init(deleg_perm_t *deleg_perm, zfs_deleg_who_type_t type,
const char *name)
{
deleg_perm->dp_who_type = type;
deleg_perm->dp_name = name;
}
static inline void
who_perm_init(who_perm_t *who_perm, fs_perm_t *fsperm,
zfs_deleg_who_type_t type, const char *name)
{
uu_avl_pool_t *pool;
pool = fsperm->fsp_set->fsps_deleg_perm_avl_pool;
bzero(who_perm, sizeof (who_perm_t));
if ((who_perm->who_deleg_perm_avl = uu_avl_create(pool, NULL,
UU_DEFAULT)) == NULL)
nomem();
who_perm->who_type = type;
who_perm->who_name = name;
who_perm->who_fsperm = fsperm;
}
static inline void
who_perm_fini(who_perm_t *who_perm)
{
deleg_perm_node_t *node = uu_avl_first(who_perm->who_deleg_perm_avl);
while (node != NULL) {
deleg_perm_node_t *next_node =
uu_avl_next(who_perm->who_deleg_perm_avl, node);
uu_avl_remove(who_perm->who_deleg_perm_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(who_perm->who_deleg_perm_avl);
}
static inline void
fs_perm_init(fs_perm_t *fsperm, fs_perm_set_t *fspset, const char *fsname)
{
uu_avl_pool_t *nset_pool = fspset->fsps_named_set_avl_pool;
uu_avl_pool_t *who_pool = fspset->fsps_who_perm_avl_pool;
bzero(fsperm, sizeof (fs_perm_t));
if ((fsperm->fsp_sc_avl = uu_avl_create(nset_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
if ((fsperm->fsp_uge_avl = uu_avl_create(who_pool, NULL, UU_DEFAULT))
== NULL)
nomem();
fsperm->fsp_set = fspset;
fsperm->fsp_name = fsname;
}
static inline void
fs_perm_fini(fs_perm_t *fsperm)
{
who_perm_node_t *node = uu_avl_first(fsperm->fsp_sc_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_sc_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_sc_avl, node);
free(node);
node = next_node;
}
node = uu_avl_first(fsperm->fsp_uge_avl);
while (node != NULL) {
who_perm_node_t *next_node = uu_avl_next(fsperm->fsp_uge_avl,
node);
who_perm_t *who_perm = &node->who_perm;
who_perm_fini(who_perm);
uu_avl_remove(fsperm->fsp_uge_avl, node);
free(node);
node = next_node;
}
uu_avl_destroy(fsperm->fsp_sc_avl);
uu_avl_destroy(fsperm->fsp_uge_avl);
}
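/*
* Insert (or merge into) the AVL entry for a single delegated permission
* and record whether it applies locally, to descendents, or both.
*/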
static void
set_deleg_perm_node(uu_avl_t *avl, deleg_perm_node_t *node,
zfs_deleg_who_type_t who_type, const char *name, char locality)
{
uu_avl_index_t idx = 0;
deleg_perm_node_t *found_node = NULL;
deleg_perm_t *deleg_perm = &node->dpn_perm;
deleg_perm_init(deleg_perm, who_type, name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL)
uu_avl_insert(avl, node, idx);
else {
node = found_node;
deleg_perm = &node->dpn_perm;
}
switch (locality) {
case ZFS_DELEG_LOCAL:
deleg_perm->dp_local = B_TRUE;
break;
case ZFS_DELEG_DESCENDENT:
deleg_perm->dp_descend = B_TRUE;
break;
case ZFS_DELEG_NA:
break;
default:
assert(B_FALSE); /* invalid locality */
}
}
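/*
* Walk the nvlist of permissions for one who entry and add each
* permission name to that entry's AVL tree with the given locality.
*/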
static inline int
parse_who_perm(who_perm_t *who_perm, nvlist_t *nvl, char locality)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = who_perm->who_fsperm->fsp_set;
uu_avl_t *avl = who_perm->who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_perm->who_type;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
uu_avl_pool_t *avl_pool = fspset->fsps_deleg_perm_avl_pool;
deleg_perm_node_t *node =
safe_malloc(sizeof (deleg_perm_node_t));
VERIFY(type == DATA_TYPE_BOOLEAN);
uu_avl_node_init(node, &node->dpn_avl_node, avl_pool);
set_deleg_perm_node(avl, node, who_type, name, locality);
}
return (0);
}
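/*
* Parse the per-filesystem fsacl nvlist: each key encodes the who type,
* locality and name ("<type><locality>$<who>"), and the value is the
* nvlist of delegated permissions for that who entry.
*/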
static inline int
parse_fs_perm(fs_perm_t *fsperm, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
fs_perm_set_t *fspset = fsperm->fsp_set;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *name = nvpair_name(nvp);
uu_avl_t *avl = NULL;
uu_avl_pool_t *avl_pool = NULL;
zfs_deleg_who_type_t perm_type = name[0];
char perm_locality = name[1];
const char *perm_name = name + 3;
boolean_t is_set = B_TRUE;
who_perm_t *who_perm = NULL;
assert('$' == name[2]);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
switch (perm_type) {
case ZFS_DELEG_CREATE:
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_NAMED_SET:
case ZFS_DELEG_NAMED_SET_SETS:
avl_pool = fspset->fsps_named_set_avl_pool;
avl = fsperm->fsp_sc_avl;
break;
case ZFS_DELEG_USER:
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_GROUP:
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_EVERYONE:
case ZFS_DELEG_EVERYONE_SETS:
avl_pool = fspset->fsps_who_perm_avl_pool;
avl = fsperm->fsp_uge_avl;
break;
default:
assert(!"unhandled zfs_deleg_who_type_t");
}
if (is_set) {
who_perm_node_t *found_node = NULL;
who_perm_node_t *node = safe_malloc(
sizeof (who_perm_node_t));
who_perm = &node->who_perm;
uu_avl_index_t idx = 0;
uu_avl_node_init(node, &node->who_avl_node, avl_pool);
who_perm_init(who_perm, fsperm, perm_type, perm_name);
if ((found_node = uu_avl_find(avl, node, NULL, &idx))
== NULL) {
if (avl == fsperm->fsp_uge_avl) {
uid_t rid = 0;
struct passwd *p = NULL;
struct group *g = NULL;
const char *nice_name = NULL;
switch (perm_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
rid = atoi(perm_name);
p = getpwuid(rid);
if (p)
nice_name = p->pw_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
rid = atoi(perm_name);
g = getgrgid(rid);
if (g)
nice_name = g->gr_name;
break;
default:
break;
}
if (nice_name != NULL)
(void) strlcpy(
node->who_perm.who_ug_name,
nice_name, 256);
}
uu_avl_insert(avl, node, idx);
} else {
node = found_node;
who_perm = &node->who_perm;
}
}
VERIFY3P(who_perm, !=, NULL);
(void) parse_who_perm(who_perm, nvl2, perm_locality);
}
return (0);
}
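/*
* Parse the nvlist returned by zfs_get_fsacl(): one nested nvlist per
* filesystem, keyed by dataset name.
*/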
static inline int
parse_fs_perm_set(fs_perm_set_t *fspset, nvlist_t *nvl)
{
nvpair_t *nvp = NULL;
uu_avl_index_t idx = 0;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
nvlist_t *nvl2 = NULL;
const char *fsname = nvpair_name(nvp);
data_type_t type = nvpair_type(nvp);
fs_perm_t *fsperm = NULL;
fs_perm_node_t *node = safe_malloc(sizeof (fs_perm_node_t));
if (node == NULL)
nomem();
fsperm = &node->fspn_fsperm;
VERIFY(DATA_TYPE_NVLIST == type);
uu_list_node_init(node, &node->fspn_list_node,
fspset->fsps_list_pool);
idx = uu_list_numnodes(fspset->fsps_list);
fs_perm_init(fsperm, fspset, fsname);
if (nvpair_value_nvlist(nvp, &nvl2) != 0)
return (-1);
(void) parse_fs_perm(fsperm, nvl2);
uu_list_insert(fspset->fsps_list, node, idx);
}
return (0);
}
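/*
* Return the human-readable note printed in the NOTES column of the
* 'zfs allow -h' permission table.
*/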
static inline const char *
deleg_perm_comment(zfs_deleg_note_t note)
{
const char *str = "";
/* subcommands */
switch (note) {
/* SUBCOMMANDS */
case ZFS_DELEG_NOTE_ALLOW:
str = gettext("Must also have the permission that is being"
"\n\t\t\t\tallowed");
break;
case ZFS_DELEG_NOTE_CLONE:
str = gettext("Must also have the 'create' ability and 'mount'"
"\n\t\t\t\tability in the origin file system");
break;
case ZFS_DELEG_NOTE_CREATE:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DESTROY:
str = gettext("Must also have the 'mount' ability");
break;
case ZFS_DELEG_NOTE_DIFF:
str = gettext("Allows lookup of paths within a dataset;"
"\n\t\t\t\tgiven an object number. Ordinary users need this"
"\n\t\t\t\tin order to use zfs diff");
break;
case ZFS_DELEG_NOTE_HOLD:
str = gettext("Allows adding a user hold to a snapshot");
break;
case ZFS_DELEG_NOTE_MOUNT:
str = gettext("Allows mount/umount of ZFS datasets");
break;
case ZFS_DELEG_NOTE_PROMOTE:
str = gettext("Must also have the 'mount'\n\t\t\t\tand"
" 'promote' ability in the origin file system");
break;
case ZFS_DELEG_NOTE_RECEIVE:
str = gettext("Must also have the 'mount' and 'create'"
" ability");
break;
case ZFS_DELEG_NOTE_RELEASE:
str = gettext("Allows releasing a user hold which\n\t\t\t\t"
"might destroy the snapshot");
break;
case ZFS_DELEG_NOTE_RENAME:
str = gettext("Must also have the 'mount' and 'create'"
"\n\t\t\t\tability in the new parent");
break;
case ZFS_DELEG_NOTE_ROLLBACK:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SEND:
str = gettext("");
break;
case ZFS_DELEG_NOTE_SHARE:
str = gettext("Allows sharing file systems over NFS or SMB"
"\n\t\t\t\tprotocols");
break;
case ZFS_DELEG_NOTE_SNAPSHOT:
str = gettext("");
break;
case ZFS_DELEG_NOTE_LOAD_KEY:
str = gettext("Allows loading or unloading an encryption key");
break;
case ZFS_DELEG_NOTE_CHANGE_KEY:
str = gettext("Allows changing or adding an encryption key");
break;
/*
* case ZFS_DELEG_NOTE_VSCAN:
* str = gettext("");
* break;
*/
/* OTHER */
case ZFS_DELEG_NOTE_GROUPQUOTA:
str = gettext("Allows accessing any groupquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPUSED:
str = gettext("Allows reading any groupused@... property");
break;
case ZFS_DELEG_NOTE_USERPROP:
str = gettext("Allows changing any user property");
break;
case ZFS_DELEG_NOTE_USERQUOTA:
str = gettext("Allows accessing any userquota@... property");
break;
case ZFS_DELEG_NOTE_USERUSED:
str = gettext("Allows reading any userused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJQUOTA:
str = gettext("Allows accessing any userobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"groupobjquota@... property");
break;
case ZFS_DELEG_NOTE_GROUPOBJUSED:
str = gettext("Allows reading any groupobjused@... property");
break;
case ZFS_DELEG_NOTE_USEROBJUSED:
str = gettext("Allows reading any userobjused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTQUOTA:
str = gettext("Allows accessing any projectquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJQUOTA:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjquota@... property");
break;
case ZFS_DELEG_NOTE_PROJECTUSED:
str = gettext("Allows reading any projectused@... property");
break;
case ZFS_DELEG_NOTE_PROJECTOBJUSED:
str = gettext("Allows accessing any \n\t\t\t\t"
"projectobjused@... property");
break;
/* other */
default:
str = "";
}
return (str);
}
struct allow_opts {
boolean_t local;
boolean_t descend;
boolean_t user;
boolean_t group;
boolean_t everyone;
boolean_t create;
boolean_t set;
boolean_t recursive; /* unallow only */
boolean_t prt_usage;
boolean_t prt_perms;
char *who;
char *perms;
const char *dataset;
};
static inline int
prop_cmp(const void *a, const void *b)
{
const char *str1 = *(const char **)a;
const char *str2 = *(const char **)b;
return (strcmp(str1, str2));
}
static void
allow_usage(boolean_t un, boolean_t requested, const char *msg)
{
const char *opt_desc[] = {
"-h", gettext("show this help message and exit"),
"-l", gettext("set permission locally"),
"-d", gettext("set permission for descents"),
"-u", gettext("set permission for user"),
"-g", gettext("set permission for group"),
"-e", gettext("set permission for everyone"),
"-c", gettext("set create time permission"),
"-s", gettext("define permission set"),
/* unallow only */
"-r", gettext("remove permissions recursively"),
};
size_t unallow_size = sizeof (opt_desc) / sizeof (char *);
size_t allow_size = unallow_size - 2;
const char *props[ZFS_NUM_PROPS];
int i;
size_t count = 0;
FILE *fp = requested ? stdout : stderr;
zprop_desc_t *pdtbl = zfs_prop_get_table();
const char *fmt = gettext("%-16s %-14s\t%s\n");
(void) fprintf(fp, gettext("Usage: %s\n"), get_usage(un ? HELP_UNALLOW :
HELP_ALLOW));
(void) fprintf(fp, gettext("Options:\n"));
for (i = 0; i < (un ? unallow_size : allow_size); i += 2) {
const char *opt = opt_desc[i];
const char *optdsc = opt_desc[i + 1];
(void) fprintf(fp, gettext(" %-10s %s\n"), opt, optdsc);
}
(void) fprintf(fp, gettext("\nThe following permissions are "
"supported:\n\n"));
(void) fprintf(fp, fmt, gettext("NAME"), gettext("TYPE"),
gettext("NOTES"));
for (i = 0; i < ZFS_NUM_DELEG_NOTES; i++) {
const char *perm_name = zfs_deleg_perm_tbl[i].z_perm;
zfs_deleg_note_t perm_note = zfs_deleg_perm_tbl[i].z_note;
const char *perm_type = deleg_perm_type(perm_note);
const char *perm_comment = deleg_perm_comment(perm_note);
(void) fprintf(fp, fmt, perm_name, perm_type, perm_comment);
}
for (i = 0; i < ZFS_NUM_PROPS; i++) {
zprop_desc_t *pd = &pdtbl[i];
if (pd->pd_visible != B_TRUE)
continue;
if (pd->pd_attr == PROP_READONLY)
continue;
props[count++] = pd->pd_name;
}
props[count] = NULL;
qsort(props, count, sizeof (char *), prop_cmp);
for (i = 0; i < count; i++)
(void) fprintf(fp, fmt, props[i], gettext("property"), "");
if (msg != NULL)
(void) fprintf(fp, gettext("\nzfs: error: %s"), msg);
exit(requested ? 0 : 2);
}
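/*
* Pull the dataset name (last argument) and the permission list
* (second-to-last) out of the remaining arguments; for unallow the
* permission list may be omitted.
*/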
static inline const char *
munge_args(int argc, char **argv, boolean_t un, size_t expected_argc,
char **permsp)
{
if (un && argc == expected_argc - 1)
*permsp = NULL;
else if (argc == expected_argc)
*permsp = argv[argc - 2];
else
allow_usage(un, B_FALSE,
gettext("wrong number of parameters\n"));
return (argv[argc - 1]);
}
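/*
* Validate the option combination and fill in struct allow_opts with the
* who, permission list and target dataset. Defaults to both local and
* descendent scope when neither -l nor -d is given.
*/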
static void
parse_allow_args(int argc, char **argv, boolean_t un, struct allow_opts *opts)
{
int uge_sum = opts->user + opts->group + opts->everyone;
int csuge_sum = opts->create + opts->set + uge_sum;
int ldcsuge_sum = csuge_sum + opts->local + opts->descend;
int all_sum = un ? ldcsuge_sum + opts->recursive : ldcsuge_sum;
if (uge_sum > 1)
allow_usage(un, B_FALSE,
gettext("-u, -g, and -e are mutually exclusive\n"));
if (opts->prt_usage) {
if (argc == 0 && all_sum == 0)
allow_usage(un, B_TRUE, NULL);
else
usage(B_FALSE);
}
if (opts->set) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -s\n"));
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
if (argv[0][0] != '@')
allow_usage(un, B_FALSE,
gettext("invalid set name: missing '@' prefix\n"));
opts->who = argv[0];
} else if (opts->create) {
if (ldcsuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -c\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (opts->everyone) {
if (csuge_sum > 1)
allow_usage(un, B_FALSE,
gettext("invalid options combined with -e\n"));
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (uge_sum == 0 && argc > 0 && strcmp(argv[0], "everyone")
== 0) {
opts->everyone = B_TRUE;
argc--;
argv++;
opts->dataset = munge_args(argc, argv, un, 2, &opts->perms);
} else if (argc == 1 && !un) {
opts->prt_perms = B_TRUE;
opts->dataset = argv[argc-1];
} else {
opts->dataset = munge_args(argc, argv, un, 3, &opts->perms);
opts->who = argv[0];
}
if (!opts->local && !opts->descend) {
opts->local = B_TRUE;
opts->descend = B_TRUE;
}
}
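/*
* Encode one who entry into the nvlist passed to zfs_set_fsacl(). Keys
* take the form "<type><locality>$[who]"; with a permission list the
* value is a nested nvlist of booleans (permission sets are stored under
* the corresponding *_SETS type), otherwise a bare boolean is added for
* that who entry.
*/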
static void
store_allow_perm(zfs_deleg_who_type_t type, boolean_t local, boolean_t descend,
const char *who, char *perms, nvlist_t *top_nvl)
{
int i;
char ld[2] = { '\0', '\0' };
char who_buf[MAXNAMELEN + 32];
char base_type = '\0';
char set_type = '\0';
nvlist_t *base_nvl = NULL;
nvlist_t *set_nvl = NULL;
nvlist_t *nvl;
if (nvlist_alloc(&base_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (nvlist_alloc(&set_nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
switch (type) {
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
set_type = ZFS_DELEG_NAMED_SET_SETS;
base_type = ZFS_DELEG_NAMED_SET;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_CREATE_SETS:
case ZFS_DELEG_CREATE:
set_type = ZFS_DELEG_CREATE_SETS;
base_type = ZFS_DELEG_CREATE;
ld[0] = ZFS_DELEG_NA;
break;
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
set_type = ZFS_DELEG_USER_SETS;
base_type = ZFS_DELEG_USER;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
set_type = ZFS_DELEG_GROUP_SETS;
base_type = ZFS_DELEG_GROUP;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
set_type = ZFS_DELEG_EVERYONE_SETS;
base_type = ZFS_DELEG_EVERYONE;
if (local)
ld[0] = ZFS_DELEG_LOCAL;
if (descend)
ld[1] = ZFS_DELEG_DESCENDENT;
break;
default:
assert(set_type != '\0' && base_type != '\0');
}
if (perms != NULL) {
char *curr = perms;
char *end = curr + strlen(perms);
while (curr < end) {
char *delim = strchr(curr, ',');
if (delim == NULL)
delim = end;
else
*delim = '\0';
if (curr[0] == '@')
nvl = set_nvl;
else
nvl = base_nvl;
(void) nvlist_add_boolean(nvl, curr);
if (delim != end)
*delim = ',';
curr = delim + 1;
}
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (!nvlist_empty(base_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
base_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
base_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
base_nvl);
}
if (!nvlist_empty(set_nvl)) {
if (who != NULL)
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$%s",
set_type, locality, who);
else
(void) snprintf(who_buf,
sizeof (who_buf), "%c%c$",
set_type, locality);
(void) nvlist_add_nvlist(top_nvl, who_buf,
set_nvl);
}
}
} else {
for (i = 0; i < 2; i++) {
char locality = ld[i];
if (locality == 0)
continue;
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", base_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", base_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
if (who != NULL)
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$%s", set_type, locality, who);
else
(void) snprintf(who_buf, sizeof (who_buf),
"%c%c$", set_type, locality);
(void) nvlist_add_boolean(top_nvl, who_buf);
}
}
}
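/*
* Build the fsacl nvlist from the parsed command line, resolving user
* and group names in a comma-separated who list to numeric IDs.
*/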
static int
construct_fsacl_list(boolean_t un, struct allow_opts *opts, nvlist_t **nvlp)
{
if (nvlist_alloc(nvlp, NV_UNIQUE_NAME, 0) != 0)
nomem();
if (opts->set) {
store_allow_perm(ZFS_DELEG_NAMED_SET, opts->local,
opts->descend, opts->who, opts->perms, *nvlp);
} else if (opts->create) {
store_allow_perm(ZFS_DELEG_CREATE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else if (opts->everyone) {
store_allow_perm(ZFS_DELEG_EVERYONE, opts->local,
opts->descend, NULL, opts->perms, *nvlp);
} else {
char *curr = opts->who;
char *end = curr + strlen(curr);
while (curr < end) {
const char *who;
zfs_deleg_who_type_t who_type = ZFS_DELEG_WHO_UNKNOWN;
char *endch;
char *delim = strchr(curr, ',');
char errbuf[256];
char id[64];
struct passwd *p = NULL;
struct group *g = NULL;
uid_t rid;
if (delim == NULL)
delim = end;
else
*delim = '\0';
rid = (uid_t)strtol(curr, &endch, 0);
if (opts->user) {
who_type = ZFS_DELEG_USER;
if (*endch != '\0')
p = getpwnam(curr);
else
p = getpwuid(rid);
if (p != NULL)
rid = p->pw_uid;
else {
(void) snprintf(errbuf, 256, gettext(
"invalid user %s"), curr);
allow_usage(un, B_TRUE, errbuf);
}
} else if (opts->group) {
who_type = ZFS_DELEG_GROUP;
if (*endch != '\0')
g = getgrnam(curr);
else
g = getgrgid(rid);
if (g != NULL)
rid = g->gr_gid;
else {
(void) snprintf(errbuf, 256, gettext(
"invalid group %s"), curr);
allow_usage(un, B_TRUE, errbuf);
}
} else {
if (*endch != '\0') {
p = getpwnam(curr);
} else {
p = getpwuid(rid);
}
if (p == NULL) {
if (*endch != '\0') {
g = getgrnam(curr);
} else {
g = getgrgid(rid);
}
}
if (p != NULL) {
who_type = ZFS_DELEG_USER;
rid = p->pw_uid;
} else if (g != NULL) {
who_type = ZFS_DELEG_GROUP;
rid = g->gr_gid;
} else {
(void) snprintf(errbuf, 256, gettext(
"invalid user/group %s"), curr);
allow_usage(un, B_TRUE, errbuf);
}
}
(void) sprintf(id, "%u", rid);
who = id;
store_allow_perm(who_type, opts->local,
opts->descend, who, opts->perms, *nvlp);
curr = delim + 1;
}
}
return (0);
}
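/*
* Print the "Permission sets:" and "Create time permissions:" sections,
* relying on the AVL sort order (who_type2weight) to group them.
*/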
static void
print_set_creat_perms(uu_avl_t *who_avl)
{
const char *sc_title[] = {
gettext("Permission sets:\n"),
gettext("Create time permissions:\n"),
NULL
};
const char **title_ptr = sc_title;
who_perm_node_t *who_node = NULL;
int prev_weight = -1;
for (who_node = uu_avl_first(who_avl); who_node != NULL;
who_node = uu_avl_next(who_avl, who_node)) {
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
const char *who_name = who_node->who_perm.who_name;
int weight = who_type2weight(who_type);
boolean_t first = B_TRUE;
deleg_perm_node_t *deleg_node;
if (prev_weight != weight) {
(void) printf("%s", *title_ptr++);
prev_weight = weight;
}
if (who_name == NULL || strnlen(who_name, 1) == 0)
(void) printf("\t");
else
(void) printf("\t%s ", who_name);
for (deleg_node = uu_avl_first(avl); deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (first) {
(void) printf("%s",
deleg_node->dpn_perm.dp_name);
first = B_FALSE;
} else
(void) printf(",%s",
deleg_node->dpn_perm.dp_name);
}
(void) printf("\n");
}
}
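/*
* Print user/group/everyone permissions that match the requested
* local/descendent combination under the given section title.
*/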
static void
print_uge_deleg_perms(uu_avl_t *who_avl, boolean_t local, boolean_t descend,
const char *title)
{
who_perm_node_t *who_node = NULL;
boolean_t prt_title = B_TRUE;
uu_avl_walk_t *walk;
if ((walk = uu_avl_walk_start(who_avl, UU_WALK_ROBUST)) == NULL)
nomem();
while ((who_node = uu_avl_walk_next(walk)) != NULL) {
const char *who_name = who_node->who_perm.who_name;
const char *nice_who_name = who_node->who_perm.who_ug_name;
uu_avl_t *avl = who_node->who_perm.who_deleg_perm_avl;
zfs_deleg_who_type_t who_type = who_node->who_perm.who_type;
char delim = ' ';
deleg_perm_node_t *deleg_node;
boolean_t prt_who = B_TRUE;
for (deleg_node = uu_avl_first(avl);
deleg_node != NULL;
deleg_node = uu_avl_next(avl, deleg_node)) {
if (local != deleg_node->dpn_perm.dp_local ||
descend != deleg_node->dpn_perm.dp_descend)
continue;
if (prt_who) {
const char *who = NULL;
if (prt_title) {
prt_title = B_FALSE;
(void) printf("%s", title);
}
switch (who_type) {
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_USER:
who = gettext("user");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_GROUP_SETS:
case ZFS_DELEG_GROUP:
who = gettext("group");
if (nice_who_name)
who_name = nice_who_name;
break;
case ZFS_DELEG_EVERYONE_SETS:
case ZFS_DELEG_EVERYONE:
who = gettext("everyone");
who_name = NULL;
break;
default:
assert(who != NULL);
}
prt_who = B_FALSE;
if (who_name == NULL)
(void) printf("\t%s", who);
else
(void) printf("\t%s %s", who, who_name);
}
(void) printf("%c%s", delim,
deleg_node->dpn_perm.dp_name);
delim = ',';
}
if (!prt_who)
(void) printf("\n");
}
uu_avl_walk_end(walk);
}
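/*
* Print the full 'zfs allow' report for each filesystem in the set.
*/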
static void
print_fs_perms(fs_perm_set_t *fspset)
{
fs_perm_node_t *node = NULL;
char buf[MAXNAMELEN + 32];
const char *dsname = buf;
for (node = uu_list_first(fspset->fsps_list); node != NULL;
node = uu_list_next(fspset->fsps_list, node)) {
uu_avl_t *sc_avl = node->fspn_fsperm.fsp_sc_avl;
uu_avl_t *uge_avl = node->fspn_fsperm.fsp_uge_avl;
int left = 0;
(void) snprintf(buf, sizeof (buf),
gettext("---- Permissions on %s "),
node->fspn_fsperm.fsp_name);
(void) printf("%s", dsname);
left = 70 - strlen(buf);
while (left-- > 0)
(void) printf("-");
(void) printf("\n");
print_set_creat_perms(sc_avl);
print_uge_deleg_perms(uge_avl, B_TRUE, B_FALSE,
gettext("Local permissions:\n"));
print_uge_deleg_perms(uge_avl, B_FALSE, B_TRUE,
gettext("Descendent permissions:\n"));
print_uge_deleg_perms(uge_avl, B_TRUE, B_TRUE,
gettext("Local+Descendent permissions:\n"));
}
}
static fs_perm_set_t fs_perm_set = { NULL, NULL, NULL, NULL };
struct deleg_perms {
boolean_t un;
nvlist_t *nvl;
};
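/*
* zfs_iter_filesystems() callback used by recursive unallow: apply the
* same fsacl update to each child filesystem or volume.
*/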
static int
set_deleg_perms(zfs_handle_t *zhp, void *data)
{
struct deleg_perms *perms = (struct deleg_perms *)data;
zfs_type_t zfs_type = zfs_get_type(zhp);
if (zfs_type != ZFS_TYPE_FILESYSTEM && zfs_type != ZFS_TYPE_VOLUME)
return (0);
return (zfs_set_fsacl(zhp, perms->un, perms->nvl));
}
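/*
* Shared implementation of 'zfs allow' and 'zfs unallow': parse options,
* print the current permissions when no change is requested, otherwise
* construct and apply the fsacl update.
*/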
static int
zfs_do_allow_unallow_impl(int argc, char **argv, boolean_t un)
{
zfs_handle_t *zhp;
nvlist_t *perm_nvl = NULL;
nvlist_t *update_perm_nvl = NULL;
int error = 1;
int c;
struct allow_opts opts = { 0 };
const char *optstr = un ? "ldugecsrh" : "ldugecsh";
/* check opts */
while ((c = getopt(argc, argv, optstr)) != -1) {
switch (c) {
case 'l':
opts.local = B_TRUE;
break;
case 'd':
opts.descend = B_TRUE;
break;
case 'u':
opts.user = B_TRUE;
break;
case 'g':
opts.group = B_TRUE;
break;
case 'e':
opts.everyone = B_TRUE;
break;
case 's':
opts.set = B_TRUE;
break;
case 'c':
opts.create = B_TRUE;
break;
case 'r':
opts.recursive = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case 'h':
opts.prt_usage = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
parse_allow_args(argc, argv, un, &opts);
/* try to open the dataset */
if ((zhp = zfs_open(g_zfs, opts.dataset, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
(void) fprintf(stderr, "Failed to open dataset: %s\n",
opts.dataset);
return (-1);
}
if (zfs_get_fsacl(zhp, &perm_nvl) != 0)
goto cleanup2;
fs_perm_set_init(&fs_perm_set);
if (parse_fs_perm_set(&fs_perm_set, perm_nvl) != 0) {
(void) fprintf(stderr, "Failed to parse fsacl permissions\n");
goto cleanup1;
}
if (opts.prt_perms)
print_fs_perms(&fs_perm_set);
else {
(void) construct_fsacl_list(un, &opts, &update_perm_nvl);
if (zfs_set_fsacl(zhp, un, update_perm_nvl) != 0)
goto cleanup0;
if (un && opts.recursive) {
struct deleg_perms data = { un, update_perm_nvl };
if (zfs_iter_filesystems(zhp, set_deleg_perms,
&data) != 0)
goto cleanup0;
}
}
error = 0;
cleanup0:
nvlist_free(perm_nvl);
nvlist_free(update_perm_nvl);
cleanup1:
fs_perm_set_fini(&fs_perm_set);
cleanup2:
zfs_close(zhp);
return (error);
}
static int
zfs_do_allow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_FALSE));
}
static int
zfs_do_unallow(int argc, char **argv)
{
return (zfs_do_allow_unallow_impl(argc, argv, B_TRUE));
}
static int
zfs_do_hold_rele_impl(int argc, char **argv, boolean_t holding)
{
int errors = 0;
int i;
const char *tag;
boolean_t recursive = B_FALSE;
const char *opts = holding ? "rt" : "r";
int c;
/* check options */
while ((c = getopt(argc, argv, opts)) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 2)
usage(B_FALSE);
tag = argv[0];
--argc;
++argv;
if (holding && tag[0] == '.') {
/* tags starting with '.' are reserved for libzfs */
(void) fprintf(stderr, gettext("tag may not start with '.'\n"));
usage(B_FALSE);
}
for (i = 0; i < argc; ++i) {
zfs_handle_t *zhp;
char parent[ZFS_MAX_DATASET_NAME_LEN];
const char *delim;
char *path = argv[i];
delim = strchr(path, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), path);
++errors;
continue;
}
(void) strncpy(parent, path, delim - path);
parent[delim - path] = '\0';
zhp = zfs_open(g_zfs, parent,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL) {
++errors;
continue;
}
if (holding) {
if (zfs_hold(zhp, delim+1, tag, recursive, -1) != 0)
++errors;
} else {
if (zfs_release(zhp, delim+1, tag, recursive) != 0)
++errors;
}
zfs_close(zhp);
}
return (errors != 0);
}
/*
* zfs hold [-r] [-t] <tag> <snap> ...
*
* -r Recursively hold
*
* Apply a user-hold with the given tag to the list of snapshots.
*/
static int
zfs_do_hold(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_TRUE));
}
/*
* zfs release [-r] <tag> <snap> ...
*
* -r Recursively release
*
* Release a user-hold with the given tag from the list of snapshots.
*/
static int
zfs_do_release(int argc, char **argv)
{
return (zfs_do_hold_rele_impl(argc, argv, B_FALSE));
}
typedef struct holds_cbdata {
boolean_t cb_recursive;
const char *cb_snapname;
nvlist_t **cb_nvlp;
size_t cb_max_namelen;
size_t cb_max_taglen;
} holds_cbdata_t;
#define STRFTIME_FMT_STR "%a %b %e %k:%M %Y"
#define DATETIME_BUF_LEN (32)
/*
* Print collected holds: a NAME/TAG/TIMESTAMP header plus one aligned
* row per hold, or tab-separated rows in scripted mode.
*/
static void
print_holds(boolean_t scripted, int nwidth, int tagwidth, nvlist_t *nvl)
{
int i;
nvpair_t *nvp = NULL;
char *hdr_cols[] = { "NAME", "TAG", "TIMESTAMP" };
const char *col;
if (!scripted) {
for (i = 0; i < 3; i++) {
col = gettext(hdr_cols[i]);
if (i < 2)
(void) printf("%-*s ", i ? tagwidth : nwidth,
col);
else
(void) printf("%s\n", col);
}
}
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
char *zname = nvpair_name(nvp);
nvlist_t *nvl2;
nvpair_t *nvp2 = NULL;
(void) nvpair_value_nvlist(nvp, &nvl2);
while ((nvp2 = nvlist_next_nvpair(nvl2, nvp2)) != NULL) {
char tsbuf[DATETIME_BUF_LEN];
char *tagname = nvpair_name(nvp2);
uint64_t val = 0;
time_t time;
struct tm t;
(void) nvpair_value_uint64(nvp2, &val);
time = (time_t)val;
(void) localtime_r(&time, &t);
(void) strftime(tsbuf, DATETIME_BUF_LEN,
gettext(STRFTIME_FMT_STR), &t);
if (scripted) {
(void) printf("%s\t%s\t%s\n", zname,
tagname, tsbuf);
} else {
(void) printf("%-*s %-*s %s\n", nwidth,
zname, tagwidth, tagname, tsbuf);
}
}
}
}
/*
* Generic callback function to list a dataset or snapshot.
*/
static int
holds_callback(zfs_handle_t *zhp, void *data)
{
holds_cbdata_t *cbp = data;
nvlist_t *top_nvl = *cbp->cb_nvlp;
nvlist_t *nvl = NULL;
nvpair_t *nvp = NULL;
const char *zname = zfs_get_name(zhp);
size_t znamelen = strlen(zname);
if (cbp->cb_recursive) {
const char *snapname;
char *delim = strchr(zname, '@');
if (delim == NULL)
return (0);
snapname = delim + 1;
if (strcmp(cbp->cb_snapname, snapname))
return (0);
}
if (zfs_get_holds(zhp, &nvl) != 0)
return (-1);
if (znamelen > cbp->cb_max_namelen)
cbp->cb_max_namelen = znamelen;
while ((nvp = nvlist_next_nvpair(nvl, nvp)) != NULL) {
const char *tag = nvpair_name(nvp);
size_t taglen = strlen(tag);
if (taglen > cbp->cb_max_taglen)
cbp->cb_max_taglen = taglen;
}
return (nvlist_add_nvlist(top_nvl, zname, nvl));
}
/*
* zfs holds [-rH] <snap> ...
*
* -r Lists holds recursively for the named snapshots
* -H Scripted mode; elide headers and separate columns by tabs
*/
static int
zfs_do_holds(int argc, char **argv)
{
int errors = 0;
int c;
int i;
boolean_t scripted = B_FALSE;
boolean_t recursive = B_FALSE;
const char *opts = "rH";
nvlist_t *nvl;
int types = ZFS_TYPE_SNAPSHOT;
holds_cbdata_t cb = { 0 };
int limit = 0;
int ret = 0;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, opts)) != -1) {
switch (c) {
case 'r':
recursive = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (recursive) {
types |= ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME;
flags |= ZFS_ITER_RECURSE;
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1)
usage(B_FALSE);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
nomem();
for (i = 0; i < argc; ++i) {
char *snapshot = argv[i];
const char *delim;
const char *snapname;
delim = strchr(snapshot, '@');
if (delim == NULL) {
(void) fprintf(stderr,
gettext("'%s' is not a snapshot\n"), snapshot);
++errors;
continue;
}
snapname = delim + 1;
if (recursive)
snapshot[delim - snapshot] = '\0';
cb.cb_recursive = recursive;
cb.cb_snapname = snapname;
cb.cb_nvlp = &nvl;
/*
* 1. collect holds data, set format options
*/
ret = zfs_for_each(argc, argv, flags, types, NULL, NULL, limit,
holds_callback, &cb);
if (ret != 0)
++errors;
}
/*
* 2. print holds data
*/
print_holds(scripted, cb.cb_max_namelen, cb.cb_max_taglen, nvl);
if (nvlist_empty(nvl))
(void) fprintf(stderr, gettext("no datasets available\n"));
nvlist_free(nvl);
return (0 != errors);
}
#define CHECK_SPINNER 30
#define SPINNER_TIME 3 /* seconds */
#define MOUNT_TIME 5 /* seconds */
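/*
* zfs_iter_root()/zfs_iter_filesystems() callback: collect every
* filesystem handle into the get_all_cb_t list, drawing a progress
* spinner in verbose mode.
*/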
static int
get_one_dataset(zfs_handle_t *zhp, void *data)
{
static char *spin[] = { "-", "\\", "|", "/" };
static int spinval = 0;
static int spincheck = 0;
static time_t last_spin_time = (time_t)0;
get_all_cb_t *cbp = data;
zfs_type_t type = zfs_get_type(zhp);
if (cbp->cb_verbose) {
if (--spincheck < 0) {
time_t now = time(NULL);
if (last_spin_time + SPINNER_TIME < now) {
update_progress(spin[spinval++ % 4]);
last_spin_time = now;
}
spincheck = CHECK_SPINNER;
}
}
/*
* Iterate over any nested datasets.
*/
if (zfs_iter_filesystems(zhp, get_one_dataset, data) != 0) {
zfs_close(zhp);
return (1);
}
/*
* Skip any datasets whose type does not match.
*/
if ((type & ZFS_TYPE_FILESYSTEM) == 0) {
zfs_close(zhp);
return (0);
}
libzfs_add_handle(cbp, zhp);
assert(cbp->cb_used <= cbp->cb_alloc);
return (0);
}
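/*
* Gather handles for every filesystem in the system, optionally showing
* progress while the configuration is read.
*/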
static void
get_all_datasets(zfs_handle_t ***dslist, size_t *count, boolean_t verbose)
{
get_all_cb_t cb = { 0 };
cb.cb_verbose = verbose;
cb.cb_getone = get_one_dataset;
if (verbose)
set_progress_header(gettext("Reading ZFS config"));
(void) zfs_iter_root(g_zfs, get_one_dataset, &cb);
*dslist = cb.cb_handles;
*count = cb.cb_used;
if (verbose)
finish_progress(gettext("done."));
}
/*
* Generic callback for sharing or mounting filesystems. Because the code is so
* similar, we have a common function with an extra parameter to determine which
* mode we are using.
*/
#define OP_SHARE 0x1
#define OP_MOUNT 0x2
/*
* Share or mount a dataset.
*/
static int
share_mount_one(zfs_handle_t *zhp, int op, int flags, char *protocol,
boolean_t explicit, const char *options)
{
char mountpoint[ZFS_MAXPROPLEN];
char shareopts[ZFS_MAXPROPLEN];
char smbshareopts[ZFS_MAXPROPLEN];
const char *cmdname = op == OP_SHARE ? "share" : "mount";
struct mnttab mnt;
uint64_t zoned, canmount;
boolean_t shared_nfs, shared_smb;
assert(zfs_get_type(zhp) & ZFS_TYPE_FILESYSTEM);
/*
* Check to make sure we can mount/share this dataset. If we
* are in the global zone and the filesystem is exported to a
* local zone, or if we are in a local zone and the
* filesystem is not exported, then it is an error.
*/
zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned && getzoneid() == GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"dataset is exported to a local zone\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (!zoned && getzoneid() != GLOBAL_ZONEID) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"permission denied\n"), cmdname,
zfs_get_name(zhp));
return (1);
}
/*
* Ignore any filesystems which don't apply to us. This
* includes those with a legacy mountpoint, or those with
* legacy share options.
*/
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mountpoint,
sizeof (mountpoint), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, shareopts,
sizeof (shareopts), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshareopts,
sizeof (smbshareopts), NULL, NULL, 0, B_FALSE) == 0);
if (op == OP_SHARE && strcmp(shareopts, "off") == 0 &&
strcmp(smbshareopts, "off") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share '%s': "
"legacy share\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use share(1M) to "
"share this filesystem, or set "
"sharenfs property on\n"));
return (1);
}
/*
* We cannot share or mount legacy filesystems. If the
* shareopts is non-legacy but the mountpoint is legacy, we
* treat it as a legacy share.
*/
if (strcmp(mountpoint, "legacy") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"legacy mountpoint\n"), cmdname, zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use %s(1M) to "
"%s this filesystem\n"), cmdname, cmdname);
return (1);
}
if (strcmp(mountpoint, "none") == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': no "
"mountpoint set\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* canmount explicit outcome
* on no pass through
* on yes pass through
* off no return 0
* off yes display error, return 1
* noauto no return 0
* noauto yes pass through
*/
canmount = zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT);
if (canmount == ZFS_CANMOUNT_OFF) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"'canmount' property is set to 'off'\n"), cmdname,
zfs_get_name(zhp));
return (1);
} else if (canmount == ZFS_CANMOUNT_NOAUTO && !explicit) {
return (0);
}
/*
* If this filesystem is encrypted and does not have
* a loaded key, we can not mount it.
*/
if ((flags & MS_CRYPT) == 0 &&
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) != ZIO_CRYPT_OFF &&
zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS) ==
ZFS_KEYSTATUS_UNAVAILABLE) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"encryption key not loaded\n"), cmdname, zfs_get_name(zhp));
return (1);
}
/*
* If this filesystem is inconsistent and has a receive resume
* token, we can not mount it.
*/
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) &&
zfs_prop_get(zhp, ZFS_PROP_RECEIVE_RESUME_TOKEN,
NULL, 0, NULL, NULL, 0, B_TRUE) == 0) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot %s '%s': "
"Contains partially-completed state from "
"\"zfs receive -s\", which can be resumed with "
"\"zfs send -t\"\n"),
cmdname, zfs_get_name(zhp));
return (1);
}
/*
* At this point, we have verified that the mountpoint and/or
* shareopts are appropriate for auto management. If the
* filesystem is already mounted or shared, return (failing
* for explicit requests); otherwise mount or share the
* filesystem.
*/
switch (op) {
case OP_SHARE:
shared_nfs = zfs_is_shared_nfs(zhp, NULL);
shared_smb = zfs_is_shared_smb(zhp, NULL);
if ((shared_nfs && shared_smb) ||
(shared_nfs && strcmp(shareopts, "on") == 0 &&
strcmp(smbshareopts, "off") == 0) ||
(shared_smb && strcmp(smbshareopts, "on") == 0 &&
strcmp(shareopts, "off") == 0)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot share "
"'%s': filesystem already shared\n"),
zfs_get_name(zhp));
return (1);
}
if (!zfs_is_mounted(zhp, NULL) &&
zfs_mount(zhp, NULL, flags) != 0)
return (1);
if (protocol == NULL) {
if (zfs_shareall(zhp) != 0)
return (1);
} else if (strcmp(protocol, "nfs") == 0) {
if (zfs_share_nfs(zhp))
return (1);
} else if (strcmp(protocol, "smb") == 0) {
if (zfs_share_smb(zhp))
return (1);
} else {
(void) fprintf(stderr, gettext("cannot share "
"'%s': invalid share type '%s' "
"specified\n"),
zfs_get_name(zhp), protocol);
return (1);
}
break;
case OP_MOUNT:
if (options == NULL)
mnt.mnt_mntopts = "";
else
mnt.mnt_mntopts = (char *)options;
if (!hasmntopt(&mnt, MNTOPT_REMOUNT) &&
zfs_is_mounted(zhp, NULL)) {
if (!explicit)
return (0);
(void) fprintf(stderr, gettext("cannot mount "
"'%s': filesystem already mounted\n"),
zfs_get_name(zhp));
return (1);
}
if (zfs_mount(zhp, options, flags) != 0)
return (1);
break;
}
return (0);
}
/*
* Reports progress in the form "(current/total)". Not thread-safe.
*/
static void
report_mount_progress(int current, int total)
{
static time_t last_progress_time = 0;
time_t now = time(NULL);
char info[32];
/* report 1..n instead of 0..n-1 */
++current;
/* display header if we're here for the first time */
if (current == 1) {
set_progress_header(gettext("Mounting ZFS filesystems"));
} else if (current != total && last_progress_time + MOUNT_TIME >= now) {
/* too soon to report again */
return;
}
last_progress_time = now;
(void) sprintf(info, "(%d/%d)", current, total);
if (current == total)
finish_progress(info);
else
update_progress(info);
}
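/*
* Append a comma-separated set of mount options to mntopts, erroring out
* if the combined string would exceed MNT_LINE_MAX.
*/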
static void
append_options(char *mntopts, char *newopts)
{
int len = strlen(mntopts);
/* original length plus new string to append plus 1 for the comma */
if (len + 1 + strlen(newopts) >= MNT_LINE_MAX) {
(void) fprintf(stderr, gettext("the opts argument for "
"'%s' option is too long (more than %d chars)\n"),
"-o", MNT_LINE_MAX);
usage(B_FALSE);
}
if (*mntopts)
mntopts[len++] = ',';
(void) strcpy(&mntopts[len], newopts);
}
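/*
* Shared implementation of 'zfs mount' and 'zfs share': with -a walk all
* datasets; with no arguments, 'zfs mount' lists active ZFS mounts;
* otherwise operate on the single named filesystem.
*/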
static int
share_mount(int op, int argc, char **argv)
{
int do_all = 0;
boolean_t verbose = B_FALSE;
int c, ret = 0;
char *options = NULL;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, op == OP_MOUNT ? ":alvo:O" : "al"))
!= -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'v':
verbose = B_TRUE;
break;
case 'l':
flags |= MS_CRYPT;
break;
case 'o':
if (*optarg == '\0') {
(void) fprintf(stderr, gettext("empty mount "
"options (-o) specified\n"));
usage(B_FALSE);
}
if (options == NULL)
options = safe_malloc(MNT_LINE_MAX + 1);
/* option validation is done later */
append_options(options, optarg);
break;
case 'O':
flags |= MS_OVERLAY;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (do_all) {
zfs_handle_t **dslist = NULL;
size_t i, count = 0;
char *protocol = NULL;
if (op == OP_SHARE && argc > 0) {
if (strcmp(argv[0], "nfs") != 0 &&
strcmp(argv[0], "smb") != 0) {
(void) fprintf(stderr, gettext("share type "
"must be 'nfs' or 'smb'\n"));
usage(B_FALSE);
}
protocol = argv[0];
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
start_progress_timer();
get_all_datasets(&dslist, &count, verbose);
if (count == 0) {
if (options != NULL)
free(options);
return (0);
}
qsort(dslist, count, sizeof (void *), libzfs_dataset_cmp);
for (i = 0; i < count; i++) {
if (verbose)
report_mount_progress(i, count);
if (share_mount_one(dslist[i], op, flags, protocol,
B_FALSE, options) != 0)
ret = 1;
zfs_close(dslist[i]);
}
free(dslist);
} else if (argc == 0) {
struct mnttab entry;
if ((op == OP_SHARE) || (options != NULL)) {
(void) fprintf(stderr, gettext("missing filesystem "
"argument (specify -a for all)\n"));
usage(B_FALSE);
}
/*
* When mount is given no arguments, go through
* /proc/self/mounts and display any active ZFS mounts.
* We hide any snapshots, since they are controlled
* automatically.
*/
/* Reopen MNTTAB to prevent reading stale data from open file */
if (freopen(MNTTAB, "r", mnttab_file) == NULL) {
if (options != NULL)
free(options);
return (ENOENT);
}
while (getmntent(mnttab_file, &entry) == 0) {
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0 ||
strchr(entry.mnt_special, '@') != NULL)
continue;
(void) printf("%-30s %s\n", entry.mnt_special,
entry.mnt_mountp);
}
} else {
zfs_handle_t *zhp;
if (argc > 1) {
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
} else {
ret = share_mount_one(zhp, op, flags, NULL, B_TRUE,
options);
zfs_close(zhp);
}
}
if (options != NULL)
free(options);
return (ret);
}
/*
* zfs mount -a [nfs]
* zfs mount filesystem
*
* Mount all filesystems, or mount the given filesystem.
*/
static int
zfs_do_mount(int argc, char **argv)
{
return (share_mount(OP_MOUNT, argc, argv));
}
/*
* zfs share -a [nfs | smb]
* zfs share filesystem
*
* Share all filesystems, or share the given filesystem.
*/
static int
zfs_do_share(int argc, char **argv)
{
return (share_mount(OP_SHARE, argc, argv));
}
typedef struct unshare_unmount_node {
zfs_handle_t *un_zhp;
char *un_mountp;
uu_avl_node_t un_avlnode;
} unshare_unmount_node_t;
/* ARGSUSED */
static int
unshare_unmount_compare(const void *larg, const void *rarg, void *unused)
{
const unshare_unmount_node_t *l = larg;
const unshare_unmount_node_t *r = rarg;
return (strcmp(l->un_mountp, r->un_mountp));
}
/*
* Convenience routine used by zfs_do_umount() and manual_unmount(). Given an
* absolute path, find the entry in /proc/self/mounts, verify that it is a
* ZFS filesystem, and unmount it appropriately.
*/
static int
unshare_unmount_path(int op, char *path, int flags, boolean_t is_manual)
{
zfs_handle_t *zhp;
int ret = 0;
struct stat64 statbuf;
struct extmnttab entry;
const char *cmdname = (op == OP_SHARE) ? "unshare" : "unmount";
ino_t path_inode;
/*
* Search for the path in /proc/self/mounts. Rather than looking for the
* specific path, which can be fooled by non-standard paths (e.g. ".."
* or "//"), we stat() the path and search for the corresponding
* (major,minor) device pair.
*/
if (stat64(path, &statbuf) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': %s\n"),
cmdname, path, strerror(errno));
return (1);
}
path_inode = statbuf.st_ino;
/*
* Search for the given (major,minor) pair in the mount table.
*/
/* Reopen MNTTAB to prevent reading stale data from open file */
if (freopen(MNTTAB, "r", mnttab_file) == NULL)
return (ENOENT);
while ((ret = getextmntent(mnttab_file, &entry, 0)) == 0) {
if (entry.mnt_major == major(statbuf.st_dev) &&
entry.mnt_minor == minor(statbuf.st_dev))
break;
}
if (ret != 0) {
if (op == OP_SHARE) {
(void) fprintf(stderr, gettext("cannot %s '%s': not "
"currently mounted\n"), cmdname, path);
return (1);
}
(void) fprintf(stderr, gettext("warning: %s not in"
"/proc/self/mounts\n"), path);
if ((ret = umount2(path, flags)) != 0)
(void) fprintf(stderr, gettext("%s: %s\n"), path,
strerror(errno));
return (ret != 0);
}
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': not a ZFS "
"filesystem\n"), cmdname, path);
return (1);
}
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
ret = 1;
if (stat64(entry.mnt_mountp, &statbuf) != 0) {
(void) fprintf(stderr, gettext("cannot %s '%s': %s\n"),
cmdname, path, strerror(errno));
goto out;
} else if (statbuf.st_ino != path_inode) {
(void) fprintf(stderr, gettext("cannot "
"%s '%s': not a mountpoint\n"), cmdname, path);
goto out;
}
if (op == OP_SHARE) {
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char smbshare_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS, nfs_mnt_prop,
sizeof (nfs_mnt_prop), NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB, smbshare_prop,
sizeof (smbshare_prop), NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(smbshare_prop, "off") == 0) {
(void) fprintf(stderr, gettext("cannot unshare "
"'%s': legacy share\n"), path);
(void) fprintf(stderr, gettext("use exportfs(8) "
"or smbcontrol(1) to unshare this filesystem\n"));
} else if (!zfs_is_shared(zhp)) {
(void) fprintf(stderr, gettext("cannot unshare '%s': "
"not currently shared\n"), path);
} else {
ret = zfs_unshareall_bypath(zhp, path);
}
} else {
char mtpt_prop[ZFS_MAXPROPLEN];
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT, mtpt_prop,
sizeof (mtpt_prop), NULL, NULL, 0, B_FALSE) == 0);
if (is_manual) {
ret = zfs_unmount(zhp, NULL, flags);
} else if (strcmp(mtpt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot unmount "
"'%s': legacy mountpoint\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use umount(8) "
"to unmount this filesystem\n"));
} else {
ret = zfs_unmountall(zhp, flags);
}
}
out:
zfs_close(zhp);
return (ret != 0);
}
/*
* Generic callback for unsharing or unmounting a filesystem.
*/
static int
unshare_unmount(int op, int argc, char **argv)
{
int do_all = 0;
int flags = 0;
int ret = 0;
int c;
zfs_handle_t *zhp;
char nfs_mnt_prop[ZFS_MAXPROPLEN];
char sharesmb[ZFS_MAXPROPLEN];
/* check options */
while ((c = getopt(argc, argv, op == OP_SHARE ? ":a" : "af")) != -1) {
switch (c) {
case 'a':
do_all = 1;
break;
case 'f':
flags = MS_FORCE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (do_all) {
/*
* We could make use of zfs_for_each() to walk all datasets in
* the system, but this would be very inefficient, especially
* since we would have to linearly search /proc/self/mounts for
* each one. Instead, do one pass through /proc/self/mounts
* looking for zfs entries and call zfs_unmount() for each one.
*
* Things get a little tricky if the administrator has created
* mountpoints beneath other ZFS filesystems. In this case, we
* have to unmount the deepest filesystems first. To accomplish
* this, we place all the mountpoints in an AVL tree sorted by
* mountpoint path, and walk the result in reverse so that the
* deepest mountpoints are unmounted first.
*/
struct mnttab entry;
uu_avl_pool_t *pool;
uu_avl_t *tree = NULL;
unshare_unmount_node_t *node;
uu_avl_index_t idx;
uu_avl_walk_t *walk;
char *protocol = NULL;
if (op == OP_SHARE && argc > 0) {
if (strcmp(argv[0], "nfs") != 0 &&
strcmp(argv[0], "smb") != 0) {
(void) fprintf(stderr, gettext("share type "
"must be 'nfs' or 'smb'\n"));
usage(B_FALSE);
}
protocol = argv[0];
argc--;
argv++;
}
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if (((pool = uu_avl_pool_create("unmount_pool",
sizeof (unshare_unmount_node_t),
offsetof(unshare_unmount_node_t, un_avlnode),
unshare_unmount_compare, UU_DEFAULT)) == NULL) ||
((tree = uu_avl_create(pool, NULL, UU_DEFAULT)) == NULL))
nomem();
/* Reopen MNTTAB to prevent reading stale data from open file */
if (freopen(MNTTAB, "r", mnttab_file) == NULL)
return (ENOENT);
while (getmntent(mnttab_file, &entry) == 0) {
/* ignore non-ZFS entries */
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
/* ignore snapshots */
if (strchr(entry.mnt_special, '@') != NULL)
continue;
if ((zhp = zfs_open(g_zfs, entry.mnt_special,
ZFS_TYPE_FILESYSTEM)) == NULL) {
ret = 1;
continue;
}
/*
* Ignore datasets that are excluded/restricted by
* parent pool name.
*/
if (zpool_skip_pool(zfs_get_pool_name(zhp))) {
zfs_close(zhp);
continue;
}
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") != 0)
break;
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0)
continue;
break;
case OP_MOUNT:
/* Ignore legacy mounts */
verify(zfs_prop_get(zhp, ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "legacy") == 0)
continue;
/* Ignore canmount=noauto mounts */
if (zfs_prop_get_int(zhp, ZFS_PROP_CANMOUNT) ==
ZFS_CANMOUNT_NOAUTO)
continue;
default:
break;
}
node = safe_malloc(sizeof (unshare_unmount_node_t));
node->un_zhp = zhp;
node->un_mountp = safe_strdup(entry.mnt_mountp);
uu_avl_node_init(node, &node->un_avlnode, pool);
if (uu_avl_find(tree, node, NULL, &idx) == NULL) {
uu_avl_insert(tree, node, idx);
} else {
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
}
/*
* Walk the AVL tree in reverse, unmounting each filesystem and
* removing it from the AVL tree in the process.
*/
if ((walk = uu_avl_walk_start(tree,
UU_WALK_REVERSE | UU_WALK_ROBUST)) == NULL)
nomem();
while ((node = uu_avl_walk_next(walk)) != NULL) {
uu_avl_remove(tree, node);
switch (op) {
case OP_SHARE:
if (zfs_unshareall_bytype(node->un_zhp,
node->un_mountp, protocol) != 0)
ret = 1;
break;
case OP_MOUNT:
if (zfs_unmount(node->un_zhp,
node->un_zhp->zfs_name, flags) != 0)
ret = 1;
break;
}
zfs_close(node->un_zhp);
free(node->un_mountp);
free(node);
}
uu_avl_walk_end(walk);
uu_avl_destroy(tree);
uu_avl_pool_destroy(pool);
} else {
if (argc != 1) {
if (argc == 0)
(void) fprintf(stderr,
gettext("missing filesystem argument\n"));
else
(void) fprintf(stderr,
gettext("too many arguments\n"));
usage(B_FALSE);
}
/*
* We have an argument, but it may be a full path or a ZFS
* filesystem. Pass full paths off to unshare_unmount_path(); otherwise
* open the filesystem handle and unmount or unshare it directly.
*/
if (argv[0][0] == '/')
return (unshare_unmount_path(op, argv[0],
flags, B_FALSE));
if ((zhp = zfs_open(g_zfs, argv[0],
ZFS_TYPE_FILESYSTEM)) == NULL)
return (1);
verify(zfs_prop_get(zhp, op == OP_SHARE ?
ZFS_PROP_SHARENFS : ZFS_PROP_MOUNTPOINT,
nfs_mnt_prop, sizeof (nfs_mnt_prop), NULL,
NULL, 0, B_FALSE) == 0);
switch (op) {
case OP_SHARE:
verify(zfs_prop_get(zhp, ZFS_PROP_SHARENFS,
nfs_mnt_prop,
sizeof (nfs_mnt_prop),
NULL, NULL, 0, B_FALSE) == 0);
verify(zfs_prop_get(zhp, ZFS_PROP_SHARESMB,
sharesmb, sizeof (sharesmb), NULL, NULL,
0, B_FALSE) == 0);
if (strcmp(nfs_mnt_prop, "off") == 0 &&
strcmp(sharesmb, "off") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': legacy share\n"),
zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"unshare(1M) to unshare this "
"filesystem\n"));
ret = 1;
} else if (!zfs_is_shared(zhp)) {
(void) fprintf(stderr, gettext("cannot "
"unshare '%s': not currently "
"shared\n"), zfs_get_name(zhp));
ret = 1;
} else if (zfs_unshareall(zhp) != 0) {
ret = 1;
}
break;
case OP_MOUNT:
if (strcmp(nfs_mnt_prop, "legacy") == 0) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': legacy "
"mountpoint\n"), zfs_get_name(zhp));
(void) fprintf(stderr, gettext("use "
"umount(1M) to unmount this "
"filesystem\n"));
ret = 1;
} else if (!zfs_is_mounted(zhp, NULL)) {
(void) fprintf(stderr, gettext("cannot "
"unmount '%s': not currently "
"mounted\n"),
zfs_get_name(zhp));
ret = 1;
} else if (zfs_unmountall(zhp, flags) != 0) {
ret = 1;
}
break;
}
zfs_close(zhp);
}
return (ret);
}
/*
* zfs unmount -a
* zfs unmount filesystem
*
* Unmount all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unmount(int argc, char **argv)
{
return (unshare_unmount(OP_MOUNT, argc, argv));
}
/*
* zfs unshare -a
* zfs unshare filesystem
*
* Unshare all filesystems, or a specific ZFS filesystem.
*/
static int
zfs_do_unshare(int argc, char **argv)
{
return (unshare_unmount(OP_SHARE, argc, argv));
}
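/*
 * Look up 'command' in command_table.  On success the index is returned in
 * *idx and 0 is returned; otherwise 1 is returned.
 */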
static int
find_command_idx(char *command, int *idx)
{
int i;
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
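/*
 * zfs diff [-FHt] <snapshot> [snapshot|filesystem]
 *
 * Display the differences between the given snapshot and the later
 * snapshot (or the current state of the filesystem).
 *	-F	Include the type of each changed file (classify).
 *	-H	Produce machine-parsable output.
 *	-t	Include the inode change time of each changed file.
 */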
static int
zfs_do_diff(int argc, char **argv)
{
zfs_handle_t *zhp;
int flags = 0;
char *tosnap = NULL;
char *fromsnap = NULL;
char *atp, *copy;
int err = 0;
int c;
while ((c = getopt(argc, argv, "FHt")) != -1) {
switch (c) {
case 'F':
flags |= ZFS_DIFF_CLASSIFY;
break;
case 'H':
flags |= ZFS_DIFF_PARSEABLE;
break;
case 't':
flags |= ZFS_DIFF_TIMESTAMP;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr,
- gettext("must provide at least one snapshot name\n"));
+ gettext("must provide at least one snapshot name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
fromsnap = argv[0];
tosnap = (argc == 2) ? argv[1] : NULL;
copy = NULL;
if (*fromsnap != '@')
copy = strdup(fromsnap);
else if (tosnap)
copy = strdup(tosnap);
if (copy == NULL)
usage(B_FALSE);
if ((atp = strchr(copy, '@')) != NULL)
*atp = '\0';
if ((zhp = zfs_open(g_zfs, copy, ZFS_TYPE_FILESYSTEM)) == NULL) {
free(copy);
return (1);
}
free(copy);
/*
* Ignore SIGPIPE so that the library can give us
* information on any failure
*/
(void) sigignore(SIGPIPE);
err = zfs_show_diffs(zhp, STDOUT_FILENO, fromsnap, tosnap, flags);
zfs_close(zhp);
return (err != 0);
}
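+/*
+ * zfs remap <filesystem | volume>
+ *
+ * Remap the indirect blocks of the given filesystem or volume so they no
+ * longer reference previously removed vdevs; the actual work is done by
+ * zfs_remap_indirects() in libzfs.
+ */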
+static int
+zfs_do_remap(int argc, char **argv)
+{
+ const char *fsname;
+ int err = 0;
+ if (argc != 2) {
+ (void) fprintf(stderr, gettext("wrong number of arguments\n"));
+ usage(B_FALSE);
+ }
+
+ fsname = argv[1];
+ err = zfs_remap_indirects(g_zfs, fsname);
+
+ return (err);
+}
+
/*
* zfs bookmark <fs@snap> <fs#bmark>
*
* Creates a bookmark with the given name from the given snapshot.
*/
static int
zfs_do_bookmark(int argc, char **argv)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
char bookname[ZFS_MAX_DATASET_NAME_LEN];
zfs_handle_t *zhp;
nvlist_t *nvl;
int ret = 0;
int c;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
/* check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing snapshot argument\n"));
goto usage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing bookmark argument\n"));
goto usage;
}
if (strchr(argv[0], '@') == NULL) {
(void) fprintf(stderr,
gettext("invalid snapshot name '%s': "
"must contain a '@'\n"), argv[0]);
goto usage;
}
if (strchr(argv[1], '#') == NULL) {
(void) fprintf(stderr,
gettext("invalid bookmark name '%s': "
"must contain a '#'\n"), argv[1]);
goto usage;
}
if (argv[0][0] == '@') {
/*
* Snapshot name begins with @.
* Default to same fs as bookmark.
*/
(void) strlcpy(snapname, argv[1], sizeof (snapname));
*strchr(snapname, '#') = '\0';
(void) strlcat(snapname, argv[0], sizeof (snapname));
} else {
(void) strlcpy(snapname, argv[0], sizeof (snapname));
}
if (argv[1][0] == '#') {
/*
* Bookmark name begins with #.
* Default to same fs as snapshot.
*/
(void) strlcpy(bookname, argv[0], sizeof (bookname));
*strchr(bookname, '@') = '\0';
(void) strlcat(bookname, argv[1], sizeof (bookname));
} else {
(void) strlcpy(bookname, argv[1], sizeof (bookname));
}
zhp = zfs_open(g_zfs, snapname, ZFS_TYPE_SNAPSHOT);
if (zhp == NULL)
goto usage;
zfs_close(zhp);
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, bookname, snapname);
ret = lzc_bookmark(nvl, NULL);
fnvlist_free(nvl);
if (ret != 0) {
const char *err_msg;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create bookmark '%s'"), bookname);
switch (ret) {
case EXDEV:
err_msg = "bookmark is in a different pool";
break;
case EEXIST:
err_msg = "bookmark exists";
break;
case EINVAL:
err_msg = "invalid argument";
break;
case ENOTSUP:
err_msg = "bookmark feature not enabled";
break;
case ENOSPC:
err_msg = "out of space";
break;
case ENOENT:
err_msg = "dataset does not exist";
break;
default:
err_msg = "unknown error";
break;
}
(void) fprintf(stderr, "%s: %s\n", errbuf,
dgettext(TEXT_DOMAIN, err_msg));
}
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
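/*
 * zfs program [-jn] [-t <instruction limit>] [-m <memory limit>]
 *	<pool> <program file> [lua args ...]
 *
 * Run a Lua channel program against the given pool.  If the program file is
 * "-", the script is read from standard input.  Any remaining arguments are
 * passed to the script as a string array named "argv".
 *	-j	Print the program's return value as JSON.
 *	-n	Run the program in open (non-syncing) context.
 *	-t	Limit the number of Lua instructions the program may execute.
 *	-m	Limit the amount of memory the program may use, in bytes.
 */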
static int
zfs_do_channel_program(int argc, char **argv)
{
int ret, fd, c;
char *progbuf, *filename, *poolname;
size_t progsize, progread;
nvlist_t *outnvl;
uint64_t instrlimit = ZCP_DEFAULT_INSTRLIMIT;
uint64_t memlimit = ZCP_DEFAULT_MEMLIMIT;
boolean_t sync_flag = B_TRUE, json_output = B_FALSE;
zpool_handle_t *zhp;
/* check options */
while ((c = getopt(argc, argv, "nt:m:j")) != -1) {
switch (c) {
case 't':
case 'm': {
uint64_t arg;
char *endp;
errno = 0;
arg = strtoull(optarg, &endp, 0);
if (errno != 0 || *endp != '\0') {
(void) fprintf(stderr, gettext(
"invalid argument "
"'%s': expected integer\n"), optarg);
goto usage;
}
if (c == 't') {
if (arg > ZCP_MAX_INSTRLIMIT || arg == 0) {
(void) fprintf(stderr, gettext(
"Invalid instruction limit: "
"%s\n"), optarg);
return (1);
} else {
instrlimit = arg;
}
} else {
ASSERT3U(c, ==, 'm');
if (arg > ZCP_MAX_MEMLIMIT || arg == 0) {
(void) fprintf(stderr, gettext(
"Invalid memory limit: "
"%s\n"), optarg);
return (1);
} else {
memlimit = arg;
}
}
break;
}
case 'n': {
sync_flag = B_FALSE;
break;
}
case 'j': {
json_output = B_TRUE;
break;
}
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto usage;
}
}
argc -= optind;
argv += optind;
if (argc < 2) {
(void) fprintf(stderr,
gettext("invalid number of arguments\n"));
goto usage;
}
poolname = argv[0];
filename = argv[1];
if (strcmp(filename, "-") == 0) {
fd = 0;
filename = "standard input";
} else if ((fd = open(filename, O_RDONLY)) < 0) {
(void) fprintf(stderr, gettext("cannot open '%s': %s\n"),
filename, strerror(errno));
return (1);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
(void) fprintf(stderr, gettext("cannot open pool '%s'"),
poolname);
if (fd != 0)
(void) close(fd);
return (1);
}
zpool_close(zhp);
/*
* Read in the channel program, expanding the program buffer as
* necessary.
*/
progread = 0;
progsize = 1024;
progbuf = safe_malloc(progsize);
do {
ret = read(fd, progbuf + progread, progsize - progread);
progread += ret;
if (progread == progsize && ret > 0) {
progsize *= 2;
progbuf = safe_realloc(progbuf, progsize);
}
} while (ret > 0);
if (fd != 0)
(void) close(fd);
if (ret < 0) {
free(progbuf);
(void) fprintf(stderr,
gettext("cannot read '%s': %s\n"),
filename, strerror(errno));
return (1);
}
progbuf[progread] = '\0';
/*
* Any remaining arguments are passed as arguments to the lua script as
* a string array:
* {
* "argv" -> [ "arg 1", ... "arg n" ],
* }
*/
nvlist_t *argnvl = fnvlist_alloc();
fnvlist_add_string_array(argnvl, ZCP_ARG_CLIARGV, argv + 2, argc - 2);
if (sync_flag) {
ret = lzc_channel_program(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
} else {
ret = lzc_channel_program_nosync(poolname, progbuf,
instrlimit, memlimit, argnvl, &outnvl);
}
if (ret != 0) {
/*
* On error, report the error message handed back by lua if one
* exists. Otherwise, generate an appropriate error message,
* falling back on strerror() for an unexpected return code.
*/
char *errstring = NULL;
uint64_t instructions = 0;
if (nvlist_exists(outnvl, ZCP_RET_ERROR)) {
(void) nvlist_lookup_string(outnvl,
ZCP_RET_ERROR, &errstring);
if (errstring == NULL)
errstring = strerror(ret);
if (ret == ETIME) {
(void) nvlist_lookup_uint64(outnvl,
ZCP_ARG_INSTRLIMIT, &instructions);
}
} else {
switch (ret) {
case EINVAL:
errstring =
"Invalid instruction or memory limit.";
break;
case ENOMEM:
errstring = "Return value too large.";
break;
case ENOSPC:
errstring = "Memory limit exhausted.";
break;
case ETIME:
errstring = "Timed out.";
break;
case EPERM:
errstring = "Permission denied. Channel "
"programs must be run as root.";
break;
default:
errstring = strerror(ret);
}
}
(void) fprintf(stderr,
gettext("Channel program execution failed:\n%s\n"),
errstring);
if (ret == ETIME && instructions != 0)
(void) fprintf(stderr,
gettext("%llu Lua instructions\n"),
(u_longlong_t)instructions);
} else {
if (json_output) {
(void) nvlist_print_json(stdout, outnvl);
} else if (nvlist_empty(outnvl)) {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and did not produce output.\n"));
} else {
(void) fprintf(stdout, gettext("Channel program fully "
"executed and produced output:\n"));
dump_nvlist(outnvl, 4);
}
}
free(progbuf);
fnvlist_free(outnvl);
fnvlist_free(argnvl);
return (ret != 0);
usage:
usage(B_FALSE);
return (-1);
}
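/*
 * State shared with load_key_callback() while iterating datasets for
 * 'zfs load-key' and 'zfs unload-key'.
 */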
typedef struct loadkey_cbdata {
boolean_t cb_loadkey;
boolean_t cb_recursive;
boolean_t cb_noop;
char *cb_keylocation;
uint64_t cb_numfailed;
uint64_t cb_numattempted;
} loadkey_cbdata_t;
static int
load_key_callback(zfs_handle_t *zhp, void *data)
{
int ret;
boolean_t is_encroot;
loadkey_cbdata_t *cb = data;
uint64_t keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
/*
* If we are working recursively, we want to skip loading / unloading
* keys for non-encryption roots and datasets whose keys are already
* in the desired end-state.
*/
if (cb->cb_recursive) {
ret = zfs_crypto_get_encryption_root(zhp, &is_encroot, NULL);
if (ret != 0)
return (ret);
if (!is_encroot)
return (0);
if ((cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_AVAILABLE) ||
(!cb->cb_loadkey && keystatus == ZFS_KEYSTATUS_UNAVAILABLE))
return (0);
}
cb->cb_numattempted++;
if (cb->cb_loadkey)
ret = zfs_crypto_load_key(zhp, cb->cb_noop, cb->cb_keylocation);
else
ret = zfs_crypto_unload_key(zhp);
if (ret != 0) {
cb->cb_numfailed++;
return (ret);
}
return (0);
}
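/*
 * zfs load-key [-rn] [-L <keylocation>] <-a | filesystem>
 * zfs unload-key [-r] <-a | filesystem>
 *
 * Load or unload encryption keys; 'loadkey' selects which operation this
 * helper performs.
 *	-a	Operate on every encryption root on the system.
 *	-r	Operate recursively on all encryption roots below the dataset.
 *	-n	(load-key only) Dry run: check the key without loading it.
 *	-L	(load-key only) Use an alternate keylocation (must be "prompt"
 *		when combined with -r or -a).
 */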
static int
load_unload_keys(int argc, char **argv, boolean_t loadkey)
{
int c, ret = 0, flags = 0;
boolean_t do_all = B_FALSE;
loadkey_cbdata_t cb = { 0 };
cb.cb_loadkey = loadkey;
while ((c = getopt(argc, argv, "anrL:")) != -1) {
/* noop and alternate keylocations only apply to zfs load-key */
if (loadkey) {
switch (c) {
case 'n':
cb.cb_noop = B_TRUE;
continue;
case 'L':
cb.cb_keylocation = optarg;
continue;
default:
break;
}
}
switch (c) {
case 'a':
do_all = B_TRUE;
cb.cb_recursive = B_TRUE;
break;
case 'r':
flags |= ZFS_ITER_RECURSE;
cb.cb_recursive = B_TRUE;
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (!do_all && argc == 0) {
(void) fprintf(stderr,
gettext("Missing dataset argument or -a option\n"));
usage(B_FALSE);
}
if (do_all && argc != 0) {
(void) fprintf(stderr,
gettext("Cannot specify dataset with -a option\n"));
usage(B_FALSE);
}
if (cb.cb_recursive && cb.cb_keylocation != NULL &&
strcmp(cb.cb_keylocation, "prompt") != 0) {
(void) fprintf(stderr, gettext("alternate keylocation may only "
"be 'prompt' with -r or -a\n"));
usage(B_FALSE);
}
ret = zfs_for_each(argc, argv, flags,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, NULL, NULL, 0,
load_key_callback, &cb);
if (cb.cb_noop || (cb.cb_recursive && cb.cb_numattempted != 0)) {
(void) printf(gettext("%llu / %llu key(s) successfully %s\n"),
(u_longlong_t)(cb.cb_numattempted - cb.cb_numfailed),
(u_longlong_t)cb.cb_numattempted,
loadkey ? (cb.cb_noop ? "verified" : "loaded") :
"unloaded");
}
if (cb.cb_numfailed != 0)
ret = -1;
return (ret);
}
static int
zfs_do_load_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_TRUE));
}
static int
zfs_do_unload_key(int argc, char **argv)
{
return (load_unload_keys(argc, argv, B_FALSE));
}
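/*
 * zfs change-key [-l] [-i] [-o property=value] ... <dataset>
 *
 * Rewrap the encryption key for the given dataset.
 *	-l	Load the key first if it is not already available.
 *	-i	Inherit the key from the parent (cannot be combined with -o).
 *	-o	Set a new key-related property (e.g. keyformat, keylocation).
 */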
static int
zfs_do_change_key(int argc, char **argv)
{
int c, ret;
uint64_t keystatus;
boolean_t loadkey = B_FALSE, inheritkey = B_FALSE;
zfs_handle_t *zhp = NULL;
nvlist_t *props = fnvlist_alloc();
while ((c = getopt(argc, argv, "lio:")) != -1) {
switch (c) {
case 'l':
loadkey = B_TRUE;
break;
case 'i':
inheritkey = B_TRUE;
break;
case 'o':
if (!parseprop(props, optarg)) {
nvlist_free(props);
return (1);
}
break;
default:
(void) fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
usage(B_FALSE);
}
}
if (inheritkey && !nvlist_empty(props)) {
(void) fprintf(stderr,
gettext("Properties not allowed for inheriting\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing dataset argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("Too many arguments\n"));
usage(B_FALSE);
}
zhp = zfs_open(g_zfs, argv[argc - 1],
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME);
if (zhp == NULL)
usage(B_FALSE);
if (loadkey) {
keystatus = zfs_prop_get_int(zhp, ZFS_PROP_KEYSTATUS);
if (keystatus != ZFS_KEYSTATUS_AVAILABLE) {
ret = zfs_crypto_load_key(zhp, B_FALSE, NULL);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
}
/* refresh the properties so the new keystatus is visible */
zfs_refresh_properties(zhp);
}
ret = zfs_crypto_rewrap(zhp, props, inheritkey);
if (ret != 0) {
nvlist_free(props);
zfs_close(zhp);
return (-1);
}
nvlist_free(props);
zfs_close(zhp);
return (0);
}
/*
* 1) zfs project [-d|-r] <file|directory ...>
* List project ID and inherit flag of file(s) or directories.
* -d: List the directory itself, not its children.
* -r: List subdirectories recursively.
*
* 2) zfs project -C [-k] [-r] <file|directory ...>
* Clear project inherit flag and/or ID on the file(s) or directories.
* -k: Keep the project ID unchanged. If not specified, the project ID
* will be reset to zero.
* -r: Clear on subdirectories recursively.
*
* 3) zfs project -c [-0] [-d|-r] [-p id] <file|directory ...>
* Check project ID and inherit flag on the file(s) or directories,
* report the outliers.
* -0: Print file name followed by a NUL instead of newline.
* -d: Check the directory itself, not its children.
* -p: Specify the referenced ID for comparing with the target file(s)
* or directories' project IDs. If not specified, the target (top)
* directory's project ID will be used as the referenced one.
* -r: Check subdirectories recursively.
*
* 4) zfs project [-p id] [-r] [-s] <file|directory ...>
* Set project ID and/or inherit flag on the file(s) or directories.
* -p: Set the project ID as the given id.
* -r: Set on subdirectories recursively. If the "-p" option is not
*	specified, the top-level directory's project ID is used as the
*	given id, then both the project ID and the inherit flag are set
*	on all descendants of the top-level directory.
* -s: Set project inherit flag.
*/
static int
zfs_do_project(int argc, char **argv)
{
zfs_project_control_t zpc = {
.zpc_expected_projid = ZFS_INVALID_PROJID,
.zpc_op = ZFS_PROJECT_OP_DEFAULT,
.zpc_dironly = B_FALSE,
.zpc_keep_projid = B_FALSE,
.zpc_newline = B_TRUE,
.zpc_recursive = B_FALSE,
.zpc_set_flag = B_FALSE,
};
int ret = 0, c;
if (argc < 2)
usage(B_FALSE);
while ((c = getopt(argc, argv, "0Ccdkp:rs")) != -1) {
switch (c) {
case '0':
zpc.zpc_newline = B_FALSE;
break;
case 'C':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CLEAR;
break;
case 'c':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_op = ZFS_PROJECT_OP_CHECK;
break;
case 'd':
zpc.zpc_dironly = B_TRUE;
/* overwrite "-r" option */
zpc.zpc_recursive = B_FALSE;
break;
case 'k':
zpc.zpc_keep_projid = B_TRUE;
break;
case 'p': {
char *endptr;
errno = 0;
zpc.zpc_expected_projid = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid project ID '%s'\n"), optarg);
usage(B_FALSE);
}
if (zpc.zpc_expected_projid >= UINT32_MAX) {
(void) fprintf(stderr,
gettext("project ID must be less than "
"%u\n"), UINT32_MAX);
usage(B_FALSE);
}
break;
}
case 'r':
zpc.zpc_recursive = B_TRUE;
/* overwrite "-d" option */
zpc.zpc_dironly = B_FALSE;
break;
case 's':
if (zpc.zpc_op != ZFS_PROJECT_OP_DEFAULT) {
(void) fprintf(stderr, gettext("cannot "
"specify '-C' '-c' '-s' together\n"));
usage(B_FALSE);
}
zpc.zpc_set_flag = B_TRUE;
zpc.zpc_op = ZFS_PROJECT_OP_SET;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (zpc.zpc_op == ZFS_PROJECT_OP_DEFAULT) {
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID)
zpc.zpc_op = ZFS_PROJECT_OP_SET;
else
zpc.zpc_op = ZFS_PROJECT_OP_LIST;
}
switch (zpc.zpc_op) {
case ZFS_PROJECT_OP_LIST:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CHECK:
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_CLEAR:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
if (zpc.zpc_expected_projid != ZFS_INVALID_PROJID) {
(void) fprintf(stderr,
gettext("'-p' is useless together with '-C'\n"));
usage(B_FALSE);
}
break;
case ZFS_PROJECT_OP_SET:
if (zpc.zpc_dironly) {
(void) fprintf(stderr,
gettext("'-d' is useless for set project ID and/or "
"inherit flag\n"));
usage(B_FALSE);
}
if (zpc.zpc_keep_projid) {
(void) fprintf(stderr,
gettext("'-k' is only valid together with '-C'\n"));
usage(B_FALSE);
}
if (!zpc.zpc_newline) {
(void) fprintf(stderr,
gettext("'-0' is only valid together with '-c'\n"));
usage(B_FALSE);
}
break;
default:
ASSERT(0);
break;
}
argv += optind;
argc -= optind;
if (argc == 0) {
(void) fprintf(stderr,
gettext("missing file or directory target(s)\n"));
usage(B_FALSE);
}
for (int i = 0; i < argc; i++) {
int err;
err = zfs_project_handle(argv[i], &zpc);
if (err && !ret)
ret = err;
}
return (ret);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
(void) setlocale(LC_ALL, "");
(void) textdomain(TEXT_DOMAIN);
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* The 'umount' command is an alias for 'unmount'
*/
if (strcmp(cmdname, "umount") == 0)
cmdname = "unmount";
/*
* The 'recv' command is an alias for 'receive'
*/
if (strcmp(cmdname, "recv") == 0)
cmdname = "receive";
/*
* The 'snap' command is an alias for 'snapshot'
*/
if (strcmp(cmdname, "snap") == 0)
cmdname = "snapshot";
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) ||
(strcmp(cmdname, "--help") == 0))
usage(B_TRUE);
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s", libzfs_error_init(errno));
return (1);
}
mnttab_file = g_zfs->libzfs_mnttab;
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
libzfs_print_on_error(g_zfs, B_TRUE);
/*
* Run the appropriate command.
*/
libzfs_mnttab_cache(g_zfs, B_TRUE);
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, argv + 1);
} else if (strchr(cmdname, '=') != NULL) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, argv);
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
diff --git a/cmd/zpool/zpool_main.c b/cmd/zpool/zpool_main.c
index a3537b1771a2..453fb2131ffb 100644
--- a/cmd/zpool/zpool_main.c
+++ b/cmd/zpool/zpool_main.c
@@ -1,8170 +1,8349 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
* Copyright (c) 2012 by Frederik Wessels. All rights reserved.
* Copyright (c) 2012 by Cyril Plisko. All rights reserved.
* Copyright (c) 2013 by Prasad Joshi (sTec). All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
#include <assert.h>
#include <ctype.h>
#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <libgen.h>
#include <libintl.h>
#include <libuutil.h>
#include <locale.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <strings.h>
#include <unistd.h>
#include <pwd.h>
#include <zone.h>
#include <sys/wait.h>
#include <zfs_prop.h>
#include <sys/fs/zfs.h>
#include <sys/stat.h>
#include <sys/systeminfo.h>
#include <sys/fm/fs/zfs.h>
#include <sys/fm/util.h>
#include <sys/fm/protocol.h>
#include <sys/zfs_ioctl.h>
#include <sys/mount.h>
#include <sys/sysmacros.h>
#include <math.h>
#include <libzfs.h>
#include "zpool_util.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
#include "statcommon.h"
static int zpool_do_create(int, char **);
static int zpool_do_destroy(int, char **);
static int zpool_do_add(int, char **);
static int zpool_do_remove(int, char **);
static int zpool_do_labelclear(int, char **);
static int zpool_do_list(int, char **);
static int zpool_do_iostat(int, char **);
static int zpool_do_status(int, char **);
static int zpool_do_online(int, char **);
static int zpool_do_offline(int, char **);
static int zpool_do_clear(int, char **);
static int zpool_do_reopen(int, char **);
static int zpool_do_reguid(int, char **);
static int zpool_do_attach(int, char **);
static int zpool_do_detach(int, char **);
static int zpool_do_replace(int, char **);
static int zpool_do_split(int, char **);
static int zpool_do_scrub(int, char **);
static int zpool_do_import(int, char **);
static int zpool_do_export(int, char **);
static int zpool_do_upgrade(int, char **);
static int zpool_do_history(int, char **);
static int zpool_do_events(int, char **);
static int zpool_do_get(int, char **);
static int zpool_do_set(int, char **);
static int zpool_do_sync(int, char **);
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
#ifdef DEBUG
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
#endif
typedef enum {
HELP_ADD,
HELP_ATTACH,
HELP_CLEAR,
HELP_CREATE,
HELP_DESTROY,
HELP_DETACH,
HELP_EXPORT,
HELP_HISTORY,
HELP_IMPORT,
HELP_IOSTAT,
HELP_LABELCLEAR,
HELP_LIST,
HELP_OFFLINE,
HELP_ONLINE,
HELP_REPLACE,
HELP_REMOVE,
HELP_SCRUB,
HELP_STATUS,
HELP_UPGRADE,
HELP_EVENTS,
HELP_GET,
HELP_SET,
HELP_SPLIT,
HELP_SYNC,
HELP_REGUID,
HELP_REOPEN
} zpool_help_t;
/*
* Flags for stats to display with "zpool iostats"
*/
enum iostat_type {
IOS_DEFAULT = 0,
IOS_LATENCY = 1,
IOS_QUEUES = 2,
IOS_L_HISTO = 3,
IOS_RQ_HISTO = 4,
IOS_COUNT, /* always last element */
};
/* iostat_type entries as bitmasks */
#define IOS_DEFAULT_M (1ULL << IOS_DEFAULT)
#define IOS_LATENCY_M (1ULL << IOS_LATENCY)
#define IOS_QUEUES_M (1ULL << IOS_QUEUES)
#define IOS_L_HISTO_M (1ULL << IOS_L_HISTO)
#define IOS_RQ_HISTO_M (1ULL << IOS_RQ_HISTO)
/* Mask of all the histo bits */
#define IOS_ANYHISTO_M (IOS_L_HISTO_M | IOS_RQ_HISTO_M)
/*
* Lookup table for iostat flags to nvlist names. Basically a list
* of all the nvlists a flag requires. Also specifies the order in
* which data gets printed in zpool iostat.
*/
static const char *vsx_type_to_nvlist[IOS_COUNT][11] = {
[IOS_L_HISTO] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
NULL},
[IOS_LATENCY] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
NULL},
[IOS_QUEUES] = {
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
NULL},
[IOS_RQ_HISTO] = {
ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
NULL},
};
/*
* Given a cb->cb_flags with a histogram bit set, return the iostat_type.
* Right now, only one histo bit is ever set at one time, so we can
* just do a highbit64(a)
*/
#define IOS_HISTO_IDX(a) (highbit64(a & IOS_ANYHISTO_M) - 1)
typedef struct zpool_command {
const char *name;
int (*func)(int, char **);
zpool_help_t usage;
} zpool_command_t;
/*
* Master command table. Each ZFS command has a name, associated function, and
* usage message. The usage messages need to be internationalized, so we have
* to have a function to return the usage message based on a command index.
*
* These commands are organized according to how they are displayed in the usage
* message. An empty command (one with a NULL name) indicates an empty line in
* the generic usage message.
*/
static zpool_command_t command_table[] = {
{ "create", zpool_do_create, HELP_CREATE },
{ "destroy", zpool_do_destroy, HELP_DESTROY },
{ NULL },
{ "add", zpool_do_add, HELP_ADD },
{ "remove", zpool_do_remove, HELP_REMOVE },
{ NULL },
{ "labelclear", zpool_do_labelclear, HELP_LABELCLEAR },
{ NULL },
{ "list", zpool_do_list, HELP_LIST },
{ "iostat", zpool_do_iostat, HELP_IOSTAT },
{ "status", zpool_do_status, HELP_STATUS },
{ NULL },
{ "online", zpool_do_online, HELP_ONLINE },
{ "offline", zpool_do_offline, HELP_OFFLINE },
{ "clear", zpool_do_clear, HELP_CLEAR },
{ "reopen", zpool_do_reopen, HELP_REOPEN },
{ NULL },
{ "attach", zpool_do_attach, HELP_ATTACH },
{ "detach", zpool_do_detach, HELP_DETACH },
{ "replace", zpool_do_replace, HELP_REPLACE },
{ "split", zpool_do_split, HELP_SPLIT },
{ NULL },
{ "scrub", zpool_do_scrub, HELP_SCRUB },
{ NULL },
{ "import", zpool_do_import, HELP_IMPORT },
{ "export", zpool_do_export, HELP_EXPORT },
{ "upgrade", zpool_do_upgrade, HELP_UPGRADE },
{ "reguid", zpool_do_reguid, HELP_REGUID },
{ NULL },
{ "history", zpool_do_history, HELP_HISTORY },
{ "events", zpool_do_events, HELP_EVENTS },
{ NULL },
{ "get", zpool_do_get, HELP_GET },
{ "set", zpool_do_set, HELP_SET },
{ "sync", zpool_do_sync, HELP_SYNC },
};
#define NCOMMAND (ARRAY_SIZE(command_table))
static zpool_command_t *current_command;
static char history_str[HIS_MAX_RECORD_LEN];
static boolean_t log_history = B_TRUE;
static uint_t timestamp_fmt = NODATE;
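/*
 * Return the localized usage string for the given command index.
 */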
static const char *
get_usage(zpool_help_t idx)
{
switch (idx) {
case HELP_ADD:
return (gettext("\tadd [-fgLnP] [-o property=value] "
"<pool> <vdev> ...\n"));
case HELP_ATTACH:
return (gettext("\tattach [-f] [-o property=value] "
"<pool> <device> <new-device>\n"));
case HELP_CLEAR:
return (gettext("\tclear [-nF] <pool> [device]\n"));
case HELP_CREATE:
return (gettext("\tcreate [-fnd] [-o property=value] ... \n"
"\t [-O file-system-property=value] ... \n"
"\t [-m mountpoint] [-R root] <pool> <vdev> ...\n"));
case HELP_DESTROY:
return (gettext("\tdestroy [-f] <pool>\n"));
case HELP_DETACH:
return (gettext("\tdetach <pool> <device>\n"));
case HELP_EXPORT:
return (gettext("\texport [-af] <pool> ...\n"));
case HELP_HISTORY:
return (gettext("\thistory [-il] [<pool>] ...\n"));
case HELP_IMPORT:
return (gettext("\timport [-d dir] [-D]\n"
"\timport [-d dir | -c cachefile] [-F [-n]] [-l] "
"<pool | id>\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]] -a\n"
"\timport [-o mntopts] [-o property=value] ... \n"
"\t [-d dir | -c cachefile] [-D] [-l] [-f] [-m] [-N] "
"[-R root] [-F [-n]]\n"
"\t <pool | id> [newpool]\n"));
case HELP_IOSTAT:
return (gettext("\tiostat [[[-c [script1,script2,...]"
"[-lq]]|[-rw]] [-T d | u] [-ghHLpPvy]\n"
"\t [[pool ...]|[pool vdev ...]|[vdev ...]]"
" [interval [count]]\n"));
case HELP_LABELCLEAR:
return (gettext("\tlabelclear [-f] <vdev>\n"));
case HELP_LIST:
return (gettext("\tlist [-gHLpPv] [-o property[,...]] "
"[-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_OFFLINE:
return (gettext("\toffline [-f] [-t] <pool> <device> ...\n"));
case HELP_ONLINE:
return (gettext("\tonline <pool> <device> ...\n"));
case HELP_REPLACE:
return (gettext("\treplace [-f] [-o property=value] "
"<pool> <device> [new-device]\n"));
case HELP_REMOVE:
- return (gettext("\tremove <pool> <device> ...\n"));
+ return (gettext("\tremove [-nps] <pool> <device> ...\n"));
case HELP_REOPEN:
return (gettext("\treopen [-n] <pool>\n"));
case HELP_SCRUB:
return (gettext("\tscrub [-s | -p] <pool> ...\n"));
case HELP_STATUS:
return (gettext("\tstatus [-c [script1,script2,...]] [-gLPvxD]"
"[-T d|u] [pool] ... \n"
"\t [interval [count]]\n"));
case HELP_UPGRADE:
return (gettext("\tupgrade\n"
"\tupgrade -v\n"
"\tupgrade [-V version] <-a | pool ...>\n"));
case HELP_EVENTS:
return (gettext("\tevents [-vHf [pool] | -c]\n"));
case HELP_GET:
return (gettext("\tget [-Hp] [-o \"all\" | field[,...]] "
"<\"all\" | property[,...]> <pool> ...\n"));
case HELP_SET:
return (gettext("\tset <property=value> <pool> \n"));
case HELP_SPLIT:
return (gettext("\tsplit [-gLnPl] [-R altroot] [-o mntopts]\n"
"\t [-o property=value] <pool> <newpool> "
"[<device> ...]\n"));
case HELP_REGUID:
return (gettext("\treguid <pool>\n"));
case HELP_SYNC:
return (gettext("\tsync [pool] ...\n"));
}
abort();
/* NOTREACHED */
}
/*
* Callback routine that will print out a pool property value.
*/
static int
print_prop_cb(int prop, void *cb)
{
FILE *fp = cb;
(void) fprintf(fp, "\t%-15s ", zpool_prop_to_name(prop));
if (zpool_prop_readonly(prop))
(void) fprintf(fp, " NO ");
else
(void) fprintf(fp, " YES ");
if (zpool_prop_values(prop) == NULL)
(void) fprintf(fp, "-\n");
else
(void) fprintf(fp, "%s\n", zpool_prop_values(prop));
return (ZPROP_CONT);
}
/*
* Display usage message. If we're inside a command, display only the usage for
* that command. Otherwise, iterate over the entire command table and display
* a complete usage message.
*/
void
usage(boolean_t requested)
{
FILE *fp = requested ? stdout : stderr;
if (current_command == NULL) {
int i;
(void) fprintf(fp, gettext("usage: zpool command args ...\n"));
(void) fprintf(fp,
gettext("where 'command' is one of the following:\n\n"));
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
(void) fprintf(fp, "\n");
else
(void) fprintf(fp, "%s",
get_usage(command_table[i].usage));
}
} else {
(void) fprintf(fp, gettext("usage:\n"));
(void) fprintf(fp, "%s", get_usage(current_command->usage));
}
if (current_command != NULL &&
((strcmp(current_command->name, "set") == 0) ||
(strcmp(current_command->name, "get") == 0) ||
(strcmp(current_command->name, "list") == 0))) {
(void) fprintf(fp,
gettext("\nthe following properties are supported:\n"));
(void) fprintf(fp, "\n\t%-15s %s %s\n\n",
"PROPERTY", "EDIT", "VALUES");
/* Iterate over all properties */
(void) zprop_iter(print_prop_cb, fp, B_FALSE, B_TRUE,
ZFS_TYPE_POOL);
(void) fprintf(fp, "\t%-15s ", "feature@...");
(void) fprintf(fp, "YES disabled | enabled | active\n");
(void) fprintf(fp, gettext("\nThe feature@ properties must be "
"appended with a feature name.\nSee zpool-features(5).\n"));
}
/*
* See comments at end of main().
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
exit(requested ? 0 : 2);
}
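/*
 * Print the vdev configuration rooted at 'nv' as an indented tree.  When
 * print_logs is set, only log vdevs are printed; otherwise log vdevs are
 * skipped.
 */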
void
print_vdev_tree(zpool_handle_t *zhp, const char *name, nvlist_t *nv, int indent,
boolean_t print_logs, int name_flags)
{
nvlist_t **child;
uint_t c, children;
char *vname;
if (name != NULL)
(void) printf("\t%*s%s\n", indent, "", name);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if ((is_log && !print_logs) || (!is_log && print_logs))
continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c], name_flags);
print_vdev_tree(zhp, vname, child[c], indent + 2,
B_FALSE, name_flags);
free(vname);
}
}
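/*
 * Return B_TRUE if the given property list contains any feature@ properties.
 */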
static boolean_t
prop_list_contains_feature(nvlist_t *proplist)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(proplist, NULL); NULL != nvp;
nvp = nvlist_next_nvpair(proplist, nvp)) {
if (zpool_prop_feature(nvpair_name(nvp)))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Add a property pair (name, string-value) into a property nvlist.
*/
static int
add_prop_list(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop)
{
zpool_prop_t prop = ZPOOL_PROP_INVAL;
zfs_prop_t fprop;
nvlist_t *proplist;
const char *normnm;
char *strval;
if (*props == NULL &&
nvlist_alloc(props, NV_UNIQUE_NAME, 0) != 0) {
(void) fprintf(stderr,
gettext("internal error: out of memory\n"));
return (1);
}
proplist = *props;
if (poolprop) {
const char *vname = zpool_prop_to_name(ZPOOL_PROP_VERSION);
if ((prop = zpool_name_to_prop(propname)) == ZPOOL_PROP_INVAL &&
!zpool_prop_feature(propname)) {
(void) fprintf(stderr, gettext("property '%s' is "
"not a valid pool property\n"), propname);
return (2);
}
/*
* feature@ properties and version should not be specified
* at the same time.
*/
if ((prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname) &&
nvlist_exists(proplist, vname)) ||
(prop == ZPOOL_PROP_VERSION &&
prop_list_contains_feature(proplist))) {
(void) fprintf(stderr, gettext("'feature@' and "
"'version' properties cannot be specified "
"together\n"));
return (2);
}
if (zpool_prop_feature(propname))
normnm = propname;
else
normnm = zpool_prop_to_name(prop);
} else {
if ((fprop = zfs_name_to_prop(propname)) != ZPROP_INVAL) {
normnm = zfs_prop_to_name(fprop);
} else {
normnm = propname;
}
}
if (nvlist_lookup_string(proplist, normnm, &strval) == 0 &&
prop != ZPOOL_PROP_CACHEFILE) {
(void) fprintf(stderr, gettext("property '%s' "
"specified multiple times\n"), propname);
return (2);
}
if (nvlist_add_string(proplist, normnm, propval) != 0) {
(void) fprintf(stderr, gettext("internal "
"error: out of memory\n"));
return (1);
}
return (0);
}
/*
* Set a default property pair (name, string-value) in a property nvlist
*/
static int
add_prop_list_default(const char *propname, char *propval, nvlist_t **props,
boolean_t poolprop)
{
char *pval;
if (nvlist_lookup_string(*props, propname, &pval) == 0)
return (0);
return (add_prop_list(propname, propval, props, B_TRUE));
}
/*
* zpool add [-fgLnP] [-o property=value] <pool> <vdev> ...
*
* -f Force addition of devices, even if they appear in use
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not add the devices, but display the resulting layout if
* they were to be added.
* -o Set property=value.
* -P Display full path for vdev name.
*
* Adds the given vdevs to 'pool'. As with create, the bulk of this work is
* handled by get_vdev_spec(), which constructs the nvlist needed to pass to
* libzfs.
*/
int
zpool_do_add(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
int name_flags = 0;
int c;
nvlist_t *nvroot;
char *poolname;
int ret;
zpool_handle_t *zhp;
nvlist_t *config;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, "fgLno:P")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'g':
name_flags |= VDEV_NAME_GUID;
break;
case 'L':
name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case 'P':
name_flags |= VDEV_NAME_PATH;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
argc--;
argv++;
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
/* pass off to get_vdev_spec for processing */
nvroot = make_root_vdev(zhp, props, force, !force, B_FALSE, dryrun,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
return (1);
}
if (dryrun) {
nvlist_t *poolnvroot;
nvlist_t **l2child;
uint_t l2children, c;
char *vname;
boolean_t hadcache = B_FALSE;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&poolnvroot) == 0);
(void) printf(gettext("would update '%s' to the following "
"configuration:\n"), zpool_get_name(zhp));
/* print original main pool and new tree */
print_vdev_tree(zhp, poolname, poolnvroot, 0, B_FALSE,
name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0, B_FALSE, name_flags);
/* Do the same for the logs */
if (num_logs(poolnvroot) > 0) {
print_vdev_tree(zhp, "logs", poolnvroot, 0, B_TRUE,
name_flags);
print_vdev_tree(zhp, NULL, nvroot, 0, B_TRUE,
name_flags);
} else if (num_logs(nvroot) > 0) {
print_vdev_tree(zhp, "logs", nvroot, 0, B_TRUE,
name_flags);
}
/* Do the same for the caches */
if (nvlist_lookup_nvlist_array(poolnvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
hadcache = B_TRUE;
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2child, &l2children) == 0 && l2children) {
if (!hadcache)
(void) printf(gettext("\tcache\n"));
for (c = 0; c < l2children; c++) {
vname = zpool_vdev_name(g_zfs, NULL,
l2child[c], name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
ret = 0;
} else {
ret = (zpool_add(zhp, nvroot) != 0);
}
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool remove <pool> <vdev> ...
*
- * Removes the given vdev from the pool. Currently, this supports removing
- * spares, cache, and log devices from the pool.
+ * Removes the given vdev from the pool.
*/
int
zpool_do_remove(int argc, char **argv)
{
char *poolname;
int i, ret = 0;
zpool_handle_t *zhp = NULL;
+ boolean_t stop = B_FALSE;
+ int c;
+ boolean_t noop = B_FALSE;
+ boolean_t parsable = B_FALSE;
- argc--;
- argv++;
+ /* check options */
+ while ((c = getopt(argc, argv, "nps")) != -1) {
+ switch (c) {
+ case 'n':
+ noop = B_TRUE;
+ break;
+ case 'p':
+ parsable = B_TRUE;
+ break;
+ case 's':
+ stop = B_TRUE;
+ break;
+ case '?':
+ (void) fprintf(stderr, gettext("invalid option '%c'\n"),
+ optopt);
+ usage(B_FALSE);
+ }
+ }
+
+ argc -= optind;
+ argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
- if (argc < 2) {
- (void) fprintf(stderr, gettext("missing device\n"));
- usage(B_FALSE);
- }
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
- for (i = 1; i < argc; i++) {
- if (zpool_vdev_remove(zhp, argv[i]) != 0)
+ if (stop && noop) {
+ (void) fprintf(stderr, gettext("stop request ignored\n"));
+ return (0);
+ }
+
+ if (stop) {
+ if (argc > 1) {
+ (void) fprintf(stderr, gettext("too many arguments\n"));
+ usage(B_FALSE);
+ }
+ if (zpool_vdev_remove_cancel(zhp) != 0)
ret = 1;
+ } else {
+ if (argc < 2) {
+ (void) fprintf(stderr, gettext("missing device\n"));
+ usage(B_FALSE);
+ }
+
+ for (i = 1; i < argc; i++) {
+ if (noop) {
+ uint64_t size;
+
+ if (zpool_vdev_indirect_size(zhp, argv[i],
+ &size) != 0) {
+ ret = 1;
+ break;
+ }
+ if (parsable) {
+ (void) printf("%s %llu\n",
+ argv[i], (unsigned long long)size);
+ } else {
+ char valstr[32];
+ zfs_nicenum(size, valstr,
+ sizeof (valstr));
+ (void) printf("Memory that will be "
+ "used after removing %s: %s\n",
+ argv[i], valstr);
+ }
+ } else {
+ if (zpool_vdev_remove(zhp, argv[i]) != 0)
+ ret = 1;
+ }
+ }
}
zpool_close(zhp);
return (ret);
}
/*
* zpool labelclear [-f] <vdev>
*
* -f Force clearing the label for the vdevs which are members of
* the exported or foreign pools.
*
* Verifies that the vdev is not active and zeros out the label information
* on the device.
*/
int
zpool_do_labelclear(int argc, char **argv)
{
char vdev[MAXPATHLEN];
char *name = NULL;
struct stat st;
int c, fd = -1, ret = 0;
nvlist_t *config;
pool_state_t state;
boolean_t inuse = B_FALSE;
boolean_t force = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
default:
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get vdev name */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing vdev name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
/*
* Check if we were given absolute path and use it as is.
* Otherwise if the provided vdev name doesn't point to a file,
* try prepending expected disk paths and partition numbers.
*/
(void) strlcpy(vdev, argv[0], sizeof (vdev));
if (vdev[0] != '/' && stat(vdev, &st) != 0) {
int error;
error = zfs_resolve_shortname(argv[0], vdev, MAXPATHLEN);
if (error == 0 && zfs_dev_is_whole_disk(vdev)) {
if (zfs_append_partition(vdev, MAXPATHLEN) == -1)
error = ENOENT;
}
if (error || (stat(vdev, &st) != 0)) {
(void) fprintf(stderr, gettext(
"failed to find device %s, try specifying absolute "
"path instead\n"), argv[0]);
return (1);
}
}
if ((fd = open(vdev, O_RDWR)) < 0) {
(void) fprintf(stderr, gettext("failed to open %s: %s\n"),
vdev, strerror(errno));
return (1);
}
if (ioctl(fd, BLKFLSBUF) != 0)
(void) fprintf(stderr, gettext("failed to invalidate "
"cache for %s: %s\n"), vdev, strerror(errno));
if (zpool_read_label(fd, &config, NULL) != 0 || config == NULL) {
(void) fprintf(stderr,
gettext("failed to check state for %s\n"), vdev);
ret = 1;
goto errout;
}
nvlist_free(config);
ret = zpool_in_use(g_zfs, fd, &state, &name, &inuse);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to check state for %s\n"), vdev);
ret = 1;
goto errout;
}
if (!inuse)
goto wipe_label;
switch (state) {
default:
case POOL_STATE_ACTIVE:
case POOL_STATE_SPARE:
case POOL_STATE_L2CACHE:
(void) fprintf(stderr, gettext(
"%s is a member (%s) of pool \"%s\"\n"),
vdev, zpool_pool_state_to_name(state), name);
ret = 1;
goto errout;
case POOL_STATE_EXPORTED:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of exported pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_POTENTIALLY_ACTIVE:
if (force)
break;
(void) fprintf(stderr, gettext(
"use '-f' to override the following error:\n"
"%s is a member of potentially active pool \"%s\"\n"),
vdev, name);
ret = 1;
goto errout;
case POOL_STATE_DESTROYED:
/* inuse should never be set for a destroyed pool */
assert(0);
break;
}
wipe_label:
ret = zpool_clear_label(fd);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to clear label for %s\n"), vdev);
}
errout:
free(name);
(void) close(fd);
return (ret);
}
/*
* zpool create [-fnd] [-o property=value] ...
* [-O file-system-property=value] ...
* [-R root] [-m mountpoint] <pool> <dev> ...
*
* -f Force creation, even if devices appear in use
* -n Do not create the pool, but display the resulting layout if it
* were to be created.
* -R Create a pool under an alternate root
* -m Set default mountpoint for the root dataset. By default it's
* '/<pool>'
* -o Set property=value.
* -o Set feature@feature=enabled|disabled.
* -d Don't automatically enable all supported pool features
* (individual features can be enabled with -o).
* -O Set fsproperty=value in the pool's root file system
*
* Creates the named pool according to the given vdev specification. The
* bulk of the vdev processing is done in get_vdev_spec() in zpool_vdev.c. Once
* we get the nvlist back from get_vdev_spec(), we either print out the contents
* (if '-n' was specified), or pass it to libzfs to do the creation.
*/
int
zpool_do_create(int argc, char **argv)
{
boolean_t force = B_FALSE;
boolean_t dryrun = B_FALSE;
boolean_t enable_all_pool_feat = B_TRUE;
int c;
nvlist_t *nvroot = NULL;
char *poolname;
char *tname = NULL;
int ret = 1;
char *altroot = NULL;
char *mountpoint = NULL;
nvlist_t *fsprops = NULL;
nvlist_t *props = NULL;
char *propval;
/* check options */
while ((c = getopt(argc, argv, ":fndR:m:o:O:t:")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'd':
enable_all_pool_feat = B_FALSE;
break;
case 'R':
altroot = optarg;
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto errout;
break;
case 'm':
/* Equivalent to -O mountpoint=optarg */
mountpoint = optarg;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
goto errout;
}
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval, &props, B_TRUE))
goto errout;
/*
* If the user is creating a pool that doesn't support
* feature flags, don't enable any features.
*/
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_VERSION) {
char *end;
u_longlong_t ver;
ver = strtoull(propval, &end, 10);
if (*end == '\0' &&
ver < SPA_VERSION_FEATURES) {
enable_all_pool_feat = B_FALSE;
}
}
if (zpool_name_to_prop(optarg) == ZPOOL_PROP_ALTROOT)
altroot = propval;
break;
case 'O':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -O option\n"));
goto errout;
}
*propval = '\0';
propval++;
/*
* Mountpoints are checked and then added later.
* Uniquely among properties, they can be specified
* more than once, to avoid conflict with -m.
*/
if (0 == strcmp(optarg,
zfs_prop_to_name(ZFS_PROP_MOUNTPOINT))) {
mountpoint = propval;
} else if (add_prop_list(optarg, propval, &fsprops,
B_FALSE)) {
goto errout;
}
break;
case 't':
/*
* Sanity check temporary pool name.
*/
if (strchr(optarg, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create "
"'%s': invalid character '/' in temporary "
"name\n"), optarg);
(void) fprintf(stderr, gettext("use 'zfs "
"create' to create a dataset\n"));
goto errout;
}
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_TNAME), optarg, &props, B_TRUE))
goto errout;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto errout;
tname = optarg;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
goto badusage;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
goto badusage;
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
goto badusage;
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing vdev specification\n"));
goto badusage;
}
poolname = argv[0];
/*
* As a special case, check for use of '/' in the name, and direct the
* user to use 'zfs create' instead.
*/
if (strchr(poolname, '/') != NULL) {
(void) fprintf(stderr, gettext("cannot create '%s': invalid "
"character '/' in pool name\n"), poolname);
(void) fprintf(stderr, gettext("use 'zfs create' to "
"create a dataset\n"));
goto errout;
}
/* pass off to get_vdev_spec for bulk processing */
nvroot = make_root_vdev(NULL, props, force, !force, B_FALSE, dryrun,
argc - 1, argv + 1);
if (nvroot == NULL)
goto errout;
/* make_root_vdev() allows 0 toplevel children if there are spares */
if (!zfs_allocatable_devs(nvroot)) {
(void) fprintf(stderr, gettext("invalid vdev "
"specification: at least one toplevel vdev must be "
"specified\n"));
goto errout;
}
if (altroot != NULL && altroot[0] != '/') {
(void) fprintf(stderr, gettext("invalid alternate root '%s': "
"must be an absolute path\n"), altroot);
goto errout;
}
/*
* Check the validity of the mountpoint and direct the user to use the
 * '-m' mountpoint option if it looks like it's in use.
*/
if (mountpoint == NULL ||
(strcmp(mountpoint, ZFS_MOUNTPOINT_LEGACY) != 0 &&
strcmp(mountpoint, ZFS_MOUNTPOINT_NONE) != 0)) {
char buf[MAXPATHLEN];
DIR *dirp;
if (mountpoint && mountpoint[0] != '/') {
(void) fprintf(stderr, gettext("invalid mountpoint "
"'%s': must be an absolute path, 'legacy', or "
"'none'\n"), mountpoint);
goto errout;
}
if (mountpoint == NULL) {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s/%s",
altroot, poolname);
else
(void) snprintf(buf, sizeof (buf), "/%s",
poolname);
} else {
if (altroot != NULL)
(void) snprintf(buf, sizeof (buf), "%s%s",
altroot, mountpoint);
else
(void) snprintf(buf, sizeof (buf), "%s",
mountpoint);
}
if ((dirp = opendir(buf)) == NULL && errno != ENOENT) {
(void) fprintf(stderr, gettext("mountpoint '%s' : "
"%s\n"), buf, strerror(errno));
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a different default\n"));
goto errout;
} else if (dirp) {
int count = 0;
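			/*
			 * An empty directory yields only the "." and ".."
			 * entries, so reading a third entry means the
			 * proposed mountpoint is not empty.
			 */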
while (count < 3 && readdir(dirp) != NULL)
count++;
(void) closedir(dirp);
if (count > 2) {
(void) fprintf(stderr, gettext("mountpoint "
"'%s' exists and is not empty\n"), buf);
(void) fprintf(stderr, gettext("use '-m' "
"option to provide a "
"different default\n"));
goto errout;
}
}
}
/*
* Now that the mountpoint's validity has been checked, ensure that
* the property is set appropriately prior to creating the pool.
*/
if (mountpoint != NULL) {
ret = add_prop_list(zfs_prop_to_name(ZFS_PROP_MOUNTPOINT),
mountpoint, &fsprops, B_FALSE);
if (ret != 0)
goto errout;
}
ret = 1;
if (dryrun) {
/*
* For a dry run invocation, print out a basic message and run
		 * through all the vdevs in the list, printing them out in an
		 * appropriate hierarchy.
*/
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), poolname);
print_vdev_tree(NULL, poolname, nvroot, 0, B_FALSE, 0);
if (num_logs(nvroot) > 0)
print_vdev_tree(NULL, "logs", nvroot, 0, B_TRUE, 0);
ret = 0;
} else {
/*
* Hand off to libzfs.
*/
spa_feature_t i;
for (i = 0; i < SPA_FEATURES; i++) {
char propname[MAXPATHLEN];
char *propval;
zfeature_info_t *feat = &spa_feature_table[i];
(void) snprintf(propname, sizeof (propname),
"feature@%s", feat->fi_uname);
/*
* Only features contained in props will be enabled:
* remove from the nvlist every ZFS_FEATURE_DISABLED
* value and add every missing ZFS_FEATURE_ENABLED if
* enable_all_pool_feat is set.
*/
if (!nvlist_lookup_string(props, propname, &propval)) {
if (strcmp(propval, ZFS_FEATURE_DISABLED) == 0)
(void) nvlist_remove_all(props,
propname);
} else if (enable_all_pool_feat) {
ret = add_prop_list(propname,
ZFS_FEATURE_ENABLED, &props, B_TRUE);
if (ret != 0)
goto errout;
}
}
ret = 1;
if (zpool_create(g_zfs, poolname,
nvroot, props, fsprops) == 0) {
zfs_handle_t *pool = zfs_open(g_zfs,
tname ? tname : poolname, ZFS_TYPE_FILESYSTEM);
if (pool != NULL) {
if (zfs_mount(pool, NULL, 0) == 0)
ret = zfs_shareall(pool);
zfs_close(pool);
}
} else if (libzfs_errno(g_zfs) == EZFS_INVALIDNAME) {
(void) fprintf(stderr, gettext("pool name may have "
"been omitted\n"));
}
}
errout:
nvlist_free(nvroot);
nvlist_free(fsprops);
nvlist_free(props);
return (ret);
badusage:
nvlist_free(fsprops);
nvlist_free(props);
usage(B_FALSE);
return (2);
}
/*
* zpool destroy <pool>
*
* -f Forcefully unmount any datasets
*
* Destroy the given pool. Automatically unmounts any datasets in the pool.
*/
int
zpool_do_destroy(int argc, char **argv)
{
boolean_t force = B_FALSE;
int c;
char *pool;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
pool = argv[0];
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
/*
* As a special case, check for use of '/' in the name, and
* direct the user to use 'zfs destroy' instead.
*/
if (strchr(pool, '/') != NULL)
(void) fprintf(stderr, gettext("use 'zfs destroy' to "
"destroy a dataset\n"));
return (1);
}
if (zpool_disable_datasets(zhp, force) != 0) {
(void) fprintf(stderr, gettext("could not destroy '%s': "
"could not unmount datasets\n"), zpool_get_name(zhp));
zpool_close(zhp);
return (1);
}
/* The history must be logged as part of the export */
log_history = B_FALSE;
ret = (zpool_destroy(zhp, history_str) != 0);
zpool_close(zhp);
return (ret);
}
typedef struct export_cbdata {
boolean_t force;
boolean_t hardforce;
} export_cbdata_t;
/*
* Export one pool
*/
int
zpool_export_one(zpool_handle_t *zhp, void *data)
{
export_cbdata_t *cb = data;
if (zpool_disable_datasets(zhp, cb->force) != 0)
return (1);
/* The history must be logged as part of the export */
log_history = B_FALSE;
if (cb->hardforce) {
if (zpool_export_force(zhp, history_str) != 0)
return (1);
} else if (zpool_export(zhp, cb->force, history_str) != 0) {
return (1);
}
return (0);
}
/*
* zpool export [-f] <pool> ...
*
* -a Export all pools
* -f Forcefully unmount datasets
*
* Export the given pools. By default, the command will attempt to cleanly
* unmount any active datasets within the pool. If the '-f' flag is specified,
* then the datasets will be forcefully unmounted.
*/
int
zpool_do_export(int argc, char **argv)
{
export_cbdata_t cb;
boolean_t do_all = B_FALSE;
boolean_t force = B_FALSE;
boolean_t hardforce = B_FALSE;
int c, ret;
/* check options */
while ((c = getopt(argc, argv, "afF")) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'f':
force = B_TRUE;
break;
case 'F':
hardforce = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.force = force;
cb.hardforce = hardforce;
argc -= optind;
argv += optind;
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL,
zpool_export_one, &cb));
}
/* check arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool argument\n"));
usage(B_FALSE);
}
ret = for_each_pool(argc, argv, B_TRUE, NULL, zpool_export_one, &cb);
return (ret);
}
/*
* Given a vdev configuration, determine the maximum width needed for the device
* name column.
*/
static int
max_width(zpool_handle_t *zhp, nvlist_t *nv, int depth, int max,
int name_flags)
{
char *name;
nvlist_t **child;
uint_t c, children;
int ret;
name = zpool_vdev_name(g_zfs, zhp, nv, name_flags);
if (strlen(name) + depth > max)
max = strlen(name) + depth;
free(name);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2,
max, name_flags)) > max)
max = ret;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2,
max, name_flags)) > max)
max = ret;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if ((ret = max_width(zhp, child[c], depth + 2,
max, name_flags)) > max)
max = ret;
}
return (max);
}
typedef struct spare_cbdata {
uint64_t cb_guid;
zpool_handle_t *cb_zhp;
} spare_cbdata_t;
static boolean_t
find_vdev(nvlist_t *nv, uint64_t search)
{
uint64_t guid;
nvlist_t **child;
uint_t c, children;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) == 0 &&
search == guid)
return (B_TRUE);
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) == 0) {
for (c = 0; c < children; c++)
if (find_vdev(child[c], search))
return (B_TRUE);
}
return (B_FALSE);
}
static int
find_spare(zpool_handle_t *zhp, void *data)
{
spare_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
if (find_vdev(nvroot, cbp->cb_guid)) {
cbp->cb_zhp = zhp;
return (1);
}
zpool_close(zhp);
return (0);
}
typedef struct status_cbdata {
int cb_count;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_allpools;
boolean_t cb_verbose;
boolean_t cb_explain;
boolean_t cb_first;
boolean_t cb_dedup_stats;
boolean_t cb_print_status;
vdev_cmd_data_list_t *vcdl;
} status_cbdata_t;
/* Return 1 if string is NULL, empty, or whitespace; return 0 otherwise. */
static int
is_blank_str(char *str)
{
while (str != NULL && *str != '\0') {
if (!isblank(*str))
return (0);
str++;
}
return (1);
}
/* Print command output lines for specific vdev in a specific pool */
static void
zpool_print_cmd(vdev_cmd_data_list_t *vcdl, const char *pool, char *path)
{
vdev_cmd_data_t *data;
int i, j;
char *val;
for (i = 0; i < vcdl->count; i++) {
if ((strcmp(vcdl->data[i].path, path) != 0) ||
(strcmp(vcdl->data[i].pool, pool) != 0)) {
/* Not the vdev we're looking for */
continue;
}
data = &vcdl->data[i];
/* Print out all the output values for this vdev */
for (j = 0; j < vcdl->uniq_cols_cnt; j++) {
val = NULL;
/* Does this vdev have values for this column? */
for (int k = 0; k < data->cols_cnt; k++) {
if (strcmp(data->cols[k],
vcdl->uniq_cols[j]) == 0) {
/* yes it does, record the value */
val = data->lines[k];
break;
}
}
/*
* Mark empty values with dashes to make output
* awk-able.
*/
if (is_blank_str(val))
val = "-";
printf("%*s", vcdl->uniq_cols_width[j], val);
if (j < vcdl->uniq_cols_cnt - 1)
printf(" ");
}
/* Print out any values that aren't in a column at the end */
for (j = data->cols_cnt; j < data->lines_cnt; j++) {
/* Did we have any columns? If so print a spacer. */
if (vcdl->uniq_cols_cnt > 0)
printf(" ");
val = data->lines[j];
printf("%s", val ? val : "");
}
break;
}
}
/*
* Print out configuration state as requested by status_callback.
*/
static void
print_status_config(zpool_handle_t *zhp, status_cbdata_t *cb, const char *name,
nvlist_t *nv, int depth, boolean_t isspare)
{
nvlist_t **child;
uint_t c, children;
pool_scan_stat_t *ps = NULL;
vdev_stat_t *vs;
char rbuf[6], wbuf[6], cbuf[6];
char *vname;
uint64_t notpresent;
spare_cbdata_t spare_cb;
const char *state;
+ char *type;
char *path = NULL;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
children = 0;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
+ verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
+
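+	/* Indirect vdevs (left behind by device removal) are not shown. */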
+ if (strcmp(type, VDEV_TYPE_INDIRECT) == 0)
+ return;
+
state = zpool_state_to_name(vs->vs_state, vs->vs_aux);
if (isspare) {
/*
* For hot spares, we use the terms 'INUSE' and 'AVAILABLE' for
* online drives.
*/
if (vs->vs_aux == VDEV_AUX_SPARED)
state = "INUSE";
else if (vs->vs_state == VDEV_STATE_HEALTHY)
state = "AVAIL";
}
(void) printf("\t%*s%-*s %-8s", depth, "", cb->cb_namewidth - depth,
name, state);
if (!isspare) {
zfs_nicenum(vs->vs_read_errors, rbuf, sizeof (rbuf));
zfs_nicenum(vs->vs_write_errors, wbuf, sizeof (wbuf));
zfs_nicenum(vs->vs_checksum_errors, cbuf, sizeof (cbuf));
(void) printf(" %5s %5s %5s", rbuf, wbuf, cbuf);
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&notpresent) == 0) {
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0);
(void) printf(" was %s", path);
} else if (vs->vs_aux != 0) {
(void) printf(" ");
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_SPARED:
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&spare_cb.cb_guid) == 0);
if (zpool_iter(g_zfs, find_spare, &spare_cb) == 1) {
if (strcmp(zpool_get_name(spare_cb.cb_zhp),
zpool_get_name(zhp)) == 0)
(void) printf(gettext("currently in "
"use"));
else
(void) printf(gettext("in use by "
"pool '%s'"),
zpool_get_name(spare_cb.cb_zhp));
zpool_close(spare_cb.cb_zhp);
} else {
(void) printf(gettext("currently in use"));
}
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_IO_FAILURE:
(void) printf(gettext("experienced I/O failures"));
break;
case VDEV_AUX_BAD_LOG:
(void) printf(gettext("bad intent log"));
break;
case VDEV_AUX_EXTERNAL:
(void) printf(gettext("external device fault"));
break;
case VDEV_AUX_SPLIT_POOL:
(void) printf(gettext("split into new pool"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
}
(void) nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_SCAN_STATS,
(uint64_t **)&ps, &c);
if (ps != NULL && ps->pss_state == DSS_SCANNING &&
vs->vs_scan_processed != 0 && children == 0) {
(void) printf(gettext(" (%s)"),
(ps->pss_func == POOL_SCAN_RESILVER) ?
"resilvering" : "repairing");
}
if (cb->vcdl != NULL) {
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
(void) printf("\n");
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE, ishole = B_FALSE;
/* Don't print logs or holes here */
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
if (islog || ishole)
continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_status_config(zhp, cb, vname, child[c], depth + 2,
isspare);
free(vname);
}
}
/*
* Print the configuration of an exported pool. Iterate over all vdevs in the
* pool, printing out the name and status for each one.
*/
static void
print_import_config(status_cbdata_t *cb, const char *name, nvlist_t *nv,
int depth)
{
nvlist_t **child;
uint_t c, children;
vdev_stat_t *vs;
char *type, *vname;
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (strcmp(type, VDEV_TYPE_MISSING) == 0 ||
strcmp(type, VDEV_TYPE_HOLE) == 0)
return;
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
(void) printf("\t%*s%-*s", depth, "", cb->cb_namewidth - depth, name);
(void) printf(" %s", zpool_state_to_name(vs->vs_state, vs->vs_aux));
if (vs->vs_aux != 0) {
(void) printf(" ");
switch (vs->vs_aux) {
case VDEV_AUX_OPEN_FAILED:
(void) printf(gettext("cannot open"));
break;
case VDEV_AUX_BAD_GUID_SUM:
(void) printf(gettext("missing device"));
break;
case VDEV_AUX_NO_REPLICAS:
(void) printf(gettext("insufficient replicas"));
break;
case VDEV_AUX_VERSION_NEWER:
(void) printf(gettext("newer version"));
break;
case VDEV_AUX_UNSUP_FEAT:
(void) printf(gettext("unsupported feature(s)"));
break;
case VDEV_AUX_ERR_EXCEEDED:
(void) printf(gettext("too many errors"));
break;
case VDEV_AUX_ACTIVE:
(void) printf(gettext("currently in use"));
break;
default:
(void) printf(gettext("corrupted data"));
break;
}
}
(void) printf("\n");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (is_log)
continue;
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
print_import_config(cb, vname, child[c], depth + 2);
free(vname);
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
(void) printf(gettext("\tcache\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
(void) printf(gettext("\tspares\n"));
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, NULL, child[c],
cb->cb_name_flags);
(void) printf("\t %s\n", vname);
free(vname);
}
}
}
/*
* Print log vdevs.
 * Logs are recorded as top-level vdevs in the main pool child array,
 * but with "is_log" set to 1. We use either print_status_config() or
 * print_import_config() to print the top-level logs, then any log
 * children (e.g. mirrored slogs) are printed recursively, which
 * works because only the top-level vdev is marked "is_log".
*/
static void
print_logs(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t *nv)
{
uint_t c, children;
nvlist_t **child;
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return;
(void) printf(gettext("\tlogs\n"));
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE;
char *name;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
if (!is_log)
continue;
name = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cb->cb_print_status)
print_status_config(zhp, cb, name, child[c], 2,
B_FALSE);
else
print_import_config(cb, name, child[c], 2);
free(name);
}
}
/*
* Display the status for the given pool.
*/
static void
show_import(nvlist_t *config)
{
uint64_t pool_state;
vdev_stat_t *vs;
char *name;
uint64_t guid;
uint64_t hostid = 0;
char *msgid;
char *hostname = "unknown";
nvlist_t *nvroot, *nvinfo;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t vsc;
char *comment;
status_cbdata_t cb = { 0 };
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&name) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&guid) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
reason = zpool_import_status(config, &msgid, &errata);
(void) printf(gettext(" pool: %s\n"), name);
(void) printf(gettext(" id: %llu\n"), (u_longlong_t)guid);
(void) printf(gettext(" state: %s"), health);
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext(" (DESTROYED)"));
(void) printf("\n");
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
(void) printf(gettext(" status: One or more devices are "
"missing from the system.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
(void) printf(gettext(" status: One or more devices contains "
"corrupted data.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
(void) printf(
gettext(" status: The pool data is corrupted.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
(void) printf(gettext(" status: One or more devices "
"are offlined.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
(void) printf(gettext(" status: The pool metadata is "
"corrupted.\n"));
break;
case ZPOOL_STATUS_VERSION_OLDER:
(void) printf(gettext(" status: The pool is formatted using a "
"legacy on-disk version.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
(void) printf(gettext(" status: The pool is formatted using an "
"incompatible version.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
(void) printf(gettext(" status: Some supported features are "
"not enabled on the pool.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
(void) printf(gettext("status: The pool uses the following "
"feature(s) not supported on this system:\n"));
zpool_print_unsup_feat(config);
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
(void) printf(gettext("status: The pool can only be accessed "
"in read-only mode on this system. It\n\tcannot be "
"accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
zpool_print_unsup_feat(config);
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
(void) printf(gettext(" status: The pool is currently "
"imported by another system.\n"));
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
(void) printf(gettext(" status: The pool has the "
"multihost property on. It cannot\n\tbe safely imported "
"when the system hostid is not set.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
(void) printf(gettext(" status: The pool was last accessed by "
"another system.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
case ZPOOL_STATUS_FAULTED_DEV_NR:
(void) printf(gettext(" status: One or more devices are "
"faulted.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
(void) printf(gettext(" status: An intent log record cannot be "
"read.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
(void) printf(gettext(" status: One or more devices were being "
"resilvered.\n"));
break;
case ZPOOL_STATUS_ERRATA:
(void) printf(gettext(" status: Errata #%d detected.\n"),
errata);
break;
default:
/*
* No other status can be seen when importing pools.
*/
assert(reason == ZPOOL_STATUS_OK);
}
/*
* Print out an action according to the overall state of the pool.
*/
if (vs->vs_state == VDEV_STATE_HEALTHY) {
if (reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric identifier, "
"though\n\tsome features will not be available "
"without an explicit 'zpool upgrade'.\n"));
} else if (reason == ZPOOL_STATUS_HOSTID_MISMATCH) {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier and\n\tthe '-f' flag.\n"));
} else if (reason == ZPOOL_STATUS_ERRATA) {
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
(void) printf(gettext(" action: The pool can "
"be imported using its name or numeric "
"identifier,\n\thowever there is a compat"
"ibility issue which should be corrected"
"\n\tby running 'zpool scrub'\n"));
break;
case ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY:
(void) printf(gettext(" action: The pool can"
"not be imported with this version of ZFS "
"due to\n\tan active asynchronous destroy. "
"Revert to an earlier version\n\tand "
"allow the destroy to complete before "
"updating.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext(" action: Existing "
"encrypted datasets contain an on-disk "
"incompatibility, which\n\tneeds to be "
"corrected. Backup these datasets to new "
"encrypted datasets\n\tand destroy the "
"old ones.\n"));
break;
default:
/*
* All errata must contain an action message.
*/
assert(0);
}
} else {
(void) printf(gettext(" action: The pool can be "
"imported using its name or numeric "
"identifier.\n"));
}
} else if (vs->vs_state == VDEV_STATE_DEGRADED) {
(void) printf(gettext(" action: The pool can be imported "
"despite missing or damaged devices. The\n\tfault "
"tolerance of the pool may be compromised if imported.\n"));
} else {
switch (reason) {
case ZPOOL_STATUS_VERSION_NEWER:
(void) printf(gettext(" action: The pool cannot be "
"imported. Access the pool on a system running "
"newer\n\tsoftware, or recreate the pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
(void) printf(gettext("action: The pool cannot be "
"imported. Access the pool on a system that "
"supports\n\tthe required feature(s), or recreate "
"the pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
(void) printf(gettext("action: The pool cannot be "
"imported in read-write mode. Import the pool "
"with\n"
"\t\"-o readonly=on\", access the pool on a system "
"that supports the\n\trequired feature(s), or "
"recreate the pool from backup.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_R:
case ZPOOL_STATUS_MISSING_DEV_NR:
case ZPOOL_STATUS_BAD_GUID_SUM:
(void) printf(gettext(" action: The pool cannot be "
"imported. Attach the missing\n\tdevices and try "
"again.\n"));
break;
case ZPOOL_STATUS_HOSTID_ACTIVE:
VERIFY0(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo));
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) printf(gettext(" action: The pool must be "
"exported from %s (hostid=%lx)\n\tbefore it "
"can be safely imported.\n"), hostname,
(unsigned long) hostid);
break;
case ZPOOL_STATUS_HOSTID_REQUIRED:
(void) printf(gettext(" action: Set a unique system "
"hostid with the zgenhostid(8) command.\n"));
break;
default:
(void) printf(gettext(" action: The pool cannot be "
"imported due to damaged devices or data.\n"));
}
}
/* Print the comment attached to the pool. */
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
(void) printf(gettext("comment: %s\n"), comment);
/*
* If the state is "closed" or "can't open", and the aux state
* is "corrupt data":
*/
if (((vs->vs_state == VDEV_STATE_CLOSED) ||
(vs->vs_state == VDEV_STATE_CANT_OPEN)) &&
(vs->vs_aux == VDEV_AUX_CORRUPT_DATA)) {
if (pool_state == POOL_STATE_DESTROYED)
(void) printf(gettext("\tThe pool was destroyed, "
"but can be imported using the '-Df' flags.\n"));
else if (pool_state != POOL_STATE_EXPORTED)
(void) printf(gettext("\tThe pool may be active on "
"another system, but can be imported using\n\t"
"the '-f' flag.\n"));
}
if (msgid != NULL)
(void) printf(gettext(" see: http://zfsonlinux.org/msg/%s\n"),
msgid);
(void) printf(gettext(" config:\n\n"));
cb.cb_namewidth = max_width(NULL, nvroot, 0, strlen(name),
VDEV_NAME_TYPE_ID);
if (cb.cb_namewidth < 10)
cb.cb_namewidth = 10;
print_import_config(&cb, name, nvroot, 0);
if (num_logs(nvroot) > 0)
print_logs(NULL, &cb, nvroot);
if (reason == ZPOOL_STATUS_BAD_GUID_SUM) {
(void) printf(gettext("\n\tAdditional devices are known to "
"be part of this pool, though their\n\texact "
"configuration cannot be determined.\n"));
}
}
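/*
 * Decide whether an import must be forced (-f): this is the case when the
 * pool was not cleanly exported and was last written by a different hostid,
 * or when the on-disk MMP (multihost) state is anything other than inactive.
 */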
static boolean_t
zfs_force_import_required(nvlist_t *config)
{
uint64_t state;
uint64_t hostid = 0;
nvlist_t *nvinfo;
state = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_HOSTID, &hostid);
if (state != POOL_STATE_EXPORTED && hostid != get_system_hostid())
return (B_TRUE);
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE)) {
mmp_state_t mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state != MMP_STATE_INACTIVE)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Perform the import for the given configuration. This passes the heavy
* lifting off to zpool_import_props(), and then mounts the datasets contained
* within the pool.
*/
static int
do_import(nvlist_t *config, const char *newname, const char *mntopts,
nvlist_t *props, int flags)
{
int ret = 0;
zpool_handle_t *zhp;
char *name;
uint64_t version;
name = fnvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME);
version = fnvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION);
if (!SPA_VERSION_IS_SUPPORTED(version)) {
(void) fprintf(stderr, gettext("cannot import '%s': pool "
"is formatted using an unsupported ZFS version\n"), name);
return (1);
} else if (zfs_force_import_required(config) &&
!(flags & ZFS_IMPORT_ANY_HOST)) {
mmp_state_t mmp_state = MMP_STATE_INACTIVE;
nvlist_t *nvinfo;
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_STATE))
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (mmp_state == MMP_STATE_ACTIVE) {
char *hostname = "<unknown>";
uint64_t hostid = 0;
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool is imported on %s (hostid: "
"0x%lx)\nExport the pool on the other system, "
"then run 'zpool import'.\n"),
name, hostname, (unsigned long) hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) fprintf(stderr, gettext("Cannot import '%s': "
"pool has the multihost property on and the\n"
"system's hostid is not set. Set a unique hostid "
"with the zgenhostid(8) command.\n"), name);
} else {
char *hostname = "<unknown>";
uint64_t timestamp = 0;
uint64_t hostid = 0;
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTNAME))
hostname = fnvlist_lookup_string(config,
ZPOOL_CONFIG_HOSTNAME);
if (nvlist_exists(config, ZPOOL_CONFIG_TIMESTAMP))
timestamp = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_TIMESTAMP);
if (nvlist_exists(config, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(config,
ZPOOL_CONFIG_HOSTID);
(void) fprintf(stderr, gettext("cannot import '%s': "
"pool was previously in use from another system.\n"
"Last accessed by %s (hostid=%lx) at %s"
"The pool can be imported, use 'zpool import -f' "
"to import the pool.\n"), name, hostname,
(unsigned long)hostid, ctime((time_t *)&timestamp));
}
return (1);
}
if (zpool_import_props(g_zfs, config, newname, props, flags) != 0)
return (1);
if (newname != NULL)
name = (char *)newname;
if ((zhp = zpool_open_canfail(g_zfs, name)) == NULL)
return (1);
/*
* Loading keys is best effort. We don't want to return immediately
* if it fails but we do want to give the error to the caller.
*/
if (flags & ZFS_IMPORT_LOAD_KEYS) {
ret = zfs_crypto_attempt_load_keys(g_zfs, name);
if (ret != 0)
ret = 1;
}
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
!(flags & ZFS_IMPORT_ONLY) &&
zpool_enable_datasets(zhp, mntopts, 0) != 0) {
zpool_close(zhp);
return (1);
}
zpool_close(zhp);
return (ret);
}
/*
* zpool import [-d dir] [-D]
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile] [-f] -a
* import [-o mntopts] [-o prop=value] ... [-R root] [-D] [-l]
* [-d dir | -c cachefile] [-f] [-n] [-F] <pool | id> [newpool]
*
* -c Read pool information from a cachefile instead of searching
* devices.
*
* -d Scan in a specific directory, other than /dev/. More than
* one directory can be specified using multiple '-d' options.
*
* -D Scan for previously destroyed pools or import all or only
* specified destroyed pools.
*
* -R Temporarily import the pool, with all mountpoints relative to
* the given root. The pool will remain exported when the machine
* is rebooted.
*
* -V Import even in the presence of faulted vdevs. This is an
* intentionally undocumented option for testing purposes, and
* treats the pool configuration as complete, leaving any bad
 * vdevs in the FAULTED state. In other words, it does a verbatim
* import.
*
* -f Force import, even if it appears that the pool is active.
*
* -F Attempt rewind if necessary.
*
* -n See if rewind would work, but don't actually rewind.
*
* -N Import the pool but don't mount datasets.
*
 * -T	Specify a starting txg to use for import. This is an
* intentionally undocumented option for testing purposes.
*
* -a Import all pools found.
*
* -l Load encryption keys while importing.
*
* -o Set property=value and/or temporary mount options (without '=').
*
 * -s	Scan using the default search path; the libblkid cache will
* not be consulted.
*
 * The import command scans for pools to import, and imports pools based on pool
* name and GUID. The pool can also be renamed as part of the import process.
*/
int
zpool_do_import(int argc, char **argv)
{
char **searchdirs = NULL;
char *env, *envdup = NULL;
int nsearch = 0;
int c;
int err = 0;
nvlist_t *pools = NULL;
boolean_t do_all = B_FALSE;
boolean_t do_destroyed = B_FALSE;
char *mntopts = NULL;
nvpair_t *elem;
nvlist_t *config;
uint64_t searchguid = 0;
char *searchname = NULL;
char *propval;
nvlist_t *found_config;
nvlist_t *policy = NULL;
nvlist_t *props = NULL;
boolean_t first;
int flags = ZFS_IMPORT_NORMAL;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
boolean_t do_scan = B_FALSE;
uint64_t pool_state, txg = -1ULL;
char *cachefile = NULL;
importargs_t idata = { 0 };
char *endptr;
/* check options */
while ((c = getopt(argc, argv, ":aCc:d:DEfFlmnNo:R:stT:VX")) != -1) {
switch (c) {
case 'a':
do_all = B_TRUE;
break;
case 'c':
cachefile = optarg;
break;
case 'd':
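			/*
			 * Append this -d argument by growing the search
			 * directory list one slot at a time.
			 */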
if (searchdirs == NULL) {
searchdirs = safe_malloc(sizeof (char *));
} else {
char **tmp = safe_malloc((nsearch + 1) *
sizeof (char *));
bcopy(searchdirs, tmp, nsearch *
sizeof (char *));
free(searchdirs);
searchdirs = tmp;
}
searchdirs[nsearch++] = optarg;
break;
case 'D':
do_destroyed = B_TRUE;
break;
case 'f':
flags |= ZFS_IMPORT_ANY_HOST;
break;
case 'F':
do_rewind = B_TRUE;
break;
case 'l':
flags |= ZFS_IMPORT_LOAD_KEYS;
break;
case 'm':
flags |= ZFS_IMPORT_MISSING_LOG;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'N':
flags |= ZFS_IMPORT_ONLY;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE))
goto error;
} else {
mntopts = optarg;
}
break;
case 'R':
if (add_prop_list(zpool_prop_to_name(
ZPOOL_PROP_ALTROOT), optarg, &props, B_TRUE))
goto error;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto error;
break;
case 's':
do_scan = B_TRUE;
break;
case 't':
flags |= ZFS_IMPORT_TEMP_NAME;
if (add_prop_list_default(zpool_prop_to_name(
ZPOOL_PROP_CACHEFILE), "none", &props, B_TRUE))
goto error;
break;
case 'T':
errno = 0;
txg = strtoull(optarg, &endptr, 0);
if (errno != 0 || *endptr != '\0') {
(void) fprintf(stderr,
gettext("invalid txg value\n"));
usage(B_FALSE);
}
rewind_policy = ZPOOL_DO_REWIND | ZPOOL_EXTREME_REWIND;
break;
case 'V':
flags |= ZFS_IMPORT_VERBATIM;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (cachefile && nsearch != 0) {
(void) fprintf(stderr, gettext("-c is incompatible with -d\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && (flags & ZFS_IMPORT_ONLY)) {
(void) fprintf(stderr, gettext("-l is incompatible with -N\n"));
usage(B_FALSE);
}
if ((flags & ZFS_IMPORT_LOAD_KEYS) && !do_all && argc == 0) {
(void) fprintf(stderr, gettext("-l is only meaningful during "
"an import\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In the future, we can capture further policy and include it here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint64(policy, ZPOOL_REWIND_REQUEST_TXG, txg) != 0 ||
nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0)
goto error;
/* check argument count */
if (do_all) {
if (argc != 0) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
} else {
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
}
/*
* Check for the effective uid. We do this explicitly here because
* otherwise any attempt to discover pools will silently fail.
*/
if (argc == 0 && geteuid() != 0) {
(void) fprintf(stderr, gettext("cannot "
"discover pools: permission denied\n"));
if (searchdirs != NULL)
free(searchdirs);
nvlist_free(props);
nvlist_free(policy);
return (1);
}
/*
* Depending on the arguments given, we do one of the following:
*
* <none> Iterate through all pools and display information about
* each one.
*
* -a Iterate through all pools and try to import each one.
*
* <id> Find the pool that corresponds to the given GUID/pool
* name and import that one.
*
	 * -D	Above options apply only to destroyed pools.
*/
if (argc != 0) {
char *endptr;
errno = 0;
searchguid = strtoull(argv[0], &endptr, 10);
if (errno != 0 || *endptr != '\0') {
searchname = argv[0];
searchguid = 0;
}
found_config = NULL;
/*
* User specified a name or guid. Ensure it's unique.
*/
idata.unique = B_TRUE;
}
/*
* Check the environment for the preferred search path.
*/
if ((searchdirs == NULL) && (env = getenv("ZPOOL_IMPORT_PATH"))) {
char *dir;
envdup = strdup(env);
dir = strtok(envdup, ":");
while (dir != NULL) {
if (searchdirs == NULL) {
searchdirs = safe_malloc(sizeof (char *));
} else {
char **tmp = safe_malloc((nsearch + 1) *
sizeof (char *));
bcopy(searchdirs, tmp, nsearch *
sizeof (char *));
free(searchdirs);
searchdirs = tmp;
}
searchdirs[nsearch++] = dir;
dir = strtok(NULL, ":");
}
}
idata.path = searchdirs;
idata.paths = nsearch;
idata.poolname = searchname;
idata.guid = searchguid;
idata.cachefile = cachefile;
idata.scan = do_scan;
pools = zpool_search_import(g_zfs, &idata);
if (pools != NULL && idata.exists &&
(argc == 1 || strcmp(argv[0], argv[1]) == 0)) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name already exists\n"),
argv[0]);
(void) fprintf(stderr, gettext("use the form '%s "
"<pool | id> <newpool>' to give it a new name\n"),
"zpool import");
err = 1;
} else if (pools == NULL && idata.exists) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"a pool with that name is already created/imported,\n"),
argv[0]);
(void) fprintf(stderr, gettext("and no additional pools "
"with that name were found\n"));
err = 1;
} else if (pools == NULL) {
if (argc != 0) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), argv[0]);
}
err = 1;
}
if (err == 1) {
if (searchdirs != NULL)
free(searchdirs);
if (envdup != NULL)
free(envdup);
nvlist_free(policy);
nvlist_free(pools);
nvlist_free(props);
return (1);
}
/*
* At this point we have a list of import candidate configs. Even if
* we were searching by pool name or guid, we still need to
* post-process the list to deal with pool state and possible
* duplicate names.
*/
err = 0;
elem = NULL;
first = B_TRUE;
while ((elem = nvlist_next_nvpair(pools, elem)) != NULL) {
verify(nvpair_value_nvlist(elem, &config) == 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE,
&pool_state) == 0);
if (!do_destroyed && pool_state == POOL_STATE_DESTROYED)
continue;
if (do_destroyed && pool_state != POOL_STATE_DESTROYED)
continue;
verify(nvlist_add_nvlist(config, ZPOOL_REWIND_POLICY,
policy) == 0);
if (argc == 0) {
if (first)
first = B_FALSE;
else if (!do_all)
(void) printf("\n");
if (do_all) {
err |= do_import(config, NULL, mntopts,
props, flags);
} else {
show_import(config);
}
} else if (searchname != NULL) {
char *name;
/*
* We are searching for a pool based on name.
*/
verify(nvlist_lookup_string(config,
ZPOOL_CONFIG_POOL_NAME, &name) == 0);
if (strcmp(name, searchname) == 0) {
if (found_config != NULL) {
(void) fprintf(stderr, gettext(
"cannot import '%s': more than "
"one matching pool\n"), searchname);
(void) fprintf(stderr, gettext(
"import by numeric ID instead\n"));
err = B_TRUE;
}
found_config = config;
}
} else {
uint64_t guid;
/*
* Search for a pool by guid.
*/
verify(nvlist_lookup_uint64(config,
ZPOOL_CONFIG_POOL_GUID, &guid) == 0);
if (guid == searchguid)
found_config = config;
}
}
/*
* If we were searching for a specific pool, verify that we found a
* pool, and then do the import.
*/
if (argc != 0 && err == 0) {
if (found_config == NULL) {
(void) fprintf(stderr, gettext("cannot import '%s': "
"no such pool available\n"), argv[0]);
err = B_TRUE;
} else {
err |= do_import(found_config, argc == 1 ? NULL :
argv[1], mntopts, props, flags);
}
}
/*
* If we were just looking for pools, report an error if none were
* found.
*/
if (argc == 0 && first)
(void) fprintf(stderr,
gettext("no pools available to import\n"));
error:
nvlist_free(props);
nvlist_free(pools);
nvlist_free(policy);
if (searchdirs != NULL)
free(searchdirs);
if (envdup != NULL)
free(envdup);
return (err ? 1 : 0);
}
/*
* zpool sync [-f] [pool] ...
*
 * -f	(undocumented) Force an uberblock (and config, including zpool cache file)
* update.
*
* Sync the specified pool(s).
* Without arguments "zpool sync" will sync all pools.
* This command initiates TXG sync(s) and will return after the TXG(s) commit.
*
*/
static int
zpool_do_sync(int argc, char **argv)
{
int ret;
boolean_t force = B_FALSE;
/* check options */
while ((ret = getopt(argc, argv, "f")) != -1) {
switch (ret) {
case 'f':
force = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_sync_one on all pools */
ret = for_each_pool(argc, argv, B_FALSE, NULL, zpool_sync_one, &force);
return (ret);
}
typedef struct iostat_cbdata {
uint64_t cb_flags;
int cb_name_flags;
int cb_namewidth;
int cb_iteration;
char **cb_vdev_names; /* Only show these vdevs */
unsigned int cb_vdev_names_count;
boolean_t cb_verbose;
boolean_t cb_literal;
boolean_t cb_scripted;
zpool_list_t *cb_list;
vdev_cmd_data_list_t *vcdl;
} iostat_cbdata_t;
/* iostat labels */
typedef struct name_and_columns {
const char *name; /* Column name */
unsigned int columns; /* Center name to this number of columns */
} name_and_columns_t;
#define IOSTAT_MAX_LABELS 11 /* Max number of labels on one line */
static const name_and_columns_t iostat_top_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"capacity", 2}, {"operations", 2}, {"bandwidth", 2},
{NULL}},
[IOS_LATENCY] = {{"total_wait", 2}, {"disk_wait", 2}, {"syncq_wait", 2},
{"asyncq_wait", 2}, {"scrub"}},
[IOS_QUEUES] = {{"syncq_read", 2}, {"syncq_write", 2},
{"asyncq_read", 2}, {"asyncq_write", 2}, {"scrubq_read", 2},
{NULL}},
[IOS_L_HISTO] = {{"total_wait", 2}, {"disk_wait", 2},
{"sync_queue", 2}, {"async_queue", 2}, {NULL}},
[IOS_RQ_HISTO] = {{"sync_read", 2}, {"sync_write", 2},
{"async_read", 2}, {"async_write", 2}, {"scrub", 2}, {NULL}},
};
/* Shorthand - if "columns" field not set, default to 1 column */
static const name_and_columns_t iostat_bottom_labels[][IOSTAT_MAX_LABELS] =
{
[IOS_DEFAULT] = {{"alloc"}, {"free"}, {"read"}, {"write"}, {"read"},
{"write"}, {NULL}},
[IOS_LATENCY] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"wait"}, {NULL}},
[IOS_QUEUES] = {{"pend"}, {"activ"}, {"pend"}, {"activ"}, {"pend"},
{"activ"}, {"pend"}, {"activ"}, {"pend"}, {"activ"}, {NULL}},
[IOS_L_HISTO] = {{"read"}, {"write"}, {"read"}, {"write"}, {"read"},
{"write"}, {"read"}, {"write"}, {"scrub"}, {NULL}},
[IOS_RQ_HISTO] = {{"ind"}, {"agg"}, {"ind"}, {"agg"}, {"ind"}, {"agg"},
{"ind"}, {"agg"}, {"ind"}, {"agg"}, {NULL}},
};
static const char *histo_to_title[] = {
[IOS_L_HISTO] = "latency",
[IOS_RQ_HISTO] = "req_size",
};
/*
* Return the number of labels in a null-terminated name_and_columns_t
* array.
*
*/
static unsigned int
label_array_len(const name_and_columns_t *labels)
{
int i = 0;
while (labels[i].name)
i++;
return (i);
}
/*
* Return the number of strings in a null-terminated string array.
* For example:
*
 * const char *foo[] = {"bar", "baz", NULL};
*
* returns 2
*/
static uint64_t
str_array_len(const char *array[])
{
uint64_t i = 0;
while (array[i])
i++;
return (i);
}
/*
* Return a default column width for default/latency/queue columns. This does
* not include histograms, which have their columns autosized.
*/
static unsigned int
default_column_width(iostat_cbdata_t *cb, enum iostat_type type)
{
unsigned long column_width = 5; /* Normal niceprint */
static unsigned long widths[] = {
/*
* Choose some sane default column sizes for printing the
* raw numbers.
*/
[IOS_DEFAULT] = 15, /* 1PB capacity */
[IOS_LATENCY] = 10, /* 1B ns = 10sec */
[IOS_QUEUES] = 6, /* 1M queue entries */
};
if (cb->cb_literal)
column_width = widths[type];
return (column_width);
}
/*
* Print the column labels, i.e:
*
* capacity operations bandwidth
* alloc free read write read write ...
*
* If force_column_width is set, use it for the column width. If not set, use
* the default column width.
*/
void
print_iostat_labels(iostat_cbdata_t *cb, unsigned int force_column_width,
const name_and_columns_t labels[][IOSTAT_MAX_LABELS])
{
int i, idx, s;
unsigned int text_start, rw_column_width, spaces_to_end;
uint64_t flags = cb->cb_flags;
uint64_t f;
unsigned int column_width = force_column_width;
/* For each bit set in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
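		/*
		 * idx is the zero-based index of the lowest flag bit still
		 * set; the loop update clears that bit, so each enabled
		 * stat type is visited once, lowest first.
		 */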
if (!force_column_width)
column_width = default_column_width(cb, idx);
/* Print our top labels centered over "read write" label. */
for (i = 0; i < label_array_len(labels[idx]); i++) {
const char *name = labels[idx][i].name;
/*
* We treat labels[][].columns == 0 as shorthand
* for one column. It makes writing out the label
* tables more concise.
*/
unsigned int columns = MAX(1, labels[idx][i].columns);
unsigned int slen = strlen(name);
rw_column_width = (column_width * columns) +
(2 * (columns - 1));
text_start = (int)((rw_column_width)/columns -
slen/columns);
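			/*
			 * Illustrative example: with column_width = 5 and
			 * columns = 2, rw_column_width is 12; centering the
			 * 10-character label "operations" gives text_start =
			 * 12/2 - 10/2 = 1, leaving one space on either side.
			 */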
printf(" "); /* Two spaces between columns */
/* Space from beginning of column to label */
for (s = 0; s < text_start; s++)
printf(" ");
printf("%s", name);
/* Print space after label to end of column */
spaces_to_end = rw_column_width - text_start - slen;
for (s = 0; s < spaces_to_end; s++)
printf(" ");
}
}
}
/*
* print_cmd_columns - Print custom column titles from -c
*
* If the user specified the "zpool status|iostat -c" then print their custom
* column titles in the header. For example, print_cmd_columns() would print
* the " col1 col2" part of this:
*
* $ zpool iostat -vc 'echo col1=val1; echo col2=val2'
* ...
* capacity operations bandwidth
* pool alloc free read write read write col1 col2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
* mypool 269K 1008M 0 0 107 946
* mirror 269K 1008M 0 0 107 946
* sdb - - 0 0 102 473 val1 val2
* sdc - - 0 0 5 473 val1 val2
* ---------- ----- ----- ----- ----- ----- ----- ---- ----
*/
void
print_cmd_columns(vdev_cmd_data_list_t *vcdl, int use_dashes)
{
int i, j;
vdev_cmd_data_t *data = &vcdl->data[0];
if (vcdl->count == 0 || data == NULL)
return;
/*
* Each vdev cmd should have the same column names unless the user did
* something weird with their cmd. Just take the column names from the
* first vdev and assume it works for all of them.
*/
for (i = 0; i < vcdl->uniq_cols_cnt; i++) {
printf(" ");
if (use_dashes) {
for (j = 0; j < vcdl->uniq_cols_width[i]; j++)
printf("-");
} else {
printf("%*s", vcdl->uniq_cols_width[i],
vcdl->uniq_cols[i]);
}
}
}
/*
* Utility function to print out a line of dashes like:
*
* -------------------------------- ----- ----- ----- ----- -----
*
* ...or a dashed named-row line like:
*
* logs - - - - -
*
* @cb: iostat data
*
* @force_column_width If non-zero, use the value as the column width.
* Otherwise use the default column widths.
*
* @name: Print a dashed named-row line starting
* with @name. Otherwise, print a regular
* dashed line.
*/
static void
print_iostat_dashes(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *name)
{
int i;
unsigned int namewidth;
uint64_t flags = cb->cb_flags;
uint64_t f;
int idx;
const name_and_columns_t *labels;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdev_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
name ? strlen(name) : 0);
if (name) {
printf("%-*s", namewidth, name);
} else {
for (i = 0; i < namewidth; i++)
(void) printf("-");
}
/* For each bit in flags */
for (f = flags; f; f &= ~(1ULL << idx)) {
unsigned int column_width;
idx = lowbit64(f) - 1;
if (force_column_width)
column_width = force_column_width;
else
column_width = default_column_width(cb, idx);
labels = iostat_bottom_labels[idx];
for (i = 0; i < label_array_len(labels); i++) {
if (name)
printf(" %*s-", column_width - 1, " ");
else
printf(" %.*s", column_width,
"--------------------");
}
}
}
static void
print_iostat_separator_impl(iostat_cbdata_t *cb,
unsigned int force_column_width)
{
print_iostat_dashes(cb, force_column_width, NULL);
}
static void
print_iostat_separator(iostat_cbdata_t *cb)
{
print_iostat_separator_impl(cb, 0);
}
static void
print_iostat_header_impl(iostat_cbdata_t *cb, unsigned int force_column_width,
const char *histo_vdev_name)
{
unsigned int namewidth;
const char *title;
if (cb->cb_flags & IOS_ANYHISTO_M) {
title = histo_to_title[IOS_HISTO_IDX(cb->cb_flags)];
} else if (cb->cb_vdev_names_count) {
title = "vdev";
} else {
title = "pool";
}
namewidth = MAX(MAX(strlen(title), cb->cb_namewidth),
histo_vdev_name ? strlen(histo_vdev_name) : 0);
if (histo_vdev_name)
printf("%-*s", namewidth, histo_vdev_name);
else
printf("%*s", namewidth, "");
print_iostat_labels(cb, force_column_width, iostat_top_labels);
printf("\n");
printf("%-*s", namewidth, title);
print_iostat_labels(cb, force_column_width, iostat_bottom_labels);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 0);
printf("\n");
print_iostat_separator_impl(cb, force_column_width);
if (cb->vcdl != NULL)
print_cmd_columns(cb->vcdl, 1);
printf("\n");
}
static void
print_iostat_header(iostat_cbdata_t *cb)
{
print_iostat_header_impl(cb, 0, NULL);
}
/*
* Display a single statistic.
*/
static void
print_one_stat(uint64_t value, enum zfs_nicenum_format format,
unsigned int column_size, boolean_t scripted)
{
char buf[64];
zfs_nicenum_format(value, buf, sizeof (buf), format);
if (scripted)
printf("\t%s", buf);
else
printf(" %*s", column_size, buf);
}
/*
* Calculate the default vdev stats
*
* Subtract oldvs from newvs, apply a scaling factor, and save the resulting
* stats into calcvs.
*/
static void
calc_default_iostats(vdev_stat_t *oldvs, vdev_stat_t *newvs,
vdev_stat_t *calcvs)
{
int i;
memcpy(calcvs, newvs, sizeof (*calcvs));
for (i = 0; i < ARRAY_SIZE(calcvs->vs_ops); i++)
calcvs->vs_ops[i] = (newvs->vs_ops[i] - oldvs->vs_ops[i]);
for (i = 0; i < ARRAY_SIZE(calcvs->vs_bytes); i++)
calcvs->vs_bytes[i] = (newvs->vs_bytes[i] - oldvs->vs_bytes[i]);
}
/*
* Internal representation of the extended iostats data.
*
* The extended iostat stats are exported in nvlists as either uint64_t arrays
* or single uint64_t's. We make both look like arrays to make them easier
* to process. In order to make single uint64_t's look like arrays, we set
* __data to the stat data, and then set *data = &__data with count = 1. Then,
* we can just use *data and count.
*/
struct stat_array {
uint64_t *data;
uint_t count; /* Number of entries in data[] */
uint64_t __data; /* Only used when data is a single uint64_t */
};
static uint64_t
stat_histo_max(struct stat_array *nva, unsigned int len)
{
uint64_t max = 0;
int i;
for (i = 0; i < len; i++)
max = MAX(max, array64_max(nva[i].data, nva[i].count));
return (max);
}
/*
* Helper function to lookup a uint64_t array or uint64_t value and store its
* data as a stat_array. If the nvpair is a single uint64_t value, then we make
* it look like a one element array to make it easier to process.
*/
static int
nvpair64_to_stat_array(nvlist_t *nvl, const char *name,
struct stat_array *nva)
{
nvpair_t *tmp;
int ret;
verify(nvlist_lookup_nvpair(nvl, name, &tmp) == 0);
switch (nvpair_type(tmp)) {
case DATA_TYPE_UINT64_ARRAY:
ret = nvpair_value_uint64_array(tmp, &nva->data, &nva->count);
break;
case DATA_TYPE_UINT64:
ret = nvpair_value_uint64(tmp, &nva->__data);
nva->data = &nva->__data;
nva->count = 1;
break;
default:
/* Not a uint64_t */
ret = EINVAL;
break;
}
return (ret);
}
/*
* Given a list of nvlist names, look up the extended stats in newnv and oldnv,
* subtract them, and return the results in a newly allocated stat_array.
* You must free the returned array after you are done with it with
* free_calc_stats().
*
* Additionally, you can set "oldnv" to NULL if you simply want the newnv
* values.
*/
static struct stat_array *
calc_and_alloc_stats_ex(const char **names, unsigned int len, nvlist_t *oldnv,
nvlist_t *newnv)
{
nvlist_t *oldnvx = NULL, *newnvx;
struct stat_array *oldnva, *newnva, *calcnva;
int i, j;
unsigned int alloc_size = (sizeof (struct stat_array)) * len;
/* Extract our extended stats nvlist from the main list */
verify(nvlist_lookup_nvlist(newnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&newnvx) == 0);
if (oldnv) {
verify(nvlist_lookup_nvlist(oldnv, ZPOOL_CONFIG_VDEV_STATS_EX,
&oldnvx) == 0);
}
newnva = safe_malloc(alloc_size);
oldnva = safe_malloc(alloc_size);
calcnva = safe_malloc(alloc_size);
for (j = 0; j < len; j++) {
verify(nvpair64_to_stat_array(newnvx, names[j],
&newnva[j]) == 0);
calcnva[j].count = newnva[j].count;
alloc_size = calcnva[j].count * sizeof (calcnva[j].data[0]);
calcnva[j].data = safe_malloc(alloc_size);
memcpy(calcnva[j].data, newnva[j].data, alloc_size);
if (oldnvx) {
verify(nvpair64_to_stat_array(oldnvx, names[j],
&oldnva[j]) == 0);
for (i = 0; i < oldnva[j].count; i++)
calcnva[j].data[i] -= oldnva[j].data[i];
}
}
free(newnva);
free(oldnva);
return (calcnva);
}
static void
free_calc_stats(struct stat_array *nva, unsigned int len)
{
int i;
for (i = 0; i < len; i++)
free(nva[i].data);
free(nva);
}
static void
print_iostat_histo(struct stat_array *nva, unsigned int len,
iostat_cbdata_t *cb, unsigned int column_width, unsigned int namewidth,
double scale)
{
int i, j;
char buf[6];
uint64_t val;
enum zfs_nicenum_format format;
unsigned int buckets;
unsigned int start_bucket;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
/* All these histos are the same size, so just use nva[0].count */
buckets = nva[0].count;
if (cb->cb_flags & IOS_RQ_HISTO_M) {
/* Start at 512 - req size should never be lower than this */
start_bucket = 9;
} else {
start_bucket = 0;
}
for (j = start_bucket; j < buckets; j++) {
/* Print histogram bucket label */
if (cb->cb_flags & IOS_L_HISTO_M) {
/* Ending range of this bucket */
val = (1UL << (j + 1)) - 1;
zfs_nicetime(val, buf, sizeof (buf));
} else {
/* Request size (starting range of bucket) */
val = (1UL << j);
zfs_nicenum(val, buf, sizeof (buf));
}
if (cb->cb_scripted)
printf("%llu", (u_longlong_t)val);
else
printf("%-*s", namewidth, buf);
/* Print the values on the line */
for (i = 0; i < len; i++) {
print_one_stat(nva[i].data[j] * scale, format,
column_width, cb->cb_scripted);
}
printf("\n");
}
}
static void
print_solid_separator(unsigned int length)
{
while (length--)
printf("-");
printf("\n");
}
static void
print_iostat_histos(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv, double scale, const char *name)
{
unsigned int column_width;
unsigned int namewidth;
unsigned int entire_width;
enum iostat_type type;
struct stat_array *nva;
const char **names;
unsigned int names_len;
/* What type of histo are we? */
type = IOS_HISTO_IDX(cb->cb_flags);
/* Get NULL-terminated array of nvlist names for our histo */
names = vsx_type_to_nvlist[type];
names_len = str_array_len(names); /* num of names */
nva = calc_and_alloc_stats_ex(names, names_len, oldnv, newnv);
if (cb->cb_literal) {
column_width = MAX(5,
(unsigned int) log10(stat_histo_max(nva, names_len)) + 1);
} else {
column_width = 5;
}
namewidth = MAX(cb->cb_namewidth,
strlen(histo_to_title[IOS_HISTO_IDX(cb->cb_flags)]));
/*
* Calculate the entire line width of what we're printing. The
* +2 is for the two spaces between columns:
*/
/* read write */
/* ----- ----- */
/* |___| <---------- column_width */
/* */
/* |__________| <--- entire_width */
/* */
entire_width = namewidth + (column_width + 2) *
label_array_len(iostat_bottom_labels[type]);
if (cb->cb_scripted)
printf("%s\n", name);
else
print_iostat_header_impl(cb, column_width, name);
print_iostat_histo(nva, names_len, cb, column_width,
namewidth, scale);
free_calc_stats(nva, names_len);
if (!cb->cb_scripted)
print_solid_separator(entire_width);
}
/*
* Calculate the average latency of a power-of-two latency histogram
*/
static uint64_t
single_histo_average(uint64_t *histo, unsigned int buckets)
{
int i;
uint64_t count = 0, total = 0;
for (i = 0; i < buckets; i++) {
/*
* Our buckets are power-of-two latency ranges. Use the
* midpoint latency of each bucket to calculate the average.
* For example:
*
* Bucket Midpoint
* 8ns-15ns: 12ns
* 16ns-31ns: 24ns
* ...
*/
if (histo[i] != 0) {
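			/*
			 * (1 << i) + (1 << i)/2 == 1.5 * 2^i, the midpoint
			 * of the bucket covering [2^i, 2^(i+1)).
			 */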
total += histo[i] * (((1UL << i) + ((1UL << i)/2)));
count += histo[i];
}
}
/* Prevent divide by zero */
return (count == 0 ? 0 : total / count);
}
static void
print_iostat_queues(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv, double scale)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_QUEUES);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), NULL, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
for (i = 0; i < ARRAY_SIZE(names); i++) {
val = nva[i].data[0] * scale;
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
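/*
* Print the average latencies (total, disk, sync/async read/write, and
* scrub) for a vdev, each computed from its latency histogram.
*/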
static void
print_iostat_latency(iostat_cbdata_t *cb, nvlist_t *oldnv,
nvlist_t *newnv, double scale)
{
int i;
uint64_t val;
const char *names[] = {
ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
};
struct stat_array *nva;
unsigned int column_width = default_column_width(cb, IOS_LATENCY);
enum zfs_nicenum_format format;
nva = calc_and_alloc_stats_ex(names, ARRAY_SIZE(names), oldnv, newnv);
if (cb->cb_literal)
format = ZFS_NICENUM_RAWTIME;
else
format = ZFS_NICENUM_TIME;
/* Print our avg latencies on the line */
for (i = 0; i < ARRAY_SIZE(names); i++) {
/* Compute average latency for a latency histo */
val = single_histo_average(nva[i].data, nva[i].count) * scale;
print_one_stat(val, format, column_width, cb->cb_scripted);
}
free_calc_stats(nva, ARRAY_SIZE(names));
}
/*
* Print default statistics (capacity/operations/bandwidth)
*/
static void
print_iostat_default(vdev_stat_t *vs, iostat_cbdata_t *cb, double scale)
{
unsigned int column_width = default_column_width(cb, IOS_DEFAULT);
enum zfs_nicenum_format format;
char na; /* char to print for "not applicable" values */
if (cb->cb_literal) {
format = ZFS_NICENUM_RAW;
na = '0';
} else {
format = ZFS_NICENUM_1024;
na = '-';
}
/* only toplevel vdevs have capacity stats */
if (vs->vs_space == 0) {
if (cb->cb_scripted)
printf("\t%c\t%c", na, na);
else
printf(" %*c %*c", column_width, na, column_width,
na);
} else {
print_one_stat(vs->vs_alloc, format, column_width,
cb->cb_scripted);
print_one_stat(vs->vs_space - vs->vs_alloc, format,
column_width, cb->cb_scripted);
}
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_ops[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_READ] * scale),
format, column_width, cb->cb_scripted);
print_one_stat((uint64_t)(vs->vs_bytes[ZIO_TYPE_WRITE] * scale),
format, column_width, cb->cb_scripted);
}
/*
* Print out all the statistics for the given vdev. This can either be the
* toplevel configuration, or called recursively. If 'name' is NULL, then this
* is a verbose output, and we don't want to display the toplevel pool stats.
*
* Returns the number of stat lines printed.
*/
unsigned int
print_vdev_stats(zpool_handle_t *zhp, const char *name, nvlist_t *oldnv,
nvlist_t *newnv, iostat_cbdata_t *cb, int depth)
{
nvlist_t **oldchild, **newchild;
uint_t c, children, oldchildren;
vdev_stat_t *oldvs, *newvs, *calcvs;
vdev_stat_t zerovs = { 0 };
char *vname;
int i;
int ret = 0;
uint64_t tdelta;
double scale;
calcvs = safe_malloc(sizeof (*calcvs));
+ if (strcmp(name, VDEV_TYPE_INDIRECT) == 0) {
+ free(calcvs);
+ return (ret);
+ }
+
if (oldnv != NULL) {
verify(nvlist_lookup_uint64_array(oldnv,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&oldvs, &c) == 0);
} else {
oldvs = &zerovs;
}
/* Do we only want to see a specific vdev? */
for (i = 0; i < cb->cb_vdev_names_count; i++) {
/* Yes we do. Is this the vdev? */
if (strcmp(name, cb->cb_vdev_names[i]) == 0) {
/*
* This is our vdev. Since it is the only vdev we
* will be displaying, make depth = 0 so that it
* doesn't get indented.
*/
depth = 0;
break;
}
}
if (cb->cb_vdev_names_count && (i == cb->cb_vdev_names_count)) {
/* Couldn't match the name */
goto children;
}
verify(nvlist_lookup_uint64_array(newnv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&newvs, &c) == 0);
/*
* Print the vdev name unless it's a histogram. Histograms
* display the vdev name in the header itself.
*/
if (!(cb->cb_flags & IOS_ANYHISTO_M)) {
if (cb->cb_scripted) {
printf("%s", name);
} else {
if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) -
depth), "");
}
}
/* Calculate our scaling factor */
tdelta = newvs->vs_timestamp - oldvs->vs_timestamp;
if ((oldvs->vs_timestamp == 0) && (cb->cb_flags & IOS_ANYHISTO_M)) {
/*
* If we specify printing histograms with no time interval, then
* print the histogram numbers over the entire lifetime of the
* vdev.
*/
scale = 1;
} else {
if (tdelta == 0)
scale = 1.0;
else
scale = (double)NANOSEC / tdelta;
}
if (cb->cb_flags & IOS_DEFAULT_M) {
calc_default_iostats(oldvs, newvs, calcvs);
print_iostat_default(calcvs, cb, scale);
}
if (cb->cb_flags & IOS_LATENCY_M)
print_iostat_latency(cb, oldnv, newnv, scale);
if (cb->cb_flags & IOS_QUEUES_M)
print_iostat_queues(cb, oldnv, newnv, scale);
if (cb->cb_flags & IOS_ANYHISTO_M) {
printf("\n");
print_iostat_histos(cb, oldnv, newnv, scale, name);
}
if (cb->vcdl != NULL) {
char *path;
if (nvlist_lookup_string(newnv, ZPOOL_CONFIG_PATH,
&path) == 0) {
printf(" ");
zpool_print_cmd(cb->vcdl, zpool_get_name(zhp), path);
}
}
if (!(cb->cb_flags & IOS_ANYHISTO_M))
printf("\n");
ret++;
children:
free(calcvs);
if (!cb->cb_verbose)
return (ret);
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_CHILDREN,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_CHILDREN,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE, islog = B_FALSE;
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_HOLE,
&ishole);
(void) nvlist_lookup_uint64(newchild[c], ZPOOL_CONFIG_IS_LOG,
&islog);
if (ishole || islog)
continue;
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c] : NULL,
newchild[c], cb, depth + 2);
free(vname);
}
/*
* Log device section
*/
if (num_logs(newnv) > 0) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
!cb->cb_vdev_names) {
print_iostat_dashes(cb, 0, "logs");
}
printf("\n");
for (c = 0; c < children; c++) {
uint64_t islog = B_FALSE;
(void) nvlist_lookup_uint64(newchild[c],
ZPOOL_CONFIG_IS_LOG, &islog);
if (islog) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ?
oldchild[c] : NULL, newchild[c],
cb, depth + 2);
free(vname);
}
}
}
/*
* Include level 2 ARC devices in iostat output
*/
if (nvlist_lookup_nvlist_array(newnv, ZPOOL_CONFIG_L2CACHE,
&newchild, &children) != 0)
return (ret);
if (oldnv) {
if (nvlist_lookup_nvlist_array(oldnv, ZPOOL_CONFIG_L2CACHE,
&oldchild, &oldchildren) != 0)
return (ret);
children = MIN(oldchildren, children);
}
if (children > 0) {
if ((!(cb->cb_flags & IOS_ANYHISTO_M)) && !cb->cb_scripted &&
!cb->cb_vdev_names) {
print_iostat_dashes(cb, 0, "cache");
}
printf("\n");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, newchild[c],
cb->cb_name_flags);
ret += print_vdev_stats(zhp, vname, oldnv ? oldchild[c]
: NULL, newchild[c], cb, depth + 2);
free(vname);
}
}
return (ret);
}
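/*
* Per-pool callback to refresh the pool's statistics before printing;
* pools that have disappeared are dropped from the list.
*/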
static int
refresh_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
boolean_t missing;
/*
* If the pool has disappeared, remove it from the list and continue.
*/
if (zpool_refresh_stats(zhp, &missing) != 0)
return (-1);
if (missing)
pool_list_remove(cb->cb_list, zhp);
return (0);
}
/*
* Callback to print out the iostats for the given pool.
*/
int
print_iostat(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
nvlist_t *oldconfig, *newconfig;
nvlist_t *oldnvroot, *newnvroot;
int ret;
newconfig = zpool_get_config(zhp, &oldconfig);
if (cb->cb_iteration == 1)
oldconfig = NULL;
verify(nvlist_lookup_nvlist(newconfig, ZPOOL_CONFIG_VDEV_TREE,
&newnvroot) == 0);
if (oldconfig == NULL)
oldnvroot = NULL;
else
verify(nvlist_lookup_nvlist(oldconfig, ZPOOL_CONFIG_VDEV_TREE,
&oldnvroot) == 0);
ret = print_vdev_stats(zhp, zpool_get_name(zhp), oldnvroot, newnvroot,
cb, 0);
if ((ret != 0) && !(cb->cb_flags & IOS_ANYHISTO_M) &&
!cb->cb_scripted && cb->cb_verbose && !cb->cb_vdev_names_count) {
print_iostat_separator(cb);
if (cb->vcdl != NULL) {
print_cmd_columns(cb->vcdl, 1);
}
printf("\n");
}
return (ret);
}
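/*
* Return the terminal width in columns, or a large value when stdout is
* not a tty (e.g. when output is redirected to a file).
*/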
static int
get_columns(void)
{
struct winsize ws;
int columns = 80;
int error;
if (isatty(STDOUT_FILENO)) {
error = ioctl(STDOUT_FILENO, TIOCGWINSZ, &ws);
if (error == 0)
columns = ws.ws_col;
} else {
columns = 999;
}
return (columns);
}
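/*
* Callback to compute cb_namewidth: the widest pool (or, in verbose mode,
* pool/vdev) name, clamped so the statistics still fit on one line.
*/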
int
get_namewidth(zpool_handle_t *zhp, void *data)
{
iostat_cbdata_t *cb = data;
nvlist_t *config, *nvroot;
int columns;
if ((config = zpool_get_config(zhp, NULL)) != NULL) {
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
unsigned int poolname_len = strlen(zpool_get_name(zhp));
if (!cb->cb_verbose)
cb->cb_namewidth = MAX(poolname_len, cb->cb_namewidth);
else
cb->cb_namewidth = MAX(poolname_len,
max_width(zhp, nvroot, 0, cb->cb_namewidth,
cb->cb_name_flags));
}
/*
* The width must be at least 10, but may be as large as the
* column width - 42 so that we can still fit in one line.
*/
columns = get_columns();
if (cb->cb_namewidth < 10)
cb->cb_namewidth = 10;
if (cb->cb_namewidth > columns - 42)
cb->cb_namewidth = columns - 42;
return (0);
}
/*
* Parse the input string and get the 'interval' and 'count' values, if present.
*/
static void
get_interval_count(int *argcp, char **argv, float *iv,
unsigned long *cnt)
{
float interval = 0;
unsigned long count = 0;
int argc = *argcp;
/*
* Determine if the last argument is an integer or a pool name
*/
if (argc > 0 && isnumber(argv[argc - 1])) {
char *end;
errno = 0;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext("interval "
"cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
/*
* If this is not a valid number, just plow on. The
* user will get a more informative error message later
* on.
*/
interval = 0;
}
}
/*
* If the last argument is also an integer, then we have both a count
* and an interval.
*/
if (argc > 0 && isnumber(argv[argc - 1])) {
char *end;
errno = 0;
count = interval;
interval = strtof(argv[argc - 1], &end);
if (*end == '\0' && errno == 0) {
if (interval == 0) {
(void) fprintf(stderr, gettext("interval "
"cannot be zero\n"));
usage(B_FALSE);
}
/*
* Ignore the last parameter
*/
argc--;
} else {
interval = 0;
}
}
*iv = interval;
*cnt = count;
*argcp = argc;
}
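/*
* Set the global timestamp format from the -T option argument:
* 'u' for Unix time, 'd' for date(1)-style output.
*/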
static void
get_timestamp_arg(char c)
{
if (c == 'u')
timestamp_fmt = UDATE;
else if (c == 'd')
timestamp_fmt = DDATE;
else
usage(B_FALSE);
}
/*
* Return the stat flags that are supported on all pools by both the module and
* zpool iostat. "*data" should be initialized to all 0xFFs before running.
* It will get ANDed down until only the flags that are supported on all pools
* remain.
*/
static int
get_stat_flags_cb(zpool_handle_t *zhp, void *data)
{
uint64_t *mask = data;
nvlist_t *config, *nvroot, *nvx;
uint64_t flags = 0;
int i, j;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
/* Default stats are always supported, but for completeness... */
if (nvlist_exists(nvroot, ZPOOL_CONFIG_VDEV_STATS))
flags |= IOS_DEFAULT_M;
/* Get our extended stats nvlist from the main list */
if (nvlist_lookup_nvlist(nvroot, ZPOOL_CONFIG_VDEV_STATS_EX,
&nvx) != 0) {
/*
* No extended stats; they're probably running an older
* module. No big deal, we support that too.
*/
goto end;
}
/* For each extended stat, make sure all its nvpairs are supported */
for (j = 0; j < ARRAY_SIZE(vsx_type_to_nvlist); j++) {
if (!vsx_type_to_nvlist[j][0])
continue;
/* Start off by assuming the flag is supported, then check */
flags |= (1ULL << j);
for (i = 0; vsx_type_to_nvlist[j][i]; i++) {
if (!nvlist_exists(nvx, vsx_type_to_nvlist[j][i])) {
/* flag isn't supported */
flags = flags & ~(1ULL << j);
break;
}
}
}
end:
*mask = *mask & flags;
return (0);
}
/*
* Return a bitmask of stats that are supported on all pools by both the module
* and zpool iostat.
*/
static uint64_t
get_stat_flags(zpool_list_t *list)
{
uint64_t mask = -1;
/*
* get_stat_flags_cb() will lop off bits from "mask" until only the
* flags that are supported on all pools remain.
*/
pool_list_iter(list, B_FALSE, get_stat_flags_cb, &mask);
return (mask);
}
/*
* Return 1 if cb_data->cb_vdev_names[0] is this vdev's name, 0 otherwise.
*/
static int
is_vdev_cb(zpool_handle_t *zhp, nvlist_t *nv, void *cb_data)
{
iostat_cbdata_t *cb = cb_data;
char *name = NULL;
int ret = 0;
name = zpool_vdev_name(g_zfs, zhp, nv, cb->cb_name_flags);
if (strcmp(name, cb->cb_vdev_names[0]) == 0)
ret = 1; /* match */
free(name);
return (ret);
}
/*
* Returns 1 if cb_data->cb_vdev_names[0] is a vdev name, 0 otherwise.
*/
static int
is_vdev(zpool_handle_t *zhp, void *cb_data)
{
return (for_each_vdev(zhp, is_vdev_cb, cb_data));
}
/*
* Check if vdevs are in a pool
*
* Return 1 if all argv[] strings are vdev names in pool "pool_name". Otherwise
* return 0. If pool_name is NULL, then search all pools.
*/
static int
are_vdevs_in_pool(int argc, char **argv, char *pool_name,
iostat_cbdata_t *cb)
{
char **tmp_name;
int ret = 0;
int i;
int pool_count = 0;
if ((argc == 0) || !*argv)
return (0);
if (pool_name)
pool_count = 1;
/* Temporarily hijack cb_vdev_names for a second... */
tmp_name = cb->cb_vdev_names;
/* Go through our list of prospective vdev names */
for (i = 0; i < argc; i++) {
cb->cb_vdev_names = argv + i;
/* Is this name a vdev in our pools? */
ret = for_each_pool(pool_count, &pool_name, B_TRUE, NULL,
is_vdev, cb);
if (!ret) {
/* No match */
break;
}
}
cb->cb_vdev_names = tmp_name;
return (ret);
}
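/*
* Callback for is_pool(): return 1 if this pool's name matches the name
* we're searching for, 0 otherwise.
*/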
static int
is_pool_cb(zpool_handle_t *zhp, void *data)
{
char *name = data;
if (strcmp(name, zpool_get_name(zhp)) == 0)
return (1);
return (0);
}
/*
* Do we have a pool named *name? If so, return 1, otherwise 0.
*/
static int
is_pool(char *name)
{
return (for_each_pool(0, NULL, B_TRUE, NULL, is_pool_cb, name));
}
/* Are all our argv[] strings pool names? If so return 1, 0 otherwise. */
static int
are_all_pools(int argc, char **argv)
{
if ((argc == 0) || !*argv)
return (0);
while (--argc >= 0)
if (!is_pool(argv[argc]))
return (0);
return (1);
}
/*
* Helper function to print out vdev/pool names we can't resolve. Used for an
* error message.
*/
static void
error_list_unresolved_vdevs(int argc, char **argv, char *pool_name,
iostat_cbdata_t *cb)
{
int i;
char *name;
char *str;
for (i = 0; i < argc; i++) {
name = argv[i];
if (is_pool(name))
str = gettext("pool");
else if (are_vdevs_in_pool(1, &name, pool_name, cb))
str = gettext("vdev in this pool");
else if (are_vdevs_in_pool(1, &name, NULL, cb))
str = gettext("vdev in another pool");
else
str = gettext("unknown");
fprintf(stderr, "\t%s (%s)\n", name, str);
}
}
/*
* Same as get_interval_count(), but with additional checks to not misinterpret
* guids as interval/count values. Assumes VDEV_NAME_GUID is set in
* cb.cb_name_flags.
*/
static void
get_interval_count_filter_guids(int *argc, char **argv, float *interval,
unsigned long *count, iostat_cbdata_t *cb)
{
char **tmpargv = argv;
int argc_for_interval = 0;
/* Is the last arg an interval value? Or a guid? */
if (*argc >= 1 && !are_vdevs_in_pool(1, &argv[*argc - 1], NULL, cb)) {
/*
* The last arg is not a guid, so it's probably an
* interval value.
*/
argc_for_interval++;
if (*argc >= 2 &&
!are_vdevs_in_pool(1, &argv[*argc - 2], NULL, cb)) {
/*
* The 2nd to last arg is not a guid, so it's probably
* an interval value.
*/
argc_for_interval++;
}
}
/* Point to our list of possible intervals */
tmpargv = &argv[*argc - argc_for_interval];
*argc = *argc - argc_for_interval;
get_interval_count(&argc_for_interval, tmpargv,
interval, count);
}
/*
* Floating point sleep(). Allows you to pass in a floating point value for
* seconds.
*/
static void
fsleep(float sec)
{
struct timespec req;
req.tv_sec = floor(sec);
req.tv_nsec = (sec - (float)req.tv_sec) * NANOSEC;
nanosleep(&req, NULL);
}
/*
* Run one of the zpool status/iostat -c scripts with the help (-h) option and
* print the result.
*
* name: Short name of the script ('iostat').
* path: Full path to the script ('/usr/local/etc/zfs/zpool.d/iostat').
*/
static void
print_zpool_script_help(char *name, char *path)
{
char *argv[] = {path, "-h", NULL};
char **lines = NULL;
int lines_cnt = 0;
int rc;
rc = libzfs_run_process_get_stdout_nopath(path, argv, NULL, &lines,
&lines_cnt);
if (rc != 0 || lines == NULL || lines_cnt <= 0) {
if (lines != NULL)
libzfs_free_str_array(lines, lines_cnt);
return;
}
for (int i = 0; i < lines_cnt; i++)
if (!is_blank_str(lines[i]))
printf(" %-14s %s\n", name, lines[i]);
libzfs_free_str_array(lines, lines_cnt);
}
/*
* Go through the zpool status/iostat -c scripts in the user's path, run their
* help option (-h), and print out the results.
*/
static void
print_zpool_dir_scripts(char *dirpath)
{
DIR *dir;
struct dirent *ent;
char fullpath[MAXPATHLEN];
struct stat dir_stat;
if ((dir = opendir(dirpath)) != NULL) {
/* print all the files and directories within directory */
while ((ent = readdir(dir)) != NULL) {
snprintf(fullpath, sizeof (fullpath), "%s/%s", dirpath, ent->d_name);
/* Print the scripts */
if (stat(fullpath, &dir_stat) == 0)
if (dir_stat.st_mode & S_IXUSR &&
S_ISREG(dir_stat.st_mode))
print_zpool_script_help(ent->d_name,
fullpath);
}
closedir(dir);
}
}
/*
* Print out help text for all zpool status/iostat -c scripts.
*/
static void
print_zpool_script_list(char *subcommand)
{
char *dir, *sp;
printf(gettext("Available 'zpool %s -c' commands:\n"), subcommand);
sp = zpool_get_cmd_search_path();
if (sp == NULL)
return;
dir = strtok(sp, ":");
while (dir != NULL) {
print_zpool_dir_scripts(dir);
dir = strtok(NULL, ":");
}
free(sp);
}
/*
* zpool iostat [[-c [script1,script2,...]] [-lq]|[-rw]] [-ghHLpPvy]
* [-T d|u] [[ pool ...]|[pool vdev ...]|[vdev ...]]
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
* -v Display statistics for individual vdevs
* -h Display help
* -p Display values in parsable (exact) format.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -l Display average latency
* -q Display queue depths
* -w Display latency histograms
* -r Display request size histogram
* -T Display a timestamp in date(1) or Unix format
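* -y Omit statistics since boot (skip the first report).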
*
* This command can be tricky because we want to be able to deal with pool
* creation/destruction as well as vdev configuration changes. The bulk of this
* processing is handled by the pool_list_* routines in zpool_iter.c. We rely
* on pool_list_update() to detect the addition of new pools. Configuration
* changes are all handled within libzfs.
*/
int
zpool_do_iostat(int argc, char **argv)
{
int c;
int ret;
int npools;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
boolean_t verbose = B_FALSE;
boolean_t latency = B_FALSE, l_histo = B_FALSE, rq_histo = B_FALSE;
boolean_t queues = B_FALSE, parsable = B_FALSE, scripted = B_FALSE;
boolean_t omit_since_boot = B_FALSE;
boolean_t guid = B_FALSE;
boolean_t follow_links = B_FALSE;
boolean_t full_name = B_FALSE;
iostat_cbdata_t cb = { 0 };
char *cmd = NULL;
/* Used for printing error message */
const char flag_to_arg[] = {[IOS_LATENCY] = 'l', [IOS_QUEUES] = 'q',
[IOS_L_HISTO] = 'w', [IOS_RQ_HISTO] = 'r'};
uint64_t unsupported_flags;
/* check options */
while ((c = getopt(argc, argv, "c:gLPT:vyhplqrwH")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
verbose = B_TRUE;
break;
case 'g':
guid = B_TRUE;
break;
case 'L':
follow_links = B_TRUE;
break;
case 'P':
full_name = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
verbose = B_TRUE;
break;
case 'p':
parsable = B_TRUE;
break;
case 'l':
latency = B_TRUE;
break;
case 'q':
queues = B_TRUE;
break;
case 'H':
scripted = B_TRUE;
break;
case 'w':
l_histo = B_TRUE;
break;
case 'r':
rq_histo = B_TRUE;
break;
case 'y':
omit_since_boot = B_TRUE;
break;
case 'h':
usage(B_FALSE);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("iostat");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
cb.cb_literal = parsable;
cb.cb_scripted = scripted;
if (guid)
cb.cb_name_flags |= VDEV_NAME_GUID;
if (follow_links)
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (full_name)
cb.cb_name_flags |= VDEV_NAME_PATH;
cb.cb_iteration = 0;
cb.cb_namewidth = 0;
cb.cb_verbose = verbose;
/* Get our interval and count values (if any) */
if (guid) {
get_interval_count_filter_guids(&argc, argv, &interval,
&count, &cb);
} else {
get_interval_count(&argc, argv, &interval, &count);
}
if (argc == 0) {
/* No args, so just print the defaults. */
} else if (are_all_pools(argc, argv)) {
/* All the args are pool names */
} else if (are_vdevs_in_pool(argc, argv, NULL, &cb)) {
/* All the args are vdevs */
cb.cb_vdev_names = argv;
cb.cb_vdev_names_count = argc;
argc = 0; /* No pools to process */
} else if (are_all_pools(1, argv)) {
/* The first arg is a pool name */
if (are_vdevs_in_pool(argc - 1, argv + 1, argv[0], &cb)) {
/* ...and the rest are vdev names */
cb.cb_vdev_names = argv + 1;
cb.cb_vdev_names_count = argc - 1;
argc = 1; /* One pool to process */
} else {
fprintf(stderr, gettext("Expected either a list of "));
fprintf(stderr, gettext("pools, or list of vdevs in"));
fprintf(stderr, " \"%s\", ", argv[0]);
fprintf(stderr, gettext("but got:\n"));
error_list_unresolved_vdevs(argc - 1, argv + 1,
argv[0], &cb);
fprintf(stderr, "\n");
usage(B_FALSE);
return (1);
}
} else {
/*
* The args don't make sense. The first arg isn't a pool name,
* nor are all the args vdevs.
*/
fprintf(stderr, gettext("Unable to parse pools/vdevs list.\n"));
fprintf(stderr, "\n");
return (1);
}
if (cb.cb_vdev_names_count != 0) {
/*
* If user specified vdevs, it implies verbose.
*/
cb.cb_verbose = B_TRUE;
}
/*
* Construct the list of all interesting pools.
*/
ret = 0;
if ((list = pool_list_get(argc, argv, NULL, &ret)) == NULL)
return (1);
if (pool_list_count(list) == 0 && argc != 0) {
pool_list_free(list);
return (1);
}
if (pool_list_count(list) == 0 && interval == 0) {
pool_list_free(list);
(void) fprintf(stderr, gettext("no pools available\n"));
return (1);
}
if ((l_histo || rq_histo) && (cmd != NULL || latency || queues)) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("[-r|-w] isn't allowed with [-c|-l|-q]\n"));
usage(B_FALSE);
return (1);
}
if (l_histo && rq_histo) {
pool_list_free(list);
(void) fprintf(stderr,
gettext("Only one of [-r|-w] can be passed at a time\n"));
usage(B_FALSE);
return (1);
}
/*
* Enter the main iostat loop.
*/
cb.cb_list = list;
if (l_histo) {
/*
* Histograms tables look out of place when you try to display
* them with the other stats, so make a rule that you can only
* print histograms by themselves.
*/
cb.cb_flags = IOS_L_HISTO_M;
} else if (rq_histo) {
cb.cb_flags = IOS_RQ_HISTO_M;
} else {
cb.cb_flags = IOS_DEFAULT_M;
if (latency)
cb.cb_flags |= IOS_LATENCY_M;
if (queues)
cb.cb_flags |= IOS_QUEUES_M;
}
/*
* See if the module supports all the stats we want to display.
*/
unsupported_flags = cb.cb_flags & ~get_stat_flags(list);
if (unsupported_flags) {
uint64_t f;
int idx;
fprintf(stderr,
gettext("The loaded zfs module doesn't support:"));
/* for each bit set in unsupported_flags */
for (f = unsupported_flags; f; f &= ~(1ULL << idx)) {
idx = lowbit64(f) - 1;
fprintf(stderr, " -%c", flag_to_arg[idx]);
}
fprintf(stderr, ". Try running a newer module.\n");
pool_list_free(list);
return (1);
}
for (;;) {
if ((npools = pool_list_count(list)) == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else {
/*
* If this is the first iteration and -y was supplied
* we skip any printing.
*/
boolean_t skip = (omit_since_boot &&
cb.cb_iteration == 0);
/*
* Refresh all statistics. This is done as an
* explicit step before calculating the maximum name
* width, so that any configuration changes are
* properly accounted for.
*/
(void) pool_list_iter(list, B_FALSE, refresh_iostat,
&cb);
/*
* Iterate over all pools to determine the maximum width
* for the pool / device name column across all pools.
*/
cb.cb_namewidth = 0;
(void) pool_list_iter(list, B_FALSE, get_namewidth,
&cb);
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL && cb.cb_verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) {
cb.vcdl = all_pools_for_each_vdev_run(argc,
argv, cmd, g_zfs, cb.cb_vdev_names,
cb.cb_vdev_names_count, cb.cb_name_flags);
} else {
cb.vcdl = NULL;
}
/*
* If it's the first iteration and we're not skipping it, or
* if exactly one of 'skip' and 'verbose' is set, print the header.
*
* The histogram code explicitly prints its header on
* every vdev, so skip this for histograms.
*/
if (((++cb.cb_iteration == 1 && !skip) ||
(skip != verbose)) &&
(!(cb.cb_flags & IOS_ANYHISTO_M)) &&
!cb.cb_scripted)
print_iostat_header(&cb);
if (skip) {
(void) fsleep(interval);
continue;
}
pool_list_iter(list, B_FALSE, print_iostat, &cb);
/*
* If there's more than one pool, and we're not in
* verbose mode (which prints a separator for us),
* then print a separator.
*
* In addition, if we're printing specific vdevs then
* we also want an ending separator.
*/
if (((npools > 1 && !verbose &&
!(cb.cb_flags & IOS_ANYHISTO_M)) ||
(!(cb.cb_flags & IOS_ANYHISTO_M) &&
cb.cb_vdev_names_count)) &&
!cb.cb_scripted) {
print_iostat_separator(&cb);
if (cb.vcdl != NULL)
print_cmd_columns(cb.vcdl, 1);
printf("\n");
}
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
}
/*
* Flush the output so that redirection to a file isn't buffered
* indefinitely.
*/
(void) fflush(stdout);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
pool_list_free(list);
return (ret);
}
typedef struct list_cbdata {
boolean_t cb_verbose;
int cb_name_flags;
int cb_namewidth;
boolean_t cb_scripted;
zprop_list_t *cb_proplist;
boolean_t cb_literal;
} list_cbdata_t;
/*
* Given a list of columns to display, output appropriate headers for each one.
*/
static void
print_header(list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
char headerbuf[ZPOOL_MAXPROPLEN];
const char *header;
boolean_t first = B_TRUE;
boolean_t right_justify;
size_t width = 0;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first)
(void) printf(" ");
else
first = B_FALSE;
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_INVAL) {
header = zpool_prop_column_name(pl->pl_prop);
right_justify = zpool_prop_align_right(pl->pl_prop);
} else {
int i;
for (i = 0; pl->pl_user_prop[i] != '\0'; i++)
headerbuf[i] = toupper(pl->pl_user_prop[i]);
headerbuf[i] = '\0';
header = headerbuf;
}
if (pl->pl_next == NULL && !right_justify)
(void) printf("%s", header);
else if (right_justify)
(void) printf("%*s", (int)width, header);
else
(void) printf("%-*s", (int)width, header);
}
(void) printf("\n");
}
/*
* Given a pool and a list of properties, print out all the properties according
* to the described layout.
*/
static void
print_pool(zpool_handle_t *zhp, list_cbdata_t *cb)
{
zprop_list_t *pl = cb->cb_proplist;
boolean_t first = B_TRUE;
char property[ZPOOL_MAXPROPLEN];
char *propstr;
boolean_t right_justify;
size_t width;
for (; pl != NULL; pl = pl->pl_next) {
width = pl->pl_width;
if (first && cb->cb_verbose) {
/*
* Reset the width to accommodate the verbose listing
* of devices.
*/
width = cb->cb_namewidth;
}
if (!first) {
if (cb->cb_scripted)
(void) printf("\t");
else
(void) printf(" ");
} else {
first = B_FALSE;
}
right_justify = B_FALSE;
if (pl->pl_prop != ZPROP_INVAL) {
if (zpool_get_prop(zhp, pl->pl_prop, property,
sizeof (property), NULL, cb->cb_literal) != 0)
propstr = "-";
else
propstr = property;
right_justify = zpool_prop_align_right(pl->pl_prop);
} else if ((zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop)) &&
zpool_prop_get_feature(zhp, pl->pl_user_prop, property,
sizeof (property)) == 0) {
propstr = property;
} else {
propstr = "-";
}
/*
* If this is being called in scripted mode, or if this is the
* last column and it is left-justified, don't include a width
* format specifier.
*/
if (cb->cb_scripted || (pl->pl_next == NULL && !right_justify))
(void) printf("%s", propstr);
else if (right_justify)
(void) printf("%*s", (int)width, propstr);
else
(void) printf("%-*s", (int)width, propstr);
}
(void) printf("\n");
}
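/*
* Format and print a single numeric property column for a 'zpool list'
* vdev line, printing "-" when the value is not applicable.
*/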
static void
print_one_column(zpool_prop_t prop, uint64_t value, boolean_t scripted,
boolean_t valid, enum zfs_nicenum_format format)
{
char propval[64];
boolean_t fixed;
size_t width = zprop_width(prop, &fixed, ZFS_TYPE_POOL);
switch (prop) {
case ZPOOL_PROP_EXPANDSZ:
if (value == 0)
(void) strlcpy(propval, "-", sizeof (propval));
else
zfs_nicenum_format(value, propval, sizeof (propval),
format);
break;
case ZPOOL_PROP_FRAGMENTATION:
if (value == ZFS_FRAG_INVALID) {
(void) strlcpy(propval, "-", sizeof (propval));
} else if (format == ZFS_NICENUM_RAW) {
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value);
} else {
(void) snprintf(propval, sizeof (propval), "%llu%%",
(unsigned long long)value);
}
break;
case ZPOOL_PROP_CAPACITY:
if (format == ZFS_NICENUM_RAW)
(void) snprintf(propval, sizeof (propval), "%llu",
(unsigned long long)value);
else
(void) snprintf(propval, sizeof (propval), "%llu%%",
(unsigned long long)value);
break;
default:
zfs_nicenum_format(value, propval, sizeof (propval), format);
}
if (!valid)
(void) strlcpy(propval, "-", sizeof (propval));
if (scripted)
(void) printf("\t%s", propval);
else
(void) printf(" %*s", (int)width, propval);
}
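/*
* Print the 'zpool list -v' statistics line for a vdev (when 'name' is
* non-NULL) and recurse into its children, followed by any log, cache,
* and spare devices.
*/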
void
print_list_stats(zpool_handle_t *zhp, const char *name, nvlist_t *nv,
list_cbdata_t *cb, int depth)
{
nvlist_t **child;
vdev_stat_t *vs;
uint_t c, children;
char *vname;
boolean_t scripted = cb->cb_scripted;
uint64_t islog = B_FALSE;
boolean_t haslog = B_FALSE;
char *dashes = "%-*s - - - - - -\n";
verify(nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
if (name != NULL) {
boolean_t toplevel = (vs->vs_space != 0);
uint64_t cap;
enum zfs_nicenum_format format;
if (cb->cb_literal)
format = ZFS_NICENUM_RAW;
else
format = ZFS_NICENUM_1024;
+ if (strcmp(name, VDEV_TYPE_INDIRECT) == 0)
+ return;
+
if (scripted)
(void) printf("\t%s", name);
else if (strlen(name) + depth > cb->cb_namewidth)
(void) printf("%*s%s", depth, "", name);
else
(void) printf("%*s%s%*s", depth, "", name,
(int)(cb->cb_namewidth - strlen(name) - depth), "");
/*
* Print the properties for the individual vdevs. Some
* properties are only applicable to toplevel vdevs. The
* 'toplevel' boolean value is passed to print_one_column()
* to indicate whether the value is valid.
*/
print_one_column(ZPOOL_PROP_SIZE, vs->vs_space, scripted,
toplevel, format);
print_one_column(ZPOOL_PROP_ALLOCATED, vs->vs_alloc, scripted,
toplevel, format);
print_one_column(ZPOOL_PROP_FREE, vs->vs_space - vs->vs_alloc,
scripted, toplevel, format);
print_one_column(ZPOOL_PROP_EXPANDSZ, vs->vs_esize, scripted,
B_TRUE, format);
print_one_column(ZPOOL_PROP_FRAGMENTATION,
vs->vs_fragmentation, scripted,
(vs->vs_fragmentation != ZFS_FRAG_INVALID && toplevel),
format);
cap = (vs->vs_space == 0) ? 0 :
(vs->vs_alloc * 100 / vs->vs_space);
print_one_column(ZPOOL_PROP_CAPACITY, cap, scripted, toplevel,
format);
(void) printf("\n");
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
uint64_t ishole = B_FALSE;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_HOLE, &ishole) == 0 && ishole)
continue;
if (nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &islog) == 0 && islog) {
haslog = B_TRUE;
continue;
}
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2);
free(vname);
}
if (haslog == B_TRUE) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "log");
for (c = 0; c < children; c++) {
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&islog) != 0 || !islog)
continue;
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "cache");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2);
free(vname);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES, &child,
&children) == 0 && children > 0) {
/* LINTED E_SEC_PRINTF_VAR_FMT */
(void) printf(dashes, cb->cb_namewidth, "spare");
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(g_zfs, zhp, child[c],
cb->cb_name_flags);
print_list_stats(zhp, vname, child[c], cb, depth + 2);
free(vname);
}
}
}
/*
* Generic callback function to list a pool.
*/
int
list_callback(zpool_handle_t *zhp, void *data)
{
list_cbdata_t *cbp = data;
nvlist_t *config;
nvlist_t *nvroot;
config = zpool_get_config(zhp, NULL);
print_pool(zhp, cbp);
if (!cbp->cb_verbose)
return (0);
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
print_list_stats(zhp, NULL, nvroot, cbp, 0);
return (0);
}
/*
* zpool list [-gHLpP] [-o prop[,prop]*] [-T d|u] [pool] ... [interval [count]]
*
* -g Display guid for individual vdev name.
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -L Follow links when resolving vdev path name.
* -o List of properties to display. Defaults to
* "name,size,allocated,free,expandsize,fragmentation,capacity,"
* "dedupratio,health,altroot"
* -p Display values in parsable (exact) format.
* -P Display full path for vdev name.
* -T Display a timestamp in date(1) or Unix format
*
* List all pools in the system, whether or not they're healthy. Output space
* statistics for each one, as well as a health status summary.
*/
int
zpool_do_list(int argc, char **argv)
{
int c;
int ret = 0;
list_cbdata_t cb = { 0 };
static char default_props[] =
"name,size,allocated,free,expandsize,fragmentation,capacity,"
"dedupratio,health,altroot";
char *props = default_props;
float interval = 0;
unsigned long count = 0;
zpool_list_t *list;
boolean_t first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, ":gHLo:pPT:v")) != -1) {
switch (c) {
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'o':
props = optarg;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case 'v':
cb.cb_verbose = B_TRUE;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (zprop_get_list(g_zfs, props, &cb.cb_proplist, ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
for (;;) {
if ((list = pool_list_get(argc, argv, &cb.cb_proplist,
&ret)) == NULL)
return (1);
if (pool_list_count(list) == 0)
break;
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (!cb.cb_scripted && (first || cb.cb_verbose)) {
print_header(&cb);
first = B_FALSE;
}
ret = pool_list_iter(list, B_TRUE, list_callback, &cb);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
pool_list_free(list);
(void) fsleep(interval);
}
if (argc == 0 && !cb.cb_scripted && pool_list_count(list) == 0) {
(void) printf(gettext("no pools available\n"));
ret = 0;
}
pool_list_free(list);
zprop_free_list(cb.cb_proplist);
return (ret);
}
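/*
* Shared implementation for 'zpool attach' and 'zpool replace': parse the
* options, build the new vdev specification, and attach it to the pool.
*/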
static int
zpool_do_attach_or_replace(int argc, char **argv, int replacing)
{
boolean_t force = B_FALSE;
int c;
nvlist_t *nvroot;
char *poolname, *old_disk, *new_disk;
zpool_handle_t *zhp;
nvlist_t *props = NULL;
char *propval;
int ret;
/* check options */
while ((c = getopt(argc, argv, "fo:")) != -1) {
switch (c) {
case 'f':
force = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) == NULL) {
(void) fprintf(stderr, gettext("missing "
"'=' for -o option\n"));
usage(B_FALSE);
}
*propval = '\0';
propval++;
if ((strcmp(optarg, ZPOOL_CONFIG_ASHIFT) != 0) ||
(add_prop_list(optarg, propval, &props, B_TRUE)))
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
poolname = argv[0];
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
old_disk = argv[1];
if (argc < 3) {
if (!replacing) {
(void) fprintf(stderr,
gettext("missing <new_device> specification\n"));
usage(B_FALSE);
}
new_disk = old_disk;
argc -= 1;
argv += 1;
} else {
new_disk = argv[2];
argc -= 2;
argv += 2;
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((zhp = zpool_open(g_zfs, poolname)) == NULL) {
nvlist_free(props);
return (1);
}
if (zpool_get_config(zhp, NULL) == NULL) {
(void) fprintf(stderr, gettext("pool '%s' is unavailable\n"),
poolname);
zpool_close(zhp);
nvlist_free(props);
return (1);
}
/* unless manually specified use "ashift" pool property (if set) */
if (!nvlist_exists(props, ZPOOL_CONFIG_ASHIFT)) {
int intval;
zprop_source_t src;
char strval[ZPOOL_MAXPROPLEN];
intval = zpool_get_prop_int(zhp, ZPOOL_PROP_ASHIFT, &src);
if (src != ZPROP_SRC_DEFAULT) {
(void) sprintf(strval, "%" PRId32, intval);
verify(add_prop_list(ZPOOL_CONFIG_ASHIFT, strval,
&props, B_TRUE) == 0);
}
}
nvroot = make_root_vdev(zhp, props, force, B_FALSE, replacing, B_FALSE,
argc, argv);
if (nvroot == NULL) {
zpool_close(zhp);
nvlist_free(props);
return (1);
}
ret = zpool_vdev_attach(zhp, old_disk, new_disk, nvroot, replacing);
nvlist_free(props);
nvlist_free(nvroot);
zpool_close(zhp);
return (ret);
}
/*
* zpool replace [-f] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
*
* Replace <device> with <new_device>.
*/
/* ARGSUSED */
int
zpool_do_replace(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_TRUE));
}
/*
* zpool attach [-f] [-o property=value] <pool> <device> <new_device>
*
* -f Force attach, even if <new_device> appears to be in use.
* -o Set property=value.
*
* Attach <new_device> to the mirror containing <device>. If <device> is not
* part of a mirror, then <device> will be transformed into a mirror of
* <device> and <new_device>. In either case, <new_device> will begin life
* with a DTL of [0, now], and will immediately begin to resilver itself.
*/
int
zpool_do_attach(int argc, char **argv)
{
return (zpool_do_attach_or_replace(argc, argv, B_FALSE));
}
/*
* zpool detach [-f] <pool> <device>
*
* -f Force detach of <device>, even if DTLs argue against it
* (not supported yet)
*
* Detach a device from a mirror. The operation will be refused if <device>
* is the last device in the mirror, or if the DTLs indicate that this device
* has the only valid copy of some data.
*/
/* ARGSUSED */
int
zpool_do_detach(int argc, char **argv)
{
int c;
char *poolname, *path;
zpool_handle_t *zhp;
int ret;
/* check options */
while ((c = getopt(argc, argv, "f")) != -1) {
switch (c) {
case 'f':
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr,
gettext("missing <device> specification\n"));
usage(B_FALSE);
}
poolname = argv[0];
path = argv[1];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_vdev_detach(zhp, path);
zpool_close(zhp);
return (ret);
}
/*
* zpool split [-gLnP] [-o prop=val] ...
* [-o mntopt] ...
* [-R altroot] <pool> <newpool> [<device> ...]
*
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -n Do not split the pool, but display the resulting layout if
* it were to be split.
* -o Set property=value, or set mount options.
* -P Display full path for vdev name.
* -R Mount the split-off pool under an alternate root.
* -l Load encryption keys while importing.
*
* Splits the named pool and gives it the new pool name. Devices to be split
* off may be listed, provided that no more than one device is specified
* per top-level vdev mirror. The newly split pool is left in an exported
* state unless -R is specified.
*
* Restrictions: the top level of the pool must only be made up of
* mirrors; all devices in the pool must be healthy; no device may be
* undergoing a resilvering operation.
*/
int
zpool_do_split(int argc, char **argv)
{
char *srcpool, *newpool, *propval;
char *mntopts = NULL;
splitflags_t flags;
int c, ret = 0;
boolean_t loadkeys = B_FALSE;
zpool_handle_t *zhp;
nvlist_t *config, *props = NULL;
flags.dryrun = B_FALSE;
flags.import = B_FALSE;
flags.name_flags = 0;
/* check options */
while ((c = getopt(argc, argv, ":gLR:lno:P")) != -1) {
switch (c) {
case 'g':
flags.name_flags |= VDEV_NAME_GUID;
break;
case 'L':
flags.name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'R':
flags.import = B_TRUE;
if (add_prop_list(
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), optarg,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
break;
case 'l':
loadkeys = B_TRUE;
break;
case 'n':
flags.dryrun = B_TRUE;
break;
case 'o':
if ((propval = strchr(optarg, '=')) != NULL) {
*propval = '\0';
propval++;
if (add_prop_list(optarg, propval,
&props, B_TRUE) != 0) {
nvlist_free(props);
usage(B_FALSE);
}
} else {
mntopts = optarg;
}
break;
case 'P':
flags.name_flags |= VDEV_NAME_PATH;
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
break;
}
}
if (!flags.import && mntopts != NULL) {
(void) fprintf(stderr, gettext("setting mntopts is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
if (!flags.import && loadkeys) {
(void) fprintf(stderr, gettext("loading keys is only "
"valid when importing the pool\n"));
usage(B_FALSE);
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("Missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("Missing new pool name\n"));
usage(B_FALSE);
}
srcpool = argv[0];
newpool = argv[1];
argc -= 2;
argv += 2;
if ((zhp = zpool_open(g_zfs, srcpool)) == NULL) {
nvlist_free(props);
return (1);
}
config = split_mirror_vdev(zhp, newpool, props, flags, argc, argv);
if (config == NULL) {
ret = 1;
} else {
if (flags.dryrun) {
(void) printf(gettext("would create '%s' with the "
"following layout:\n\n"), newpool);
print_vdev_tree(NULL, newpool, config, 0, B_FALSE,
flags.name_flags);
}
}
zpool_close(zhp);
if (ret != 0 || flags.dryrun || !flags.import) {
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* The split was successful. Now we need to open the new
* pool and import it.
*/
if ((zhp = zpool_open_canfail(g_zfs, newpool)) == NULL) {
nvlist_free(config);
nvlist_free(props);
return (1);
}
if (loadkeys) {
ret = zfs_crypto_attempt_load_keys(g_zfs, newpool);
if (ret != 0)
ret = 1;
}
if (zpool_get_state(zhp) != POOL_STATE_UNAVAIL &&
zpool_enable_datasets(zhp, mntopts, 0) != 0) {
ret = 1;
(void) fprintf(stderr, gettext("Split was successful, but "
"the datasets could not all be mounted\n"));
(void) fprintf(stderr, gettext("Try doing '%s' with a "
"different altroot\n"), "zpool import");
}
zpool_close(zhp);
nvlist_free(config);
nvlist_free(props);
return (ret);
}
/*
* zpool online <pool> <device> ...
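*
* -e Expand the device to use all available space.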
*/
int
zpool_do_online(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
vdev_state_t newstate;
int flags = 0;
/* check options */
while ((c = getopt(argc, argv, "et")) != -1) {
switch (c) {
case 'e':
flags |= ZFS_ONLINE_EXPAND;
break;
case 't':
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
if (zpool_vdev_online(zhp, argv[i], flags, &newstate) == 0) {
if (newstate != VDEV_STATE_HEALTHY) {
(void) printf(gettext("warning: device '%s' "
"onlined, but remains in faulted state\n"),
argv[i]);
if (newstate == VDEV_STATE_FAULTED)
(void) printf(gettext("use 'zpool "
"clear' to restore a faulted "
"device\n"));
else
(void) printf(gettext("use 'zpool "
"replace' to replace devices "
"that are no longer present\n"));
}
} else {
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool offline [-ft] <pool> <device> ...
*
* -f Force the device into a faulted state.
*
* -t Only take the device off-line temporarily. The offline/faulted
* state will not be persistent across reboots.
*/
/* ARGSUSED */
int
zpool_do_offline(int argc, char **argv)
{
int c, i;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
boolean_t istmp = B_FALSE;
boolean_t fault = B_FALSE;
/* check options */
while ((c = getopt(argc, argv, "ft")) != -1) {
switch (c) {
case 'f':
fault = B_TRUE;
break;
case 't':
istmp = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing device name\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
for (i = 1; i < argc; i++) {
if (fault) {
uint64_t guid = zpool_vdev_path_to_guid(zhp, argv[i]);
vdev_aux_t aux;
if (istmp == B_FALSE) {
/* Force the fault to persist across imports */
aux = VDEV_AUX_EXTERNAL_PERSIST;
} else {
aux = VDEV_AUX_EXTERNAL;
}
if (guid == 0 || zpool_vdev_fault(zhp, guid, aux) != 0)
ret = 1;
} else {
if (zpool_vdev_offline(zhp, argv[i], istmp) != 0)
ret = 1;
}
}
zpool_close(zhp);
return (ret);
}
/*
* zpool clear <pool> [device]
*
* Clear all errors associated with a pool or a particular device.
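*
* -F Rewind the pool to an earlier transaction group, if necessary, to
* clear the errors.
* -n Used with -F: check whether the rewind would succeed without
* actually performing it.
* -X Used with -F: allow an extreme rewind (search further back for a
* usable transaction group).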
*/
int
zpool_do_clear(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t dryrun = B_FALSE;
boolean_t do_rewind = B_FALSE;
boolean_t xtreme_rewind = B_FALSE;
uint32_t rewind_policy = ZPOOL_NO_REWIND;
nvlist_t *policy = NULL;
zpool_handle_t *zhp;
char *pool, *device;
/* check options */
while ((c = getopt(argc, argv, "FnX")) != -1) {
switch (c) {
case 'F':
do_rewind = B_TRUE;
break;
case 'n':
dryrun = B_TRUE;
break;
case 'X':
xtreme_rewind = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 2) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
if ((dryrun || xtreme_rewind) && !do_rewind) {
(void) fprintf(stderr,
gettext("-n or -X only meaningful with -F\n"));
usage(B_FALSE);
}
if (dryrun)
rewind_policy = ZPOOL_TRY_REWIND;
else if (do_rewind)
rewind_policy = ZPOOL_DO_REWIND;
if (xtreme_rewind)
rewind_policy |= ZPOOL_EXTREME_REWIND;
/* In future, further rewind policy choices can be passed along here */
if (nvlist_alloc(&policy, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_uint32(policy, ZPOOL_REWIND_REQUEST, rewind_policy) != 0)
return (1);
pool = argv[0];
device = argc == 2 ? argv[1] : NULL;
if ((zhp = zpool_open_canfail(g_zfs, pool)) == NULL) {
nvlist_free(policy);
return (1);
}
if (zpool_clear(zhp, device, policy) != 0)
ret = 1;
zpool_close(zhp);
nvlist_free(policy);
return (ret);
}
/*
* zpool reguid <pool>
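*
* Generate a new unique identifier (GUID) for the pool.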
*/
int
zpool_do_reguid(int argc, char **argv)
{
int c;
char *poolname;
zpool_handle_t *zhp;
int ret = 0;
/* check options */
while ((c = getopt(argc, argv, "")) != -1) {
switch (c) {
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* get pool name and check number of arguments */
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
}
poolname = argv[0];
if ((zhp = zpool_open(g_zfs, poolname)) == NULL)
return (1);
ret = zpool_reguid(zhp);
zpool_close(zhp);
return (ret);
}
/*
* zpool reopen <pool>
*
* Reopen the pool so that the kernel can update the sizes of all vdevs.
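*
* -n Do not restart an in-progress scrub operation.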
*/
int
zpool_do_reopen(int argc, char **argv)
{
int c;
int ret = 0;
boolean_t scrub_restart = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "n")) != -1) {
switch (c) {
case 'n':
scrub_restart = B_FALSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
/* if argc == 0 we will execute zpool_reopen_one on all pools */
ret = for_each_pool(argc, argv, B_TRUE, NULL, zpool_reopen_one,
&scrub_restart);
return (ret);
}
typedef struct scrub_cbdata {
int cb_type;
int cb_argc;
char **cb_argv;
pool_scrub_cmd_t cb_scrub_cmd;
} scrub_cbdata_t;
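/*
* Per-pool callback for 'zpool scrub': start, pause, or stop a scan,
* skipping pools that are currently unavailable.
*/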
int
scrub_callback(zpool_handle_t *zhp, void *data)
{
scrub_cbdata_t *cb = data;
int err;
/*
* Ignore faulted pools.
*/
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
(void) fprintf(stderr, gettext("cannot scrub '%s': pool is "
"currently unavailable\n"), zpool_get_name(zhp));
return (1);
}
err = zpool_scan(zhp, cb->cb_type, cb->cb_scrub_cmd);
return (err != 0);
}
/*
* zpool scrub [-s | -p] <pool> ...
*
* -s Stop. Stops any in-progress scrub.
* -p Pause. Pauses an in-progress scrub.
*/
int
zpool_do_scrub(int argc, char **argv)
{
int c;
scrub_cbdata_t cb;
cb.cb_type = POOL_SCAN_SCRUB;
cb.cb_scrub_cmd = POOL_SCRUB_NORMAL;
/* check options */
while ((c = getopt(argc, argv, "sp")) != -1) {
switch (c) {
case 's':
cb.cb_type = POOL_SCAN_NONE;
break;
case 'p':
cb.cb_scrub_cmd = POOL_SCRUB_PAUSE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
if (cb.cb_type == POOL_SCAN_NONE &&
cb.cb_scrub_cmd == POOL_SCRUB_PAUSE) {
(void) fprintf(stderr, gettext("invalid option combination: "
"-s and -p are mutually exclusive\n"));
usage(B_FALSE);
}
cb.cb_argc = argc;
cb.cb_argv = argv;
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing pool name argument\n"));
usage(B_FALSE);
}
return (for_each_pool(argc, argv, B_TRUE, NULL, scrub_callback, &cb));
}
/*
* Print out detailed scrub status.
*/
-void
+static void
print_scan_status(pool_scan_stat_t *ps)
{
time_t start, end, pause;
uint64_t total_secs_left;
uint64_t elapsed, secs_left, mins_left, hours_left, days_left;
uint64_t pass_scanned, scanned, pass_issued, issued, total;
uint64_t scan_rate, issue_rate;
double fraction_done;
char processed_buf[7], scanned_buf[7], issued_buf[7], total_buf[7];
char srate_buf[7], irate_buf[7];
(void) printf(gettext(" scan: "));
/* If there's never been a scan, there's not much to say. */
if (ps == NULL || ps->pss_func == POOL_SCAN_NONE ||
ps->pss_func >= POOL_SCAN_FUNCS) {
(void) printf(gettext("none requested\n"));
return;
}
start = ps->pss_start_time;
end = ps->pss_end_time;
pause = ps->pss_pass_scrub_pause;
zfs_nicebytes(ps->pss_processed, processed_buf, sizeof (processed_buf));
assert(ps->pss_func == POOL_SCAN_SCRUB ||
ps->pss_func == POOL_SCAN_RESILVER);
/* Scan is finished or canceled. */
if (ps->pss_state == DSS_FINISHED) {
total_secs_left = end - start;
days_left = total_secs_left / 60 / 60 / 24;
hours_left = (total_secs_left / 60 / 60) % 24;
mins_left = (total_secs_left / 60) % 60;
secs_left = (total_secs_left % 60);
if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("scrub repaired %s "
"in %llu days %02llu:%02llu:%02llu "
"with %llu errors on %s"), processed_buf,
(u_longlong_t)days_left, (u_longlong_t)hours_left,
(u_longlong_t)mins_left, (u_longlong_t)secs_left,
(u_longlong_t)ps->pss_errors, ctime(&end));
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilvered %s "
"in %llu days %02llu:%02llu:%02llu "
"with %llu errors on %s"), processed_buf,
(u_longlong_t)days_left, (u_longlong_t)hours_left,
(u_longlong_t)mins_left, (u_longlong_t)secs_left,
(u_longlong_t)ps->pss_errors, ctime(&end));
}
return;
} else if (ps->pss_state == DSS_CANCELED) {
if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("scrub canceled on %s"),
ctime(&end));
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilver canceled on %s"),
ctime(&end));
}
return;
}
assert(ps->pss_state == DSS_SCANNING);
/* Scan is in progress. Resilvers can't be paused. */
if (ps->pss_func == POOL_SCAN_SCRUB) {
if (pause == 0) {
(void) printf(gettext("scrub in progress since %s"),
ctime(&start));
} else {
(void) printf(gettext("scrub paused since %s"),
ctime(&pause));
(void) printf(gettext("\tscrub started on %s"),
ctime(&start));
}
} else if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("resilver in progress since %s"),
ctime(&start));
}
scanned = ps->pss_examined;
pass_scanned = ps->pss_pass_exam;
issued = ps->pss_issued;
pass_issued = ps->pss_pass_issued;
total = ps->pss_to_examine;
/* we are only done with a block once we have issued the IO for it */
fraction_done = (double)issued / total;
/* elapsed time for this pass, rounding up to 1 if it's 0 */
elapsed = time(NULL) - ps->pss_pass_start;
elapsed -= ps->pss_pass_scrub_spent_paused;
elapsed = (elapsed != 0) ? elapsed : 1;
scan_rate = pass_scanned / elapsed;
issue_rate = pass_issued / elapsed;
total_secs_left = (issue_rate != 0) ?
((total - issued) / issue_rate) : UINT64_MAX;
days_left = total_secs_left / 60 / 60 / 24;
hours_left = (total_secs_left / 60 / 60) % 24;
mins_left = (total_secs_left / 60) % 60;
secs_left = (total_secs_left % 60);
/* format all of the numbers we will be reporting */
zfs_nicebytes(scanned, scanned_buf, sizeof (scanned_buf));
zfs_nicebytes(issued, issued_buf, sizeof (issued_buf));
zfs_nicebytes(total, total_buf, sizeof (total_buf));
zfs_nicebytes(scan_rate, srate_buf, sizeof (srate_buf));
zfs_nicebytes(issue_rate, irate_buf, sizeof (irate_buf));
/* do not print estimated time if we have a paused scrub */
if (pause == 0) {
(void) printf(gettext("\t%s scanned at %s/s, "
"%s issued at %s/s, %s total\n"),
scanned_buf, srate_buf, issued_buf, irate_buf, total_buf);
} else {
(void) printf(gettext("\t%s scanned, %s issued, %s total\n"),
scanned_buf, issued_buf, total_buf);
}
if (ps->pss_func == POOL_SCAN_RESILVER) {
(void) printf(gettext("\t%s resilvered, %.2f%% done"),
processed_buf, 100 * fraction_done);
} else if (ps->pss_func == POOL_SCAN_SCRUB) {
(void) printf(gettext("\t%s repaired, %.2f%% done"),
processed_buf, 100 * fraction_done);
}
if (pause == 0) {
if (issue_rate >= 10 * 1024 * 1024) {
(void) printf(gettext(", %llu days "
"%02llu:%02llu:%02llu to go\n"),
(u_longlong_t)days_left, (u_longlong_t)hours_left,
(u_longlong_t)mins_left, (u_longlong_t)secs_left);
} else {
(void) printf(gettext(", no estimated "
"completion time\n"));
}
} else {
(void) printf(gettext("\n"));
}
}
+/*
+ * Print out detailed removal status.
+ */
+static void
+print_removal_status(zpool_handle_t *zhp, pool_removal_stat_t *prs)
+{
+ char copied_buf[7], examined_buf[7], total_buf[7], rate_buf[7];
+ time_t start, end;
+ nvlist_t *config, *nvroot;
+ nvlist_t **child;
+ uint_t children;
+ char *vdev_name;
+
+ if (prs == NULL || prs->prs_state == DSS_NONE)
+ return;
+
+ /*
+ * Determine name of vdev.
+ */
+ config = zpool_get_config(zhp, NULL);
+ nvroot = fnvlist_lookup_nvlist(config,
+ ZPOOL_CONFIG_VDEV_TREE);
+ verify(nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
+ &child, &children) == 0);
+ assert(prs->prs_removing_vdev < children);
+ vdev_name = zpool_vdev_name(g_zfs, zhp,
+ child[prs->prs_removing_vdev], B_TRUE);
+
+ (void) printf(gettext("remove: "));
+
+ start = prs->prs_start_time;
+ end = prs->prs_end_time;
+ zfs_nicenum(prs->prs_copied, copied_buf, sizeof (copied_buf));
+
+ /*
+ * Removal is finished or canceled.
+ */
+ if (prs->prs_state == DSS_FINISHED) {
+ uint64_t minutes_taken = (end - start) / 60;
+
+ (void) printf(gettext("Removal of vdev %llu copied %s "
+ "in %lluh%um, completed on %s"),
+ (longlong_t)prs->prs_removing_vdev,
+ copied_buf,
+ (u_longlong_t)(minutes_taken / 60),
+ (uint_t)(minutes_taken % 60),
+ ctime((time_t *)&end));
+ } else if (prs->prs_state == DSS_CANCELED) {
+ (void) printf(gettext("Removal of %s canceled on %s"),
+ vdev_name, ctime(&end));
+ } else {
+ uint64_t copied, total, elapsed, mins_left, hours_left;
+ double fraction_done;
+ uint_t rate;
+
+ assert(prs->prs_state == DSS_SCANNING);
+
+ /*
+ * Removal is in progress.
+ */
+ (void) printf(gettext(
+ "Evacuation of %s in progress since %s"),
+ vdev_name, ctime(&start));
+
+ copied = prs->prs_copied > 0 ? prs->prs_copied : 1;
+ total = prs->prs_to_copy;
+ fraction_done = (double)copied / total;
+
+ /* elapsed time for this pass */
+ elapsed = time(NULL) - prs->prs_start_time;
+ elapsed = elapsed > 0 ? elapsed : 1;
+ rate = copied / elapsed;
+ rate = rate > 0 ? rate : 1;
+ mins_left = ((total - copied) / rate) / 60;
+ hours_left = mins_left / 60;
+
+ zfs_nicenum(copied, examined_buf, sizeof (examined_buf));
+ zfs_nicenum(total, total_buf, sizeof (total_buf));
+ zfs_nicenum(rate, rate_buf, sizeof (rate_buf));
+
+ /*
+ * Do not print the estimated time if more than 30 days remain.
+ */
+ (void) printf(gettext(" %s copied out of %s at %s/s, "
+ "%.2f%% done"),
+ examined_buf, total_buf, rate_buf, 100 * fraction_done);
+ if (hours_left < (30 * 24)) {
+ (void) printf(gettext(", %lluh%um to go\n"),
+ (u_longlong_t)hours_left, (uint_t)(mins_left % 60));
+ } else {
+ (void) printf(gettext(
+ ", (copy is slow, no estimated time)\n"));
+ }
+ }
+
+ if (prs->prs_mapping_memory > 0) {
+ char mem_buf[7];
+ zfs_nicenum(prs->prs_mapping_memory, mem_buf, sizeof (mem_buf));
+ (void) printf(gettext(" %s memory used for "
+ "removed device mappings\n"),
+ mem_buf);
+ }
+}
+
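/*
 * Illustrative sketch (editorial, not part of this change): the removal
 * progress and ETA arithmetic from print_removal_status() in isolation.
 * The function name and its inputs are hypothetical.
 */
static void
example_removal_eta(uint64_t copied, uint64_t total, time_t start_time)
{
	uint64_t elapsed, rate, mins_left, hours_left;
	double fraction_done;

	copied = copied > 0 ? copied : 1;
	fraction_done = (double)copied / total;

	elapsed = time(NULL) - start_time;
	elapsed = elapsed > 0 ? elapsed : 1;
	rate = copied / elapsed;
	rate = rate > 0 ? rate : 1;
	mins_left = ((total - copied) / rate) / 60;
	hours_left = mins_left / 60;

	(void) printf("%.2f%% done, %lluh%um to go\n",
	    100 * fraction_done, (u_longlong_t)hours_left,
	    (uint_t)(mins_left % 60));
}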
static void
print_error_log(zpool_handle_t *zhp)
{
nvlist_t *nverrlist = NULL;
nvpair_t *elem;
char *pathname;
size_t len = MAXPATHLEN * 2;
if (zpool_get_errlog(zhp, &nverrlist) != 0)
return;
(void) printf("errors: Permanent errors have been "
"detected in the following files:\n\n");
pathname = safe_malloc(len);
elem = NULL;
while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
nvlist_t *nv;
uint64_t dsobj, obj;
verify(nvpair_value_nvlist(elem, &nv) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
&dsobj) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
&obj) == 0);
zpool_obj_to_path(zhp, dsobj, obj, pathname, len);
(void) printf("%7s %s\n", "", pathname);
}
free(pathname);
nvlist_free(nverrlist);
}
static void
print_spares(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **spares,
uint_t nspares)
{
uint_t i;
char *name;
if (nspares == 0)
return;
(void) printf(gettext("\tspares\n"));
for (i = 0; i < nspares; i++) {
name = zpool_vdev_name(g_zfs, zhp, spares[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, spares[i], 2, B_TRUE);
free(name);
}
}
static void
print_l2cache(zpool_handle_t *zhp, status_cbdata_t *cb, nvlist_t **l2cache,
uint_t nl2cache)
{
uint_t i;
char *name;
if (nl2cache == 0)
return;
(void) printf(gettext("\tcache\n"));
for (i = 0; i < nl2cache; i++) {
name = zpool_vdev_name(g_zfs, zhp, l2cache[i],
cb->cb_name_flags);
print_status_config(zhp, cb, name, l2cache[i], 2, B_FALSE);
free(name);
}
}
static void
print_dedup_stats(nvlist_t *config)
{
ddt_histogram_t *ddh;
ddt_stat_t *dds;
ddt_object_t *ddo;
uint_t c;
char dspace[6], mspace[6];
/*
* If the pool was faulted then we may not have been able to
* obtain the config. Otherwise, if we have anything in the dedup
* table continue processing the stats.
*/
if (nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_OBJ_STATS,
(uint64_t **)&ddo, &c) != 0)
return;
(void) printf("\n");
(void) printf(gettext(" dedup: "));
if (ddo->ddo_count == 0) {
(void) printf(gettext("no DDT entries\n"));
return;
}
zfs_nicebytes(ddo->ddo_dspace, dspace, sizeof (dspace));
zfs_nicebytes(ddo->ddo_mspace, mspace, sizeof (mspace));
(void) printf("DDT entries %llu, size %s on disk, %s in core\n",
(u_longlong_t)ddo->ddo_count,
dspace,
mspace);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_STATS,
(uint64_t **)&dds, &c) == 0);
verify(nvlist_lookup_uint64_array(config, ZPOOL_CONFIG_DDT_HISTOGRAM,
(uint64_t **)&ddh, &c) == 0);
zpool_dump_ddt(dds, ddh);
}
/*
* Display a summary of pool status. Displays a summary such as:
*
* pool: tank
* status: DEGRADED
* reason: One or more devices ...
* see: http://zfsonlinux.org/msg/ZFS-xxxx-01
* config:
* mirror DEGRADED
* c1t0d0 OK
* c2t0d0 UNAVAIL
*
* When given the '-v' option, we print out the complete config. If the '-e'
* option is specified, then we print out error rate information as well.
*/
int
status_callback(zpool_handle_t *zhp, void *data)
{
status_cbdata_t *cbp = data;
nvlist_t *config, *nvroot;
char *msgid;
zpool_status_t reason;
zpool_errata_t errata;
const char *health;
uint_t c;
vdev_stat_t *vs;
config = zpool_get_config(zhp, NULL);
reason = zpool_get_status(zhp, &msgid, &errata);
cbp->cb_count++;
/*
* If we were given 'zpool status -x', only report those pools with
* problems.
*/
if (cbp->cb_explain &&
(reason == ZPOOL_STATUS_OK ||
reason == ZPOOL_STATUS_VERSION_OLDER ||
reason == ZPOOL_STATUS_FEAT_DISABLED)) {
if (!cbp->cb_allpools) {
(void) printf(gettext("pool '%s' is healthy\n"),
zpool_get_name(zhp));
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
}
return (0);
}
if (cbp->cb_first)
cbp->cb_first = B_FALSE;
else
(void) printf("\n");
- verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
- &nvroot) == 0);
+ nvroot = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE);
verify(nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &c) == 0);
health = zpool_state_to_name(vs->vs_state, vs->vs_aux);
(void) printf(gettext(" pool: %s\n"), zpool_get_name(zhp));
(void) printf(gettext(" state: %s\n"), health);
switch (reason) {
case ZPOOL_STATUS_MISSING_DEV_R:
(void) printf(gettext("status: One or more devices could not "
"be opened. Sufficient replicas exist for\n\tthe pool to "
"continue functioning in a degraded state.\n"));
(void) printf(gettext("action: Attach the missing device and "
"online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_MISSING_DEV_NR:
(void) printf(gettext("status: One or more devices could not "
"be opened. There are insufficient\n\treplicas for the "
"pool to continue functioning.\n"));
(void) printf(gettext("action: Attach the missing device and "
"online it using 'zpool online'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_R:
(void) printf(gettext("status: One or more devices could not "
"be used because the label is missing or\n\tinvalid. "
"Sufficient replicas exist for the pool to continue\n\t"
"functioning in a degraded state.\n"));
(void) printf(gettext("action: Replace the device using "
"'zpool replace'.\n"));
break;
case ZPOOL_STATUS_CORRUPT_LABEL_NR:
(void) printf(gettext("status: One or more devices could not "
"be used because the label is missing \n\tor invalid. "
"There are insufficient replicas for the pool to "
"continue\n\tfunctioning.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_FAILING_DEV:
(void) printf(gettext("status: One or more devices has "
"experienced an unrecoverable error. An\n\tattempt was "
"made to correct the error. Applications are "
"unaffected.\n"));
(void) printf(gettext("action: Determine if the device needs "
"to be replaced, and clear the errors\n\tusing "
"'zpool clear' or replace the device with 'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_OFFLINE_DEV:
(void) printf(gettext("status: One or more devices has "
"been taken offline by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
(void) printf(gettext("action: Online the device using "
"'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_REMOVED_DEV:
(void) printf(gettext("status: One or more devices has "
"been removed by the administrator.\n\tSufficient "
"replicas exist for the pool to continue functioning in "
"a\n\tdegraded state.\n"));
(void) printf(gettext("action: Online the device using "
"'zpool online' or replace the device with\n\t'zpool "
"replace'.\n"));
break;
case ZPOOL_STATUS_RESILVERING:
(void) printf(gettext("status: One or more devices is "
"currently being resilvered. The pool will\n\tcontinue "
"to function, possibly in a degraded state.\n"));
(void) printf(gettext("action: Wait for the resilver to "
"complete.\n"));
break;
case ZPOOL_STATUS_CORRUPT_DATA:
(void) printf(gettext("status: One or more devices has "
"experienced an error resulting in data\n\tcorruption. "
"Applications may be affected.\n"));
(void) printf(gettext("action: Restore the file in question "
"if possible. Otherwise restore the\n\tentire pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_CORRUPT_POOL:
(void) printf(gettext("status: The pool metadata is corrupted "
"and the pool cannot be opened.\n"));
zpool_explain_recover(zpool_get_handle(zhp),
zpool_get_name(zhp), reason, config);
break;
case ZPOOL_STATUS_VERSION_OLDER:
(void) printf(gettext("status: The pool is formatted using a "
"legacy on-disk format. The pool can\n\tstill be used, "
"but some features are unavailable.\n"));
(void) printf(gettext("action: Upgrade the pool using 'zpool "
"upgrade'. Once this is done, the\n\tpool will no longer "
"be accessible on software that does not support\n\t"
"feature flags.\n"));
break;
case ZPOOL_STATUS_VERSION_NEWER:
(void) printf(gettext("status: The pool has been upgraded to a "
"newer, incompatible on-disk version.\n\tThe pool cannot "
"be accessed on this system.\n"));
(void) printf(gettext("action: Access the pool from a system "
"running more recent software, or\n\trestore the pool from "
"backup.\n"));
break;
case ZPOOL_STATUS_FEAT_DISABLED:
(void) printf(gettext("status: Some supported features are not "
"enabled on the pool. The pool can\n\tstill be used, but "
"some features are unavailable.\n"));
(void) printf(gettext("action: Enable all features using "
"'zpool upgrade'. Once this is done,\n\tthe pool may no "
"longer be accessible by software that does not support\n\t"
"the features. See zpool-features(5) for details.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_READ:
(void) printf(gettext("status: The pool cannot be accessed on "
"this system because it uses the\n\tfollowing feature(s) "
"not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
(void) printf(gettext("action: Access the pool from a system "
"that supports the required feature(s),\n\tor restore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_UNSUP_FEAT_WRITE:
(void) printf(gettext("status: The pool can only be accessed "
"in read-only mode on this system. It\n\tcannot be "
"accessed in read-write mode because it uses the "
"following\n\tfeature(s) not supported on this system:\n"));
zpool_print_unsup_feat(config);
(void) printf("\n");
(void) printf(gettext("action: The pool cannot be accessed in "
"read-write mode. Import the pool with\n"
"\t\"-o readonly=on\", access the pool from a system that "
"supports the\n\trequired feature(s), or restore the "
"pool from backup.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_R:
(void) printf(gettext("status: One or more devices are "
"faulted in response to persistent errors.\n\tSufficient "
"replicas exist for the pool to continue functioning "
"in a\n\tdegraded state.\n"));
(void) printf(gettext("action: Replace the faulted device, "
"or use 'zpool clear' to mark the device\n\trepaired.\n"));
break;
case ZPOOL_STATUS_FAULTED_DEV_NR:
(void) printf(gettext("status: One or more devices are "
"faulted in response to persistent errors. There are "
"insufficient replicas for the pool to\n\tcontinue "
"functioning.\n"));
(void) printf(gettext("action: Destroy and re-create the pool "
"from a backup source. Manually marking the device\n"
"\trepaired using 'zpool clear' may allow some data "
"to be recovered.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_MMP:
(void) printf(gettext("status: The pool is suspended because "
"multihost writes failed or were delayed;\n\tanother "
"system could import the pool undetected.\n"));
(void) printf(gettext("action: Make sure the pool's devices "
"are connected, then reboot your system and\n\timport the "
"pool.\n"));
break;
case ZPOOL_STATUS_IO_FAILURE_WAIT:
case ZPOOL_STATUS_IO_FAILURE_CONTINUE:
(void) printf(gettext("status: One or more devices are "
"faulted in response to IO failures.\n"));
(void) printf(gettext("action: Make sure the affected devices "
"are connected, then run 'zpool clear'.\n"));
break;
case ZPOOL_STATUS_BAD_LOG:
(void) printf(gettext("status: An intent log record "
"could not be read.\n"
"\tWaiting for administrator intervention to fix the "
"faulted pool.\n"));
(void) printf(gettext("action: Either restore the affected "
"device(s) and run 'zpool online',\n"
"\tor ignore the intent log records by running "
"'zpool clear'.\n"));
break;
case ZPOOL_STATUS_HOSTID_MISMATCH:
(void) printf(gettext("status: Mismatch between pool hostid "
"and system hostid on imported pool.\n\tThis pool was "
"previously imported into a system with a different "
"hostid,\n\tand then was verbatim imported into this "
"system.\n"));
(void) printf(gettext("action: Export this pool on all systems "
"on which it is imported.\n"
"\tThen import it to correct the mismatch.\n"));
break;
case ZPOOL_STATUS_ERRATA:
(void) printf(gettext("status: Errata #%d detected.\n"),
errata);
switch (errata) {
case ZPOOL_ERRATA_NONE:
break;
case ZPOOL_ERRATA_ZOL_2094_SCRUB:
(void) printf(gettext("action: To correct the issue "
"run 'zpool scrub'.\n"));
break;
case ZPOOL_ERRATA_ZOL_6845_ENCRYPTION:
(void) printf(gettext("\tExisting encrypted datasets "
"contain an on-disk incompatibility\n\twhich "
"needs to be corrected.\n"));
(void) printf(gettext("action: To correct the issue "
"backup existing encrypted datasets to new\n\t"
"encrypted datasets and destroy the old ones. "
"'zfs mount -o ro' can\n\tbe used to temporarily "
"mount existing encrypted datasets readonly.\n"));
break;
default:
/*
* All errata which allow the pool to be imported
* must contain an action message.
*/
assert(0);
}
break;
default:
/*
* The remaining errors can't actually be generated, yet.
*/
assert(reason == ZPOOL_STATUS_OK);
}
if (msgid != NULL)
(void) printf(gettext(" see: http://zfsonlinux.org/msg/%s\n"),
msgid);
if (config != NULL) {
uint64_t nerr;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
pool_scan_stat_t *ps = NULL;
+ pool_removal_stat_t *prs = NULL;
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &c);
print_scan_status(ps);
+ (void) nvlist_lookup_uint64_array(nvroot,
+ ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t **)&prs, &c);
+ print_removal_status(zhp, prs);
+
cbp->cb_namewidth = max_width(zhp, nvroot, 0, 0,
cbp->cb_name_flags | VDEV_NAME_TYPE_ID);
if (cbp->cb_namewidth < 10)
cbp->cb_namewidth = 10;
(void) printf(gettext("config:\n\n"));
(void) printf(gettext("\t%-*s %-8s %5s %5s %5s"),
cbp->cb_namewidth, "NAME", "STATE", "READ", "WRITE",
"CKSUM");
if (cbp->vcdl != NULL)
print_cmd_columns(cbp->vcdl, 0);
printf("\n");
print_status_config(zhp, cbp, zpool_get_name(zhp), nvroot, 0,
B_FALSE);
if (num_logs(nvroot) > 0)
print_logs(zhp, cbp, nvroot);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0)
print_l2cache(zhp, cbp, l2cache, nl2cache);
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0)
print_spares(zhp, cbp, spares, nspares);
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_ERRCOUNT,
&nerr) == 0) {
nvlist_t *nverrlist = NULL;
/*
* If the approximate error count is small, get a
* precise count by fetching the entire log and
* uniquifying the results.
*/
if (nerr > 0 && nerr < 100 && !cbp->cb_verbose &&
zpool_get_errlog(zhp, &nverrlist) == 0) {
nvpair_t *elem;
elem = NULL;
nerr = 0;
while ((elem = nvlist_next_nvpair(nverrlist,
elem)) != NULL) {
nerr++;
}
}
nvlist_free(nverrlist);
(void) printf("\n");
if (nerr == 0)
(void) printf(gettext("errors: No known data "
"errors\n"));
else if (!cbp->cb_verbose)
(void) printf(gettext("errors: %llu data "
"errors, use '-v' for a list\n"),
(u_longlong_t)nerr);
else
print_error_log(zhp);
}
if (cbp->cb_dedup_stats)
print_dedup_stats(config);
} else {
(void) printf(gettext("config: The configuration cannot be "
"determined.\n"));
}
return (0);
}
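/*
 * Illustrative sketch (editorial, not part of this file): the precise error
 * count computed above is simply the number of nvpairs in the error-log
 * nvlist, walked the same way print_error_log() walks it.  The helper name
 * is hypothetical.
 */
static uint64_t
example_count_errlog_entries(nvlist_t *nverrlist)
{
	nvpair_t *elem = NULL;
	uint64_t nerr = 0;

	while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL)
		nerr++;
	return (nerr);
}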
/*
* zpool status [-c [script1,script2,...]] [-gLPvx] [-T d|u] [pool] ...
* [interval [count]]
*
* -c CMD For each vdev, run command CMD
* -g Display guid for individual vdev name.
* -L Follow links when resolving vdev path name.
* -P Display full path for vdev name.
* -v Display complete error logs
* -x Display only pools with potential problems
* -D Display dedup status (undocumented)
* -T Display a timestamp in date(1) or Unix format
*
* Describes the health status of all pools or some subset.
*/
int
zpool_do_status(int argc, char **argv)
{
int c;
int ret;
float interval = 0;
unsigned long count = 0;
status_cbdata_t cb = { 0 };
char *cmd = NULL;
/* check options */
while ((c = getopt(argc, argv, "c:gLPvxDT:")) != -1) {
switch (c) {
case 'c':
if (cmd != NULL) {
fprintf(stderr,
gettext("Can't set -c flag twice\n"));
exit(1);
}
if (getenv("ZPOOL_SCRIPTS_ENABLED") != NULL &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_ENABLED")) {
fprintf(stderr, gettext(
"Can't run -c, disabled by "
"ZPOOL_SCRIPTS_ENABLED.\n"));
exit(1);
}
if ((getuid() <= 0 || geteuid() <= 0) &&
!libzfs_envvar_is_set("ZPOOL_SCRIPTS_AS_ROOT")) {
fprintf(stderr, gettext(
"Can't run -c with root privileges "
"unless ZPOOL_SCRIPTS_AS_ROOT is set.\n"));
exit(1);
}
cmd = optarg;
break;
case 'g':
cb.cb_name_flags |= VDEV_NAME_GUID;
break;
case 'L':
cb.cb_name_flags |= VDEV_NAME_FOLLOW_LINKS;
break;
case 'P':
cb.cb_name_flags |= VDEV_NAME_PATH;
break;
case 'v':
cb.cb_verbose = B_TRUE;
break;
case 'x':
cb.cb_explain = B_TRUE;
break;
case 'D':
cb.cb_dedup_stats = B_TRUE;
break;
case 'T':
get_timestamp_arg(*optarg);
break;
case '?':
if (optopt == 'c') {
print_zpool_script_list("status");
exit(0);
} else {
fprintf(stderr,
gettext("invalid option '%c'\n"), optopt);
}
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
get_interval_count(&argc, argv, &interval, &count);
if (argc == 0)
cb.cb_allpools = B_TRUE;
cb.cb_first = B_TRUE;
cb.cb_print_status = B_TRUE;
for (;;) {
if (timestamp_fmt != NODATE)
print_timestamp(timestamp_fmt);
if (cmd != NULL)
cb.vcdl = all_pools_for_each_vdev_run(argc, argv, cmd,
NULL, NULL, 0, 0);
ret = for_each_pool(argc, argv, B_TRUE, NULL,
status_callback, &cb);
if (cb.vcdl != NULL)
free_vdev_cmd_data_list(cb.vcdl);
if (argc == 0 && cb.cb_count == 0)
(void) fprintf(stderr, gettext("no pools available\n"));
else if (cb.cb_explain && cb.cb_first && cb.cb_allpools)
(void) printf(gettext("all pools are healthy\n"));
if (ret != 0)
return (ret);
if (interval == 0)
break;
if (count != 0 && --count == 0)
break;
(void) fsleep(interval);
}
return (0);
}
typedef struct upgrade_cbdata {
int cb_first;
int cb_argc;
uint64_t cb_version;
char **cb_argv;
} upgrade_cbdata_t;
static int
check_unsupp_fs(zfs_handle_t *zhp, void *unsupp_fs)
{
int zfs_version = (int)zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
int *count = (int *)unsupp_fs;
if (zfs_version > ZPL_VERSION) {
(void) printf(gettext("%s (v%d) is not supported by this "
"implementation of ZFS.\n"),
zfs_get_name(zhp), zfs_version);
(*count)++;
}
zfs_iter_filesystems(zhp, check_unsupp_fs, unsupp_fs);
zfs_close(zhp);
return (0);
}
static int
upgrade_version(zpool_handle_t *zhp, uint64_t version)
{
int ret;
nvlist_t *config;
uint64_t oldversion;
int unsupp_fs = 0;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&oldversion) == 0);
assert(SPA_VERSION_IS_SUPPORTED(oldversion));
assert(oldversion < version);
ret = zfs_iter_root(zpool_get_handle(zhp), check_unsupp_fs, &unsupp_fs);
if (ret != 0)
return (ret);
if (unsupp_fs) {
(void) fprintf(stderr, gettext("Upgrade not performed due "
"to %d unsupported filesystems (max v%d).\n"),
unsupp_fs, (int)ZPL_VERSION);
return (1);
}
ret = zpool_upgrade(zhp, version);
if (ret != 0)
return (ret);
if (version >= SPA_VERSION_FEATURES) {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to feature flags.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion);
} else {
(void) printf(gettext("Successfully upgraded "
"'%s' from version %llu to version %llu.\n"),
zpool_get_name(zhp), (u_longlong_t)oldversion,
(u_longlong_t)version);
}
return (0);
}
static int
upgrade_enable_all(zpool_handle_t *zhp, int *countp)
{
int i, ret, count;
boolean_t firstff = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
count = 0;
for (i = 0; i < SPA_FEATURES; i++) {
const char *fname = spa_feature_table[i].fi_uname;
const char *fguid = spa_feature_table[i].fi_guid;
if (!nvlist_exists(enabled, fguid)) {
char *propname;
verify(-1 != asprintf(&propname, "feature@%s", fname));
ret = zpool_set_prop(zhp, propname,
ZFS_FEATURE_ENABLED);
if (ret != 0) {
free(propname);
return (ret);
}
count++;
if (firstff) {
(void) printf(gettext("Enabled the "
"following features on '%s':\n"),
zpool_get_name(zhp));
firstff = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
free(propname);
}
}
if (countp != NULL)
*countp = count;
return (0);
}
static int
upgrade_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
boolean_t printnl = B_FALSE;
int ret;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < cbp->cb_version) {
cbp->cb_first = B_FALSE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
printnl = B_TRUE;
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count > 0) {
cbp->cb_first = B_FALSE;
printnl = B_TRUE;
}
}
if (printnl) {
(void) printf(gettext("\n"));
}
return (0);
}
static int
upgrade_list_older_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
assert(SPA_VERSION_IS_SUPPORTED(version));
if (version < SPA_VERSION_FEATURES) {
if (cbp->cb_first) {
(void) printf(gettext("The following pools are "
"formatted with legacy version numbers and can\n"
"be upgraded to use feature flags. After "
"being upgraded, these pools\nwill no "
"longer be accessible by software that does not "
"support feature\nflags.\n\n"));
(void) printf(gettext("VER POOL\n"));
(void) printf(gettext("--- ------------\n"));
cbp->cb_first = B_FALSE;
}
(void) printf("%2llu %s\n", (u_longlong_t)version,
zpool_get_name(zhp));
}
return (0);
}
static int
upgrade_list_disabled_cb(zpool_handle_t *zhp, void *arg)
{
upgrade_cbdata_t *cbp = arg;
nvlist_t *config;
uint64_t version;
config = zpool_get_config(zhp, NULL);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
if (version >= SPA_VERSION_FEATURES) {
int i;
boolean_t poolfirst = B_TRUE;
nvlist_t *enabled = zpool_get_features(zhp);
for (i = 0; i < SPA_FEATURES; i++) {
const char *fguid = spa_feature_table[i].fi_guid;
const char *fname = spa_feature_table[i].fi_uname;
if (!nvlist_exists(enabled, fguid)) {
if (cbp->cb_first) {
(void) printf(gettext("\nSome "
"supported features are not "
"enabled on the following pools. "
"Once a\nfeature is enabled the "
"pool may become incompatible with "
"software\nthat does not support "
"the feature. See "
"zpool-features(5) for "
"details.\n\n"));
(void) printf(gettext("POOL "
"FEATURE\n"));
(void) printf(gettext("------"
"---------\n"));
cbp->cb_first = B_FALSE;
}
if (poolfirst) {
(void) printf(gettext("%s\n"),
zpool_get_name(zhp));
poolfirst = B_FALSE;
}
(void) printf(gettext(" %s\n"), fname);
}
/*
* If they did "zpool upgrade -a", then we could
* be doing ioctls to different pools. We need
* to log this history once to each pool, and bypass
* the normal history logging that happens in main().
*/
(void) zpool_log_history(g_zfs, history_str);
log_history = B_FALSE;
}
}
return (0);
}
/* ARGSUSED */
static int
upgrade_one(zpool_handle_t *zhp, void *data)
{
boolean_t printnl = B_FALSE;
upgrade_cbdata_t *cbp = data;
uint64_t cur_version;
int ret;
if (strcmp("log", zpool_get_name(zhp)) == 0) {
(void) fprintf(stderr, gettext("'log' is now a reserved word\n"
"Pool 'log' must be renamed using export and import"
" to upgrade.\n"));
return (1);
}
cur_version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (cur_version > cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using more current version '%llu'.\n\n"),
zpool_get_name(zhp), (u_longlong_t)cur_version);
return (0);
}
if (cbp->cb_version != SPA_VERSION && cur_version == cbp->cb_version) {
(void) printf(gettext("Pool '%s' is already formatted "
"using version %llu.\n\n"), zpool_get_name(zhp),
(u_longlong_t)cbp->cb_version);
return (0);
}
if (cur_version != cbp->cb_version) {
printnl = B_TRUE;
ret = upgrade_version(zhp, cbp->cb_version);
if (ret != 0)
return (ret);
}
if (cbp->cb_version >= SPA_VERSION_FEATURES) {
int count = 0;
ret = upgrade_enable_all(zhp, &count);
if (ret != 0)
return (ret);
if (count != 0) {
printnl = B_TRUE;
} else if (cur_version == SPA_VERSION) {
(void) printf(gettext("Pool '%s' already has all "
"supported features enabled.\n"),
zpool_get_name(zhp));
}
}
if (printnl) {
(void) printf(gettext("\n"));
}
return (0);
}
/*
* zpool upgrade
* zpool upgrade -v
* zpool upgrade [-V version] <-a | pool ...>
*
* With no arguments, display downrev'd ZFS pools available for upgrade.
* Individual pools can be upgraded by specifying the pool, and '-a' will
* upgrade all pools.
*/
int
zpool_do_upgrade(int argc, char **argv)
{
int c;
upgrade_cbdata_t cb = { 0 };
int ret = 0;
boolean_t showversions = B_FALSE;
boolean_t upgradeall = B_FALSE;
char *end;
/* check options */
while ((c = getopt(argc, argv, ":avV:")) != -1) {
switch (c) {
case 'a':
upgradeall = B_TRUE;
break;
case 'v':
showversions = B_TRUE;
break;
case 'V':
cb.cb_version = strtoll(optarg, &end, 10);
if (*end != '\0' ||
!SPA_VERSION_IS_SUPPORTED(cb.cb_version)) {
(void) fprintf(stderr,
gettext("invalid version '%s'\n"), optarg);
usage(B_FALSE);
}
break;
case ':':
(void) fprintf(stderr, gettext("missing argument for "
"'%c' option\n"), optopt);
usage(B_FALSE);
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
cb.cb_argc = argc;
cb.cb_argv = argv;
argc -= optind;
argv += optind;
if (cb.cb_version == 0) {
cb.cb_version = SPA_VERSION;
} else if (!upgradeall && argc == 0) {
(void) fprintf(stderr, gettext("-V option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
if (showversions) {
if (upgradeall || argc != 0) {
(void) fprintf(stderr, gettext("-v option is "
"incompatible with other arguments\n"));
usage(B_FALSE);
}
} else if (upgradeall) {
if (argc != 0) {
(void) fprintf(stderr, gettext("-a option should not "
"be used along with a pool name\n"));
usage(B_FALSE);
}
}
(void) printf(gettext("This system supports ZFS pool feature "
"flags.\n\n"));
if (showversions) {
int i;
(void) printf(gettext("The following features are "
"supported:\n\n"));
(void) printf(gettext("FEAT DESCRIPTION\n"));
(void) printf("----------------------------------------------"
"---------------\n");
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *fi = &spa_feature_table[i];
const char *ro =
(fi->fi_flags & ZFEATURE_FLAG_READONLY_COMPAT) ?
" (read-only compatible)" : "";
(void) printf("%-37s%s\n", fi->fi_uname, ro);
(void) printf(" %s\n", fi->fi_desc);
}
(void) printf("\n");
(void) printf(gettext("The following legacy versions are also "
"supported:\n\n"));
(void) printf(gettext("VER DESCRIPTION\n"));
(void) printf("--- -----------------------------------------"
"---------------\n");
(void) printf(gettext(" 1 Initial ZFS version\n"));
(void) printf(gettext(" 2 Ditto blocks "
"(replicated metadata)\n"));
(void) printf(gettext(" 3 Hot spares and double parity "
"RAID-Z\n"));
(void) printf(gettext(" 4 zpool history\n"));
(void) printf(gettext(" 5 Compression using the gzip "
"algorithm\n"));
(void) printf(gettext(" 6 bootfs pool property\n"));
(void) printf(gettext(" 7 Separate intent log devices\n"));
(void) printf(gettext(" 8 Delegated administration\n"));
(void) printf(gettext(" 9 refquota and refreservation "
"properties\n"));
(void) printf(gettext(" 10 Cache devices\n"));
(void) printf(gettext(" 11 Improved scrub performance\n"));
(void) printf(gettext(" 12 Snapshot properties\n"));
(void) printf(gettext(" 13 snapused property\n"));
(void) printf(gettext(" 14 passthrough-x aclinherit\n"));
(void) printf(gettext(" 15 user/group space accounting\n"));
(void) printf(gettext(" 16 stmf property support\n"));
(void) printf(gettext(" 17 Triple-parity RAID-Z\n"));
(void) printf(gettext(" 18 Snapshot user holds\n"));
(void) printf(gettext(" 19 Log device removal\n"));
(void) printf(gettext(" 20 Compression using zle "
"(zero-length encoding)\n"));
(void) printf(gettext(" 21 Deduplication\n"));
(void) printf(gettext(" 22 Received properties\n"));
(void) printf(gettext(" 23 Slim ZIL\n"));
(void) printf(gettext(" 24 System attributes\n"));
(void) printf(gettext(" 25 Improved scrub stats\n"));
(void) printf(gettext(" 26 Improved snapshot deletion "
"performance\n"));
(void) printf(gettext(" 27 Improved snapshot creation "
"performance\n"));
(void) printf(gettext(" 28 Multiple vdev replacements\n"));
(void) printf(gettext("\nFor more information on a particular "
"version, including supported releases,\n"));
(void) printf(gettext("see the ZFS Administration Guide.\n\n"));
} else if (argc == 0 && upgradeall) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_cb, &cb);
if (ret == 0 && cb.cb_first) {
if (cb.cb_version == SPA_VERSION) {
(void) printf(gettext("All pools are already "
"formatted using feature flags.\n\n"));
(void) printf(gettext("Every feature flags "
"pool already has all supported features "
"enabled.\n"));
} else {
(void) printf(gettext("All pools are already "
"formatted with version %llu or higher.\n"),
(u_longlong_t)cb.cb_version);
}
}
} else if (argc == 0) {
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_older_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("All pools are formatted "
"using feature flags.\n\n"));
} else {
(void) printf(gettext("\nUse 'zpool upgrade -v' "
"for a list of available legacy versions.\n"));
}
cb.cb_first = B_TRUE;
ret = zpool_iter(g_zfs, upgrade_list_disabled_cb, &cb);
assert(ret == 0);
if (cb.cb_first) {
(void) printf(gettext("Every feature flags pool has "
"all supported features enabled.\n"));
} else {
(void) printf(gettext("\n"));
}
} else {
ret = for_each_pool(argc, argv, B_FALSE, NULL,
upgrade_one, &cb);
}
return (ret);
}
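/*
 * Illustrative sketch (editorial, not part of this file): the "-V" argument
 * validation above in isolation -- strtoll() must consume the entire string
 * and the result must be a supported SPA version.  The helper name is
 * hypothetical.
 */
static boolean_t
example_parse_pool_version(const char *arg, uint64_t *versionp)
{
	char *end;
	uint64_t version;

	version = strtoll(arg, &end, 10);
	if (*end != '\0' || !SPA_VERSION_IS_SUPPORTED(version))
		return (B_FALSE);
	*versionp = version;
	return (B_TRUE);
}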
typedef struct hist_cbdata {
boolean_t first;
boolean_t longfmt;
boolean_t internal;
} hist_cbdata_t;
/*
* Print out the command history for a specific pool.
*/
static int
get_history_one(zpool_handle_t *zhp, void *data)
{
nvlist_t *nvhis;
nvlist_t **records;
uint_t numrecords;
int ret, i;
hist_cbdata_t *cb = (hist_cbdata_t *)data;
cb->first = B_FALSE;
(void) printf(gettext("History for '%s':\n"), zpool_get_name(zhp));
if ((ret = zpool_get_history(zhp, &nvhis)) != 0)
return (ret);
verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
&records, &numrecords) == 0);
for (i = 0; i < numrecords; i++) {
nvlist_t *rec = records[i];
char tbuf[30] = "";
if (nvlist_exists(rec, ZPOOL_HIST_TIME)) {
time_t tsec;
struct tm t;
tsec = fnvlist_lookup_uint64(records[i],
ZPOOL_HIST_TIME);
(void) localtime_r(&tsec, &t);
(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
}
if (nvlist_exists(rec, ZPOOL_HIST_CMD)) {
(void) printf("%s %s", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_CMD));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_EVENT)) {
int ievent =
fnvlist_lookup_uint64(rec, ZPOOL_HIST_INT_EVENT);
if (!cb->internal)
continue;
if (ievent >= ZFS_NUM_LEGACY_HISTORY_EVENTS) {
(void) printf("%s unrecognized record:\n",
tbuf);
dump_nvlist(rec, 4);
continue;
}
(void) printf("%s [internal %s txg:%lld] %s", tbuf,
zfs_history_event_names[ievent],
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_INT_NAME)) {
if (!cb->internal)
continue;
(void) printf("%s [txg:%lld] %s", tbuf,
(longlong_t)fnvlist_lookup_uint64(
rec, ZPOOL_HIST_TXG),
fnvlist_lookup_string(rec, ZPOOL_HIST_INT_NAME));
if (nvlist_exists(rec, ZPOOL_HIST_DSNAME)) {
(void) printf(" %s (%llu)",
fnvlist_lookup_string(rec,
ZPOOL_HIST_DSNAME),
(u_longlong_t)fnvlist_lookup_uint64(rec,
ZPOOL_HIST_DSID));
}
(void) printf(" %s", fnvlist_lookup_string(rec,
ZPOOL_HIST_INT_STR));
} else if (nvlist_exists(rec, ZPOOL_HIST_IOCTL)) {
if (!cb->internal)
continue;
(void) printf("%s ioctl %s\n", tbuf,
fnvlist_lookup_string(rec, ZPOOL_HIST_IOCTL));
if (nvlist_exists(rec, ZPOOL_HIST_INPUT_NVL)) {
(void) printf(" input:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_INPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_OUTPUT_NVL)) {
(void) printf(" output:\n");
dump_nvlist(fnvlist_lookup_nvlist(rec,
ZPOOL_HIST_OUTPUT_NVL), 8);
}
if (nvlist_exists(rec, ZPOOL_HIST_ERRNO)) {
(void) printf(" errno: %lld\n",
(longlong_t)fnvlist_lookup_int64(rec,
ZPOOL_HIST_ERRNO));
}
} else {
if (!cb->internal)
continue;
(void) printf("%s unrecognized record:\n", tbuf);
dump_nvlist(rec, 4);
}
if (!cb->longfmt) {
(void) printf("\n");
continue;
}
(void) printf(" [");
if (nvlist_exists(rec, ZPOOL_HIST_WHO)) {
uid_t who = fnvlist_lookup_uint64(rec, ZPOOL_HIST_WHO);
struct passwd *pwd = getpwuid(who);
(void) printf("user %d ", (int)who);
if (pwd != NULL)
(void) printf("(%s) ", pwd->pw_name);
}
if (nvlist_exists(rec, ZPOOL_HIST_HOST)) {
(void) printf("on %s",
fnvlist_lookup_string(rec, ZPOOL_HIST_HOST));
}
if (nvlist_exists(rec, ZPOOL_HIST_ZONE)) {
(void) printf(":%s",
fnvlist_lookup_string(rec, ZPOOL_HIST_ZONE));
}
(void) printf("]");
(void) printf("\n");
}
(void) printf("\n");
nvlist_free(nvhis);
return (ret);
}
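/*
 * Illustrative sketch (editorial, not part of this file): formatting a
 * ZPOOL_HIST_TIME value the way get_history_one() does, producing a
 * timestamp such as "2017-11-08.14:02:31".  The function name is
 * hypothetical.
 */
static void
example_format_hist_time(uint64_t hist_time)
{
	char tbuf[30] = "";
	time_t tsec = (time_t)hist_time;
	struct tm t;

	(void) localtime_r(&tsec, &t);
	(void) strftime(tbuf, sizeof (tbuf), "%F.%T", &t);
	(void) printf("%s\n", tbuf);
}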
/*
* zpool history <pool>
*
* Displays the history of commands that modified pools.
*/
int
zpool_do_history(int argc, char **argv)
{
hist_cbdata_t cbdata = { 0 };
int ret;
int c;
cbdata.first = B_TRUE;
/* check options */
while ((c = getopt(argc, argv, "li")) != -1) {
switch (c) {
case 'l':
cbdata.longfmt = B_TRUE;
break;
case 'i':
cbdata.internal = B_TRUE;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
ret = for_each_pool(argc, argv, B_FALSE, NULL, get_history_one,
&cbdata);
if (argc == 0 && cbdata.first == B_TRUE) {
(void) fprintf(stderr, gettext("no pools available\n"));
return (0);
}
return (ret);
}
typedef struct ev_opts {
int verbose;
int scripted;
int follow;
int clear;
char poolname[ZFS_MAX_DATASET_NAME_LEN];
} ev_opts_t;
static void
zpool_do_events_short(nvlist_t *nvl, ev_opts_t *opts)
{
char ctime_str[26], str[32], *ptr;
int64_t *tv;
uint_t n;
verify(nvlist_lookup_int64_array(nvl, FM_EREPORT_TIME, &tv, &n) == 0);
memset(str, ' ', 32);
(void) ctime_r((const time_t *)&tv[0], ctime_str);
(void) strncpy(str, ctime_str+4, 6); /* 'Jun 30' */
(void) strncpy(str+7, ctime_str+20, 4); /* '1993' */
(void) strncpy(str+12, ctime_str+11, 8); /* '21:49:08' */
(void) sprintf(str+20, ".%09lld", (longlong_t)tv[1]); /* '.123456789' */
if (opts->scripted)
(void) printf(gettext("%s\t"), str);
else
(void) printf(gettext("%s "), str);
verify(nvlist_lookup_string(nvl, FM_CLASS, &ptr) == 0);
(void) printf(gettext("%s\n"), ptr);
}
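/*
 * Worked example (editorial): for tv[0] corresponding to the ctime_r()
 * string "Wed Jun 30 21:49:08 1993\n" and tv[1] == 123456789, the copies
 * above assemble str as
 *
 *	"Jun 30 1993 21:49:08.123456789"
 *
 * i.e. month/day at offset 0, year at offset 7, time-of-day at offset 12,
 * and the nanosecond field appended at offset 20.
 */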
static void
zpool_do_events_nvprint(nvlist_t *nvl, int depth)
{
nvpair_t *nvp;
for (nvp = nvlist_next_nvpair(nvl, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(nvl, nvp)) {
data_type_t type = nvpair_type(nvp);
const char *name = nvpair_name(nvp);
boolean_t b;
uint8_t i8;
uint16_t i16;
uint32_t i32;
uint64_t i64;
char *str;
nvlist_t *cnv;
printf(gettext("%*s%s = "), depth, "", name);
switch (type) {
case DATA_TYPE_BOOLEAN:
printf(gettext("%s"), "1");
break;
case DATA_TYPE_BOOLEAN_VALUE:
(void) nvpair_value_boolean_value(nvp, &b);
printf(gettext("%s"), b ? "1" : "0");
break;
case DATA_TYPE_BYTE:
(void) nvpair_value_byte(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT8:
(void) nvpair_value_int8(nvp, (void *)&i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_UINT8:
(void) nvpair_value_uint8(nvp, &i8);
printf(gettext("0x%x"), i8);
break;
case DATA_TYPE_INT16:
(void) nvpair_value_int16(nvp, (void *)&i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_UINT16:
(void) nvpair_value_uint16(nvp, &i16);
printf(gettext("0x%x"), i16);
break;
case DATA_TYPE_INT32:
(void) nvpair_value_int32(nvp, (void *)&i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_UINT32:
(void) nvpair_value_uint32(nvp, &i32);
printf(gettext("0x%x"), i32);
break;
case DATA_TYPE_INT64:
(void) nvpair_value_int64(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_UINT64:
(void) nvpair_value_uint64(nvp, &i64);
/*
* translate vdev state values to readable
* strings to aid zpool events consumers
*/
if (strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_STATE) == 0 ||
strcmp(name,
FM_EREPORT_PAYLOAD_ZFS_VDEV_LASTSTATE) == 0) {
printf(gettext("\"%s\" (0x%llx)"),
zpool_state_to_name(i64, VDEV_AUX_NONE),
(u_longlong_t)i64);
} else {
printf(gettext("0x%llx"), (u_longlong_t)i64);
}
break;
case DATA_TYPE_HRTIME:
(void) nvpair_value_hrtime(nvp, (void *)&i64);
printf(gettext("0x%llx"), (u_longlong_t)i64);
break;
case DATA_TYPE_STRING:
(void) nvpair_value_string(nvp, &str);
printf(gettext("\"%s\""), str ? str : "<NULL>");
break;
case DATA_TYPE_NVLIST:
printf(gettext("(embedded nvlist)\n"));
(void) nvpair_value_nvlist(nvp, &cnv);
zpool_do_events_nvprint(cnv, depth + 8);
printf(gettext("%*s(end %s)"), depth, "", name);
break;
case DATA_TYPE_NVLIST_ARRAY: {
nvlist_t **val;
uint_t i, nelem;
(void) nvpair_value_nvlist_array(nvp, &val, &nelem);
printf(gettext("(%d embedded nvlists)\n"), nelem);
for (i = 0; i < nelem; i++) {
printf(gettext("%*s%s[%d] = %s\n"),
depth, "", name, i, "(embedded nvlist)");
zpool_do_events_nvprint(val[i], depth + 8);
printf(gettext("%*s(end %s[%i])\n"),
depth, "", name, i);
}
printf(gettext("%*s(end %s)\n"), depth, "", name);
}
break;
case DATA_TYPE_INT8_ARRAY: {
int8_t *val;
uint_t i, nelem;
(void) nvpair_value_int8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT8_ARRAY: {
uint8_t *val;
uint_t i, nelem;
(void) nvpair_value_uint8_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT16_ARRAY: {
int16_t *val;
uint_t i, nelem;
(void) nvpair_value_int16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT16_ARRAY: {
uint16_t *val;
uint_t i, nelem;
(void) nvpair_value_uint16_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT32_ARRAY: {
int32_t *val;
uint_t i, nelem;
(void) nvpair_value_int32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_UINT32_ARRAY: {
uint32_t *val;
uint_t i, nelem;
(void) nvpair_value_uint32_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%x "), val[i]);
break;
}
case DATA_TYPE_INT64_ARRAY: {
int64_t *val;
uint_t i, nelem;
(void) nvpair_value_int64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_UINT64_ARRAY: {
uint64_t *val;
uint_t i, nelem;
(void) nvpair_value_uint64_array(nvp, &val, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("0x%llx "),
(u_longlong_t)val[i]);
break;
}
case DATA_TYPE_STRING_ARRAY: {
char **str;
uint_t i, nelem;
(void) nvpair_value_string_array(nvp, &str, &nelem);
for (i = 0; i < nelem; i++)
printf(gettext("\"%s\" "),
str[i] ? str[i] : "<NULL>");
break;
}
case DATA_TYPE_BOOLEAN_ARRAY:
case DATA_TYPE_BYTE_ARRAY:
case DATA_TYPE_DOUBLE:
case DATA_TYPE_UNKNOWN:
printf(gettext("<unknown>"));
break;
}
printf(gettext("\n"));
}
}
static int
zpool_do_events_next(ev_opts_t *opts)
{
nvlist_t *nvl;
int zevent_fd, ret, dropped;
char *pool;
zevent_fd = open(ZFS_DEV, O_RDWR);
VERIFY(zevent_fd >= 0);
if (!opts->scripted)
(void) printf(gettext("%-30s %s\n"), "TIME", "CLASS");
while (1) {
ret = zpool_events_next(g_zfs, &nvl, &dropped,
(opts->follow ? ZEVENT_NONE : ZEVENT_NONBLOCK), zevent_fd);
if (ret || nvl == NULL)
break;
if (dropped > 0)
(void) printf(gettext("dropped %d events\n"), dropped);
if (strlen(opts->poolname) > 0 &&
nvlist_lookup_string(nvl, FM_FMRI_ZFS_POOL, &pool) == 0 &&
strcmp(opts->poolname, pool) != 0)
continue;
zpool_do_events_short(nvl, opts);
if (opts->verbose) {
zpool_do_events_nvprint(nvl, 8);
printf(gettext("\n"));
}
(void) fflush(stdout);
nvlist_free(nvl);
}
VERIFY(0 == close(zevent_fd));
return (ret);
}
static int
zpool_do_events_clear(ev_opts_t *opts)
{
int count, ret;
ret = zpool_events_clear(g_zfs, &count);
if (!ret)
(void) printf(gettext("cleared %d events\n"), count);
return (ret);
}
/*
* zpool events [-vHf [pool] | -c]
*
* Displays event logs generated by ZFS.
*/
int
zpool_do_events(int argc, char **argv)
{
ev_opts_t opts = { 0 };
int ret;
int c;
/* check options */
while ((c = getopt(argc, argv, "vHfc")) != -1) {
switch (c) {
case 'v':
opts.verbose = 1;
break;
case 'H':
opts.scripted = 1;
break;
case 'f':
opts.follow = 1;
break;
case 'c':
opts.clear = 1;
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc > 1) {
(void) fprintf(stderr, gettext("too many arguments\n"));
usage(B_FALSE);
} else if (argc == 1) {
(void) strlcpy(opts.poolname, argv[0], sizeof (opts.poolname));
if (!zfs_name_valid(opts.poolname, ZFS_TYPE_POOL)) {
(void) fprintf(stderr,
gettext("invalid pool name '%s'\n"), opts.poolname);
usage(B_FALSE);
}
}
if ((argc == 1 || opts.verbose || opts.scripted || opts.follow) &&
opts.clear) {
(void) fprintf(stderr,
gettext("invalid options combined with -c\n"));
usage(B_FALSE);
}
if (opts.clear)
ret = zpool_do_events_clear(&opts);
else
ret = zpool_do_events_next(&opts);
return (ret);
}
static int
get_callback(zpool_handle_t *zhp, void *data)
{
zprop_get_cbdata_t *cbp = (zprop_get_cbdata_t *)data;
char value[MAXNAMELEN];
zprop_source_t srctype;
zprop_list_t *pl;
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* Skip the special fake placeholder. This will also skip
* over the name property when 'all' is specified.
*/
if (pl->pl_prop == ZPOOL_PROP_NAME &&
pl == cbp->cb_proplist)
continue;
if (pl->pl_prop == ZPROP_INVAL &&
(zpool_prop_feature(pl->pl_user_prop) ||
zpool_prop_unsupported(pl->pl_user_prop))) {
srctype = ZPROP_SRC_LOCAL;
if (zpool_prop_get_feature(zhp, pl->pl_user_prop,
value, sizeof (value)) == 0) {
zprop_print_one_property(zpool_get_name(zhp),
cbp, pl->pl_user_prop, value, srctype,
NULL, NULL);
}
} else {
if (zpool_get_prop(zhp, pl->pl_prop, value,
sizeof (value), &srctype, cbp->cb_literal) != 0)
continue;
zprop_print_one_property(zpool_get_name(zhp), cbp,
zpool_prop_to_name(pl->pl_prop), value, srctype,
NULL, NULL);
}
}
return (0);
}
/*
* zpool get [-Hp] [-o "all" | field[,...]] <"all" | property[,...]> <pool> ...
*
* -H Scripted mode. Don't display headers, and separate properties
* by a single tab.
* -o List of columns to display. Defaults to
* "name,property,value,source".
* -p Display values in parsable (exact) format.
*
* Get properties of pools in the system. Output space statistics
* for each one as well as other attributes.
*/
int
zpool_do_get(int argc, char **argv)
{
zprop_get_cbdata_t cb = { 0 };
zprop_list_t fake_name = { 0 };
int ret;
int c, i;
char *value;
cb.cb_first = B_TRUE;
/*
* Set up default columns and sources.
*/
cb.cb_sources = ZPROP_SRC_ALL;
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
cb.cb_type = ZFS_TYPE_POOL;
/* check options */
while ((c = getopt(argc, argv, ":Hpo:")) != -1) {
switch (c) {
case 'p':
cb.cb_literal = B_TRUE;
break;
case 'H':
cb.cb_scripted = B_TRUE;
break;
case 'o':
bzero(&cb.cb_columns, sizeof (cb.cb_columns));
i = 0;
while (*optarg != '\0') {
static char *col_subopts[] =
{ "name", "property", "value", "source",
"all", NULL };
if (i == ZFS_GET_NCOLS) {
(void) fprintf(stderr, gettext("too "
"many fields given to -o "
"option\n"));
usage(B_FALSE);
}
switch (getsubopt(&optarg, col_subopts,
&value)) {
case 0:
cb.cb_columns[i++] = GET_COL_NAME;
break;
case 1:
cb.cb_columns[i++] = GET_COL_PROPERTY;
break;
case 2:
cb.cb_columns[i++] = GET_COL_VALUE;
break;
case 3:
cb.cb_columns[i++] = GET_COL_SOURCE;
break;
case 4:
if (i > 0) {
(void) fprintf(stderr,
gettext("\"all\" conflicts "
"with specific fields "
"given to -o option\n"));
usage(B_FALSE);
}
cb.cb_columns[0] = GET_COL_NAME;
cb.cb_columns[1] = GET_COL_PROPERTY;
cb.cb_columns[2] = GET_COL_VALUE;
cb.cb_columns[3] = GET_COL_SOURCE;
i = ZFS_GET_NCOLS;
break;
default:
(void) fprintf(stderr,
gettext("invalid column name "
"'%s'\n"), value);
usage(B_FALSE);
}
}
break;
case '?':
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
optopt);
usage(B_FALSE);
}
}
argc -= optind;
argv += optind;
if (argc < 1) {
(void) fprintf(stderr, gettext("missing property "
"argument\n"));
usage(B_FALSE);
}
if (zprop_get_list(g_zfs, argv[0], &cb.cb_proplist,
ZFS_TYPE_POOL) != 0)
usage(B_FALSE);
argc--;
argv++;
if (cb.cb_proplist != NULL) {
fake_name.pl_prop = ZPOOL_PROP_NAME;
fake_name.pl_width = strlen(gettext("NAME"));
fake_name.pl_next = cb.cb_proplist;
cb.cb_proplist = &fake_name;
}
ret = for_each_pool(argc, argv, B_TRUE, &cb.cb_proplist,
get_callback, &cb);
if (cb.cb_proplist == &fake_name)
zprop_free_list(fake_name.pl_next);
else
zprop_free_list(cb.cb_proplist);
return (ret);
}
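/*
 * Illustrative sketch (editorial, not part of this file): how getsubopt(3)
 * walks a comma-separated "-o" argument such as "name,value", mirroring the
 * column parsing in zpool_do_get().  The function name and printed output
 * are hypothetical.
 */
static void
example_parse_columns(char *arg)
{
	static char *tokens[] =
	    { "name", "property", "value", "source", "all", NULL };
	char *value;

	while (*arg != '\0') {
		switch (getsubopt(&arg, tokens, &value)) {
		case 0:
			(void) printf("column: name\n");
			break;
		case 1:
			(void) printf("column: property\n");
			break;
		case 2:
			(void) printf("column: value\n");
			break;
		case 3:
			(void) printf("column: source\n");
			break;
		case 4:
			(void) printf("all columns\n");
			break;
		default:
			(void) printf("unknown column '%s'\n", value);
			break;
		}
	}
}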
typedef struct set_cbdata {
char *cb_propname;
char *cb_value;
boolean_t cb_any_successful;
} set_cbdata_t;
int
set_callback(zpool_handle_t *zhp, void *data)
{
int error;
set_cbdata_t *cb = (set_cbdata_t *)data;
error = zpool_set_prop(zhp, cb->cb_propname, cb->cb_value);
if (!error)
cb->cb_any_successful = B_TRUE;
return (error);
}
int
zpool_do_set(int argc, char **argv)
{
set_cbdata_t cb = { 0 };
int error;
if (argc > 1 && argv[1][0] == '-') {
(void) fprintf(stderr, gettext("invalid option '%c'\n"),
argv[1][1]);
usage(B_FALSE);
}
if (argc < 2) {
(void) fprintf(stderr, gettext("missing property=value "
"argument\n"));
usage(B_FALSE);
}
if (argc < 3) {
(void) fprintf(stderr, gettext("missing pool name\n"));
usage(B_FALSE);
}
if (argc > 3) {
(void) fprintf(stderr, gettext("too many pool names\n"));
usage(B_FALSE);
}
cb.cb_propname = argv[1];
cb.cb_value = strchr(cb.cb_propname, '=');
if (cb.cb_value == NULL) {
(void) fprintf(stderr, gettext("missing value in "
"property=value argument\n"));
usage(B_FALSE);
}
*(cb.cb_value) = '\0';
cb.cb_value++;
error = for_each_pool(argc - 2, argv + 2, B_TRUE, NULL,
set_callback, &cb);
return (error);
}
static int
find_command_idx(char *command, int *idx)
{
int i;
for (i = 0; i < NCOMMAND; i++) {
if (command_table[i].name == NULL)
continue;
if (strcmp(command, command_table[i].name) == 0) {
*idx = i;
return (0);
}
}
return (1);
}
int
main(int argc, char **argv)
{
int ret = 0;
int i = 0;
char *cmdname;
(void) setlocale(LC_ALL, "");
(void) textdomain(TEXT_DOMAIN);
srand(time(NULL));
opterr = 0;
/*
* Make sure the user has specified some command.
*/
if (argc < 2) {
(void) fprintf(stderr, gettext("missing command\n"));
usage(B_FALSE);
}
cmdname = argv[1];
/*
* Special case '-?'
*/
if ((strcmp(cmdname, "-?") == 0) || strcmp(cmdname, "--help") == 0)
usage(B_TRUE);
if ((g_zfs = libzfs_init()) == NULL) {
(void) fprintf(stderr, "%s", libzfs_error_init(errno));
return (1);
}
libzfs_print_on_error(g_zfs, B_TRUE);
zfs_save_arguments(argc, argv, history_str, sizeof (history_str));
/*
* Run the appropriate command.
*/
if (find_command_idx(cmdname, &i) == 0) {
current_command = &command_table[i];
ret = command_table[i].func(argc - 1, argv + 1);
} else if (strchr(cmdname, '=')) {
verify(find_command_idx("set", &i) == 0);
current_command = &command_table[i];
ret = command_table[i].func(argc, argv);
} else if (strcmp(cmdname, "freeze") == 0 && argc == 3) {
/*
* 'freeze' is a vile debugging abomination, so we treat
* it as such.
*/
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, argv[2], sizeof (zc.zc_name));
ret = zfs_ioctl(g_zfs, ZFS_IOC_POOL_FREEZE, &zc);
if (ret != 0) {
(void) fprintf(stderr,
gettext("failed to freeze pool: %d\n"), errno);
ret = 1;
}
log_history = 0;
} else {
(void) fprintf(stderr, gettext("unrecognized "
"command '%s'\n"), cmdname);
usage(B_FALSE);
ret = 1;
}
if (ret == 0 && log_history)
(void) zpool_log_history(g_zfs, history_str);
libzfs_fini(g_zfs);
/*
* The 'ZFS_ABORT' environment variable causes us to dump core on exit
* for the purposes of running ::findleaks.
*/
if (getenv("ZFS_ABORT") != NULL) {
(void) printf("dumping core by request\n");
abort();
}
return (ret);
}
diff --git a/cmd/ztest/ztest.c b/cmd/ztest/ztest.c
index 6f085ea306a5..0e3459d324b4 100644
--- a/cmd/ztest/ztest.c
+++ b/cmd/ztest/ztest.c
@@ -1,7343 +1,7414 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
*/
/*
* The objective of this program is to provide a DMU/ZAP/SPA stress test
* that runs entirely in userland, is easy to use, and easy to extend.
*
* The overall design of the ztest program is as follows:
*
* (1) For each major functional area (e.g. adding vdevs to a pool,
* creating and destroying datasets, reading and writing objects, etc)
* we have a simple routine to test that functionality. These
* individual routines do not have to do anything "stressful".
*
* (2) We turn these simple functionality tests into a stress test by
* running them all in parallel, with as many threads as desired,
* and spread across as many datasets, objects, and vdevs as desired.
*
* (3) While all this is happening, we inject faults into the pool to
* verify that self-healing data really works.
*
* (4) Every time we open a dataset, we change its checksum and compression
* functions. Thus even individual objects vary from block to block
* in which checksum they use and whether they're compressed.
*
* (5) To verify that we never lose on-disk consistency after a crash,
* we run the entire test in a child of the main process.
* At random times, the child self-immolates with a SIGKILL.
* This is the software equivalent of pulling the power cord.
* The parent then runs the test again, using the existing
* storage pool, as many times as desired. If backwards compatibility
* testing is enabled ztest will sometimes run the "older" version
* of ztest after a SIGKILL.
*
* (6) To verify that we don't have future leaks or temporal incursions,
* many of the functional tests record the transaction group number
* as part of their data. When reading old data, they verify that
* the transaction group number is less than the current, open txg.
* If you add a new test, please do this if applicable.
*
* (7) Threads are created with a reduced stack size, for sanity checking.
* Therefore, it's important not to allocate huge buffers on the stack.
*
* When run with no arguments, ztest runs for about five minutes and
* produces no output if successful. To get a little bit of information,
* specify -V. To get more information, specify -VV, and so on.
*
* To turn this into an overnight stress test, use -T to specify run time.
*
* You can ask for more vdevs [-v], datasets [-d], or threads [-t]
* to increase the pool capacity, fanout, and overall stress level.
*
* Use the -k option to set the desired frequency of kills.
*
* When ztest invokes itself it passes all relevant information through a
* temporary file which is mmap-ed in the child process. This allows shared
* memory to survive the exec syscall. The ztest_shared_hdr_t struct is always
* stored at offset 0 of this file and contains information on the size and
* number of shared structures in the file. The information stored in this file
* must remain backwards compatible with older versions of ztest so that
* ztest can invoke them during backwards compatibility testing (-B).
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/dmu_objset.h>
#include <sys/poll.h>
#include <sys/stat.h>
#include <sys/time.h>
#include <sys/wait.h>
#include <sys/mman.h>
#include <sys/resource.h>
#include <sys/zio.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/zfs_rlock.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/spa_impl.h>
#include <sys/metaslab_impl.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_scan.h>
#include <sys/zio_checksum.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
#include <sys/dsl_userhold.h>
#include <sys/abd.h>
#include <stdio.h>
#include <stdio_ext.h>
#include <stdlib.h>
#include <unistd.h>
#include <signal.h>
#include <umem.h>
#include <ctype.h>
#include <math.h>
#include <sys/fs/zfs.h>
#include <zfs_fletcher.h>
#include <libnvpair.h>
#include <libzfs.h>
#ifdef __GLIBC__
#include <execinfo.h> /* for backtrace() */
#endif
static int ztest_fd_data = -1;
static int ztest_fd_rand = -1;
typedef struct ztest_shared_hdr {
uint64_t zh_hdr_size;
uint64_t zh_opts_size;
uint64_t zh_size;
uint64_t zh_stats_size;
uint64_t zh_stats_count;
uint64_t zh_ds_size;
uint64_t zh_ds_count;
} ztest_shared_hdr_t;
static ztest_shared_hdr_t *ztest_shared_hdr;
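/*
 * Illustrative sketch (editorial, not part of this file): the shared header
 * above lives at offset 0 of a temporary file that is mmap()-ed MAP_SHARED,
 * which is how the shared state survives the exec of a child ztest.  The
 * function name and error handling are hypothetical.
 */
static ztest_shared_hdr_t *
example_map_shared_hdr(int fd)
{
	ztest_shared_hdr_t *hdr;

	hdr = mmap(NULL, sizeof (ztest_shared_hdr_t),
	    PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (hdr == MAP_FAILED)
		return (NULL);
	return (hdr);
}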
typedef struct ztest_shared_opts {
char zo_pool[ZFS_MAX_DATASET_NAME_LEN];
char zo_dir[ZFS_MAX_DATASET_NAME_LEN];
char zo_alt_ztest[MAXNAMELEN];
char zo_alt_libpath[MAXNAMELEN];
uint64_t zo_vdevs;
uint64_t zo_vdevtime;
size_t zo_vdev_size;
int zo_ashift;
int zo_mirrors;
int zo_raidz;
int zo_raidz_parity;
int zo_datasets;
int zo_threads;
uint64_t zo_passtime;
uint64_t zo_killrate;
int zo_verbose;
int zo_init;
uint64_t zo_time;
uint64_t zo_maxloops;
uint64_t zo_metaslab_gang_bang;
int zo_mmp_test;
} ztest_shared_opts_t;
static const ztest_shared_opts_t ztest_opts_defaults = {
.zo_pool = { 'z', 't', 'e', 's', 't', '\0' },
.zo_dir = { '/', 't', 'm', 'p', '\0' },
.zo_alt_ztest = { '\0' },
.zo_alt_libpath = { '\0' },
.zo_vdevs = 5,
.zo_ashift = SPA_MINBLOCKSHIFT,
.zo_mirrors = 2,
.zo_raidz = 4,
.zo_raidz_parity = 1,
.zo_vdev_size = SPA_MINDEVSIZE * 4, /* 256m default size */
.zo_datasets = 7,
.zo_threads = 23,
.zo_passtime = 60, /* 60 seconds */
.zo_killrate = 70, /* 70% kill rate */
.zo_verbose = 0,
.zo_mmp_test = 0,
.zo_init = 1,
.zo_time = 300, /* 5 minutes */
.zo_maxloops = 50, /* max loops during spa_freeze() */
.zo_metaslab_gang_bang = 32 << 10
};
extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
extern unsigned long zfs_deadman_synctime_ms;
extern int metaslab_preload_limit;
extern boolean_t zfs_compressed_arc_enabled;
extern int zfs_abd_scatter_enabled;
extern int dmu_object_alloc_chunk_shift;
static ztest_shared_opts_t *ztest_shared_opts;
static ztest_shared_opts_t ztest_opts;
static char *ztest_wkeydata = "abcdefghijklmnopqrstuvwxyz012345";
typedef struct ztest_shared_ds {
uint64_t zd_seq;
} ztest_shared_ds_t;
static ztest_shared_ds_t *ztest_shared_ds;
#define ZTEST_GET_SHARED_DS(d) (&ztest_shared_ds[d])
#define BT_MAGIC 0x123456789abcdefULL
#define MAXFAULTS(zs) \
(MAX((zs)->zs_mirrors, 1) * (ztest_opts.zo_raidz_parity + 1) - 1)
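/*
 * With the default configuration (2-way mirrors, raidz parity 1), for
 * example, MAXFAULTS() evaluates to 2 * (1 + 1) - 1 = 3 tolerable faults.
 */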
enum ztest_io_type {
ZTEST_IO_WRITE_TAG,
ZTEST_IO_WRITE_PATTERN,
ZTEST_IO_WRITE_ZEROES,
ZTEST_IO_TRUNCATE,
ZTEST_IO_SETATTR,
ZTEST_IO_REWRITE,
ZTEST_IO_TYPES
};
typedef struct ztest_block_tag {
uint64_t bt_magic;
uint64_t bt_objset;
uint64_t bt_object;
uint64_t bt_dnodesize;
uint64_t bt_offset;
uint64_t bt_gen;
uint64_t bt_txg;
uint64_t bt_crtxg;
} ztest_block_tag_t;
typedef struct bufwad {
uint64_t bw_index;
uint64_t bw_txg;
uint64_t bw_data;
} bufwad_t;
typedef struct rll {
void *rll_writer;
int rll_readers;
kmutex_t rll_lock;
kcondvar_t rll_cv;
} rll_t;
typedef struct zll {
list_t z_list;
kmutex_t z_lock;
} zll_t;
#define ZTEST_RANGE_LOCKS 64
#define ZTEST_OBJECT_LOCKS 64
/*
* Object descriptor. Used as a template for object lookup/create/remove.
*/
typedef struct ztest_od {
uint64_t od_dir;
uint64_t od_object;
dmu_object_type_t od_type;
dmu_object_type_t od_crtype;
uint64_t od_blocksize;
uint64_t od_crblocksize;
uint64_t od_crdnodesize;
uint64_t od_gen;
uint64_t od_crgen;
char od_name[ZFS_MAX_DATASET_NAME_LEN];
} ztest_od_t;
/*
* Per-dataset state.
*/
typedef struct ztest_ds {
ztest_shared_ds_t *zd_shared;
objset_t *zd_os;
rwlock_t zd_zilog_lock;
zilog_t *zd_zilog;
ztest_od_t *zd_od; /* debugging aid */
char zd_name[ZFS_MAX_DATASET_NAME_LEN];
kmutex_t zd_dirobj_lock;
rll_t zd_object_lock[ZTEST_OBJECT_LOCKS];
zll_t zd_range_lock[ZTEST_RANGE_LOCKS];
} ztest_ds_t;
/*
* Per-iteration state.
*/
typedef void ztest_func_t(ztest_ds_t *zd, uint64_t id);
typedef struct ztest_info {
ztest_func_t *zi_func; /* test function */
uint64_t zi_iters; /* iterations per execution */
uint64_t *zi_interval; /* execute every <interval> seconds */
const char *zi_funcname; /* name of test function */
} ztest_info_t;
typedef struct ztest_shared_callstate {
uint64_t zc_count; /* per-pass count */
uint64_t zc_time; /* per-pass time */
uint64_t zc_next; /* next time to call this function */
} ztest_shared_callstate_t;
static ztest_shared_callstate_t *ztest_shared_callstate;
#define ZTEST_GET_SHARED_CALLSTATE(c) (&ztest_shared_callstate[c])
ztest_func_t ztest_dmu_read_write;
ztest_func_t ztest_dmu_write_parallel;
ztest_func_t ztest_dmu_object_alloc_free;
ztest_func_t ztest_dmu_object_next_chunk;
ztest_func_t ztest_dmu_commit_callbacks;
ztest_func_t ztest_zap;
ztest_func_t ztest_zap_parallel;
ztest_func_t ztest_zil_commit;
ztest_func_t ztest_zil_remount;
ztest_func_t ztest_dmu_read_write_zcopy;
ztest_func_t ztest_dmu_objset_create_destroy;
ztest_func_t ztest_dmu_prealloc;
ztest_func_t ztest_fzap;
ztest_func_t ztest_dmu_snapshot_create_destroy;
ztest_func_t ztest_dsl_prop_get_set;
ztest_func_t ztest_spa_prop_get_set;
ztest_func_t ztest_spa_create_destroy;
ztest_func_t ztest_fault_inject;
ztest_func_t ztest_ddt_repair;
ztest_func_t ztest_dmu_snapshot_hold;
ztest_func_t ztest_mmp_enable_disable;
ztest_func_t ztest_spa_rename;
ztest_func_t ztest_scrub;
ztest_func_t ztest_dsl_dataset_promote_busy;
ztest_func_t ztest_vdev_attach_detach;
ztest_func_t ztest_vdev_LUN_growth;
ztest_func_t ztest_vdev_add_remove;
ztest_func_t ztest_vdev_aux_add_remove;
ztest_func_t ztest_split_pool;
ztest_func_t ztest_reguid;
ztest_func_t ztest_spa_upgrade;
+ztest_func_t ztest_device_removal;
+ztest_func_t ztest_remap_blocks;
ztest_func_t ztest_fletcher;
ztest_func_t ztest_fletcher_incr;
ztest_func_t ztest_verify_dnode_bt;
uint64_t zopt_always = 0ULL * NANOSEC; /* all the time */
uint64_t zopt_incessant = 1ULL * NANOSEC / 10; /* every 1/10 second */
uint64_t zopt_often = 1ULL * NANOSEC; /* every second */
uint64_t zopt_sometimes = 10ULL * NANOSEC; /* every 10 seconds */
uint64_t zopt_rarely = 60ULL * NANOSEC; /* every 60 seconds */
#define ZTI_INIT(func, iters, interval) \
{ .zi_func = (func), \
.zi_iters = (iters), \
.zi_interval = (interval), \
.zi_funcname = # func }
ztest_info_t ztest_info[] = {
ZTI_INIT(ztest_dmu_read_write, 1, &zopt_always),
ZTI_INIT(ztest_dmu_write_parallel, 10, &zopt_always),
ZTI_INIT(ztest_dmu_object_alloc_free, 1, &zopt_always),
ZTI_INIT(ztest_dmu_object_next_chunk, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_commit_callbacks, 1, &zopt_always),
ZTI_INIT(ztest_zap, 30, &zopt_always),
ZTI_INIT(ztest_zap_parallel, 100, &zopt_always),
ZTI_INIT(ztest_split_pool, 1, &zopt_always),
ZTI_INIT(ztest_zil_commit, 1, &zopt_incessant),
ZTI_INIT(ztest_zil_remount, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_read_write_zcopy, 1, &zopt_often),
ZTI_INIT(ztest_dmu_objset_create_destroy, 1, &zopt_often),
ZTI_INIT(ztest_dsl_prop_get_set, 1, &zopt_often),
ZTI_INIT(ztest_spa_prop_get_set, 1, &zopt_sometimes),
#if 0
ZTI_INIT(ztest_dmu_prealloc, 1, &zopt_sometimes),
#endif
ZTI_INIT(ztest_fzap, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_snapshot_create_destroy, 1, &zopt_sometimes),
ZTI_INIT(ztest_spa_create_destroy, 1, &zopt_sometimes),
ZTI_INIT(ztest_fault_inject, 1, &zopt_sometimes),
ZTI_INIT(ztest_ddt_repair, 1, &zopt_sometimes),
ZTI_INIT(ztest_dmu_snapshot_hold, 1, &zopt_sometimes),
ZTI_INIT(ztest_mmp_enable_disable, 1, &zopt_sometimes),
ZTI_INIT(ztest_reguid, 1, &zopt_rarely),
ZTI_INIT(ztest_spa_rename, 1, &zopt_rarely),
ZTI_INIT(ztest_scrub, 1, &zopt_rarely),
ZTI_INIT(ztest_spa_upgrade, 1, &zopt_rarely),
ZTI_INIT(ztest_dsl_dataset_promote_busy, 1, &zopt_rarely),
ZTI_INIT(ztest_vdev_attach_detach, 1, &zopt_sometimes),
ZTI_INIT(ztest_vdev_LUN_growth, 1, &zopt_rarely),
ZTI_INIT(ztest_vdev_add_remove, 1, &ztest_opts.zo_vdevtime),
ZTI_INIT(ztest_vdev_aux_add_remove, 1, &ztest_opts.zo_vdevtime),
+ ZTI_INIT(ztest_device_removal, 1, &zopt_sometimes),
+ ZTI_INIT(ztest_remap_blocks, 1, &zopt_sometimes),
ZTI_INIT(ztest_fletcher, 1, &zopt_rarely),
ZTI_INIT(ztest_fletcher_incr, 1, &zopt_rarely),
ZTI_INIT(ztest_verify_dnode_bt, 1, &zopt_sometimes),
};
#define ZTEST_FUNCS (sizeof (ztest_info) / sizeof (ztest_info_t))
/*
* The following struct is used to hold a list of uncalled commit callbacks.
* The callbacks are ordered by txg number.
*/
typedef struct ztest_cb_list {
kmutex_t zcl_callbacks_lock;
list_t zcl_callbacks;
} ztest_cb_list_t;
/*
* Stuff we need to share writably between parent and child.
*/
typedef struct ztest_shared {
boolean_t zs_do_init;
hrtime_t zs_proc_start;
hrtime_t zs_proc_stop;
hrtime_t zs_thread_start;
hrtime_t zs_thread_stop;
hrtime_t zs_thread_kill;
uint64_t zs_enospc_count;
uint64_t zs_vdev_next_leaf;
uint64_t zs_vdev_aux;
uint64_t zs_alloc;
uint64_t zs_space;
uint64_t zs_splits;
uint64_t zs_mirrors;
uint64_t zs_metaslab_sz;
uint64_t zs_metaslab_df_alloc_threshold;
uint64_t zs_guid;
} ztest_shared_t;
#define ID_PARALLEL -1ULL
static char ztest_dev_template[] = "%s/%s.%llua";
static char ztest_aux_template[] = "%s/%s.%s.%llu";
ztest_shared_t *ztest_shared;
static spa_t *ztest_spa = NULL;
static ztest_ds_t *ztest_ds;
static kmutex_t ztest_vdev_lock;
/*
* The ztest_name_lock protects the pool and dataset namespace used by
* the individual tests. To modify the namespace, consumers must grab
* this lock as writer. Grabbing the lock as reader will ensure that the
* namespace does not change while the lock is held.
*/
static rwlock_t ztest_name_lock;
static boolean_t ztest_dump_core = B_TRUE;
static boolean_t ztest_dump_debug_buffer = B_FALSE;
static boolean_t ztest_exiting;
/* Global commit callback list */
static ztest_cb_list_t zcl;
/* Commit cb delay */
static uint64_t zc_min_txg_delay = UINT64_MAX;
static int zc_cb_counter = 0;
/*
* Minimum number of commit callbacks that need to be registered for us to check
* whether the minimum txg delay is acceptable.
*/
#define ZTEST_COMMIT_CB_MIN_REG 100
/*
* If a number of txgs equal to this threshold have been created after a commit
* callback has been registered but not called, then we assume there is an
* implementation bug.
*/
#define ZTEST_COMMIT_CB_THRESH (TXG_CONCURRENT_STATES + 1000)
extern uint64_t metaslab_gang_bang;
extern uint64_t metaslab_df_alloc_threshold;
enum ztest_object {
ZTEST_META_DNODE = 0,
ZTEST_DIROBJ,
ZTEST_OBJECTS
};
static void usage(boolean_t) __NORETURN;
/*
* These libumem hooks provide a reasonable set of defaults for the allocator's
* debugging facilities.
*/
const char *
_umem_debug_init(void)
{
return ("default,verbose"); /* $UMEM_DEBUG setting */
}
const char *
_umem_logging_init(void)
{
return ("fail,contents"); /* $UMEM_LOGGING setting */
}
static void
dump_debug_buffer(void)
{
if (!ztest_dump_debug_buffer)
return;
(void) printf("\n");
zfs_dbgmsg_print("ztest");
}
#define BACKTRACE_SZ 100
static void sig_handler(int signo)
{
struct sigaction action;
#ifdef __GLIBC__ /* backtrace() is a GNU extension */
int nptrs;
void *buffer[BACKTRACE_SZ];
nptrs = backtrace(buffer, BACKTRACE_SZ);
backtrace_symbols_fd(buffer, nptrs, STDERR_FILENO);
#endif
dump_debug_buffer();
/*
* Restore default action and re-raise signal so SIGSEGV and
* SIGABRT can trigger a core dump.
*/
action.sa_handler = SIG_DFL;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
(void) sigaction(signo, &action, NULL);
raise(signo);
}
#define FATAL_MSG_SZ 1024
char *fatal_msg;
static void
fatal(int do_perror, char *message, ...)
{
va_list args;
int save_errno = errno;
char *buf;
(void) fflush(stdout);
buf = umem_alloc(FATAL_MSG_SZ, UMEM_NOFAIL);
va_start(args, message);
(void) sprintf(buf, "ztest: ");
/* LINTED */
(void) vsprintf(buf + strlen(buf), message, args);
va_end(args);
if (do_perror) {
(void) snprintf(buf + strlen(buf), FATAL_MSG_SZ - strlen(buf),
": %s", strerror(save_errno));
}
(void) fprintf(stderr, "%s\n", buf);
fatal_msg = buf; /* to ease debugging */
dump_debug_buffer();
if (ztest_dump_core)
abort();
exit(3);
}
static int
str2shift(const char *buf)
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
for (i = 0; i < strlen(ends); i++) {
if (toupper(buf[0]) == ends[i])
break;
}
if (i == strlen(ends)) {
(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n",
buf);
usage(B_FALSE);
}
if (buf[1] == '\0' || (toupper(buf[1]) == 'B' && buf[2] == '\0')) {
return (10*i);
}
(void) fprintf(stderr, "ztest: invalid bytes suffix: %s\n", buf);
usage(B_FALSE);
/* NOTREACHED */
}
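/*
 * Convert a human-readable size string to a uint64_t, honoring the
 * power-of-2 suffixes accepted by str2shift() above. For example, "128k"
 * yields 131072 and "1.5G" yields 1610612736.
 */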
static uint64_t
nicenumtoull(const char *buf)
{
char *end;
uint64_t val;
val = strtoull(buf, &end, 0);
if (end == buf) {
(void) fprintf(stderr, "ztest: bad numeric value: %s\n", buf);
usage(B_FALSE);
} else if (end[0] == '.') {
double fval = strtod(buf, &end);
fval *= pow(2, str2shift(end));
if (fval > UINT64_MAX) {
(void) fprintf(stderr, "ztest: value too large: %s\n",
buf);
usage(B_FALSE);
}
val = (uint64_t)fval;
} else {
int shift = str2shift(end);
if (shift >= 64 || (val << shift) >> shift != val) {
(void) fprintf(stderr, "ztest: value too large: %s\n",
buf);
usage(B_FALSE);
}
val <<= shift;
}
return (val);
}
static void
usage(boolean_t requested)
{
const ztest_shared_opts_t *zo = &ztest_opts_defaults;
char nice_vdev_size[NN_NUMBUF_SZ];
char nice_gang_bang[NN_NUMBUF_SZ];
FILE *fp = requested ? stdout : stderr;
nicenum(zo->zo_vdev_size, nice_vdev_size, sizeof (nice_vdev_size));
nicenum(zo->zo_metaslab_gang_bang, nice_gang_bang,
sizeof (nice_gang_bang));
(void) fprintf(fp, "Usage: %s\n"
"\t[-v vdevs (default: %llu)]\n"
"\t[-s size_of_each_vdev (default: %s)]\n"
"\t[-a alignment_shift (default: %d)] use 0 for random\n"
"\t[-m mirror_copies (default: %d)]\n"
"\t[-r raidz_disks (default: %d)]\n"
"\t[-R raidz_parity (default: %d)]\n"
"\t[-d datasets (default: %d)]\n"
"\t[-t threads (default: %d)]\n"
"\t[-g gang_block_threshold (default: %s)]\n"
"\t[-i init_count (default: %d)] initialize pool i times\n"
"\t[-k kill_percentage (default: %llu%%)]\n"
"\t[-p pool_name (default: %s)]\n"
"\t[-f dir (default: %s)] file directory for vdev files\n"
"\t[-M] Multi-host simulate pool imported on remote host\n"
"\t[-V] verbose (use multiple times for ever more blather)\n"
"\t[-E] use existing pool instead of creating new one\n"
"\t[-T time (default: %llu sec)] total run time\n"
"\t[-F freezeloops (default: %llu)] max loops in spa_freeze()\n"
"\t[-P passtime (default: %llu sec)] time per pass\n"
"\t[-B alt_ztest (default: <none>)] alternate ztest path\n"
"\t[-o variable=value] ... set global variable to an unsigned\n"
"\t 32-bit integer value\n"
"\t[-G dump zfs_dbgmsg buffer before exiting due to an error\n"
"\t[-h] (print help)\n"
"",
zo->zo_pool,
(u_longlong_t)zo->zo_vdevs, /* -v */
nice_vdev_size, /* -s */
zo->zo_ashift, /* -a */
zo->zo_mirrors, /* -m */
zo->zo_raidz, /* -r */
zo->zo_raidz_parity, /* -R */
zo->zo_datasets, /* -d */
zo->zo_threads, /* -t */
nice_gang_bang, /* -g */
zo->zo_init, /* -i */
(u_longlong_t)zo->zo_killrate, /* -k */
zo->zo_pool, /* -p */
zo->zo_dir, /* -f */
(u_longlong_t)zo->zo_time, /* -T */
(u_longlong_t)zo->zo_maxloops, /* -F */
(u_longlong_t)zo->zo_passtime);
exit(requested ? 0 : 1);
}
static void
process_options(int argc, char **argv)
{
char *path;
ztest_shared_opts_t *zo = &ztest_opts;
int opt;
uint64_t value;
char altdir[MAXNAMELEN] = { 0 };
bcopy(&ztest_opts_defaults, zo, sizeof (*zo));
while ((opt = getopt(argc, argv,
"v:s:a:m:r:R:d:t:g:i:k:p:f:MVET:P:hF:B:o:G")) != EOF) {
value = 0;
switch (opt) {
case 'v':
case 's':
case 'a':
case 'm':
case 'r':
case 'R':
case 'd':
case 't':
case 'g':
case 'i':
case 'k':
case 'T':
case 'P':
case 'F':
value = nicenumtoull(optarg);
}
switch (opt) {
case 'v':
zo->zo_vdevs = value;
break;
case 's':
zo->zo_vdev_size = MAX(SPA_MINDEVSIZE, value);
break;
case 'a':
zo->zo_ashift = value;
break;
case 'm':
zo->zo_mirrors = value;
break;
case 'r':
zo->zo_raidz = MAX(1, value);
break;
case 'R':
zo->zo_raidz_parity = MIN(MAX(value, 1), 3);
break;
case 'd':
zo->zo_datasets = MAX(1, value);
break;
case 't':
zo->zo_threads = MAX(1, value);
break;
case 'g':
zo->zo_metaslab_gang_bang = MAX(SPA_MINBLOCKSIZE << 1,
value);
break;
case 'i':
zo->zo_init = value;
break;
case 'k':
zo->zo_killrate = value;
break;
case 'p':
(void) strlcpy(zo->zo_pool, optarg,
sizeof (zo->zo_pool));
break;
case 'f':
path = realpath(optarg, NULL);
if (path == NULL) {
(void) fprintf(stderr, "error: %s: %s\n",
optarg, strerror(errno));
usage(B_FALSE);
} else {
(void) strlcpy(zo->zo_dir, path,
sizeof (zo->zo_dir));
free(path);
}
break;
case 'M':
zo->zo_mmp_test = 1;
break;
case 'V':
zo->zo_verbose++;
break;
case 'E':
zo->zo_init = 0;
break;
case 'T':
zo->zo_time = value;
break;
case 'P':
zo->zo_passtime = MAX(1, value);
break;
case 'F':
zo->zo_maxloops = MAX(1, value);
break;
case 'B':
(void) strlcpy(altdir, optarg, sizeof (altdir));
break;
case 'o':
if (set_global_var(optarg) != 0)
usage(B_FALSE);
break;
case 'G':
ztest_dump_debug_buffer = B_TRUE;
break;
case 'h':
usage(B_TRUE);
break;
case '?':
default:
usage(B_FALSE);
break;
}
}
zo->zo_raidz_parity = MIN(zo->zo_raidz_parity, zo->zo_raidz - 1);
zo->zo_vdevtime =
(zo->zo_vdevs > 0 ? zo->zo_time * NANOSEC / zo->zo_vdevs :
UINT64_MAX >> 2);
if (strlen(altdir) > 0) {
char *cmd;
char *realaltdir;
char *bin;
char *ztest;
char *isa;
int isalen;
cmd = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
realaltdir = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
VERIFY(NULL != realpath(getexecname(), cmd));
if (0 != access(altdir, F_OK)) {
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "invalid alternate ztest path: %s",
altdir);
}
VERIFY(NULL != realpath(altdir, realaltdir));
/*
* 'cmd' should be of the form "<anything>/usr/bin/<isa>/ztest".
* We want to extract <isa> to determine if we should use
* 32 or 64 bit binaries.
*/
bin = strstr(cmd, "/usr/bin/");
ztest = strstr(bin, "/ztest");
isa = bin + 9;
isalen = ztest - isa;
(void) snprintf(zo->zo_alt_ztest, sizeof (zo->zo_alt_ztest),
"%s/usr/bin/%.*s/ztest", realaltdir, isalen, isa);
(void) snprintf(zo->zo_alt_libpath, sizeof (zo->zo_alt_libpath),
"%s/usr/lib/%.*s", realaltdir, isalen, isa);
if (0 != access(zo->zo_alt_ztest, X_OK)) {
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "invalid alternate ztest: %s",
zo->zo_alt_ztest);
} else if (0 != access(zo->zo_alt_libpath, X_OK)) {
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "invalid alternate lib directory %s",
zo->zo_alt_libpath);
}
umem_free(cmd, MAXPATHLEN);
umem_free(realaltdir, MAXPATHLEN);
}
}
static void
ztest_kill(ztest_shared_t *zs)
{
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(ztest_spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(ztest_spa));
/*
* Before we kill off ztest, make sure that the config is updated.
- * See comment above spa_config_sync().
+ * See comment above spa_write_cachefile().
*/
mutex_enter(&spa_namespace_lock);
- spa_config_sync(ztest_spa, B_FALSE, B_FALSE);
+ spa_write_cachefile(ztest_spa, B_FALSE, B_FALSE);
mutex_exit(&spa_namespace_lock);
(void) kill(getpid(), SIGKILL);
}
static uint64_t
ztest_random(uint64_t range)
{
uint64_t r;
ASSERT3S(ztest_fd_rand, >=, 0);
if (range == 0)
return (0);
if (read(ztest_fd_rand, &r, sizeof (r)) != sizeof (r))
fatal(1, "short read from /dev/urandom");
return (r % range);
}
/* ARGSUSED */
static void
ztest_record_enospc(const char *s)
{
ztest_shared->zs_enospc_count++;
}
static uint64_t
ztest_get_ashift(void)
{
if (ztest_opts.zo_ashift == 0)
return (SPA_MINBLOCKSHIFT + ztest_random(5));
return (ztest_opts.zo_ashift);
}
static nvlist_t *
make_vdev_file(char *path, char *aux, char *pool, size_t size, uint64_t ashift)
{
char *pathbuf;
uint64_t vdev;
nvlist_t *file;
pathbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
if (ashift == 0)
ashift = ztest_get_ashift();
if (path == NULL) {
path = pathbuf;
if (aux != NULL) {
vdev = ztest_shared->zs_vdev_aux;
(void) snprintf(path, MAXPATHLEN,
ztest_aux_template, ztest_opts.zo_dir,
pool == NULL ? ztest_opts.zo_pool : pool,
aux, vdev);
} else {
vdev = ztest_shared->zs_vdev_next_leaf++;
(void) snprintf(path, MAXPATHLEN,
ztest_dev_template, ztest_opts.zo_dir,
pool == NULL ? ztest_opts.zo_pool : pool, vdev);
}
}
if (size != 0) {
int fd = open(path, O_RDWR | O_CREAT | O_TRUNC, 0666);
if (fd == -1)
fatal(1, "can't open %s", path);
if (ftruncate(fd, size) != 0)
fatal(1, "can't ftruncate %s", path);
(void) close(fd);
}
VERIFY(nvlist_alloc(&file, NV_UNIQUE_NAME, 0) == 0);
VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_TYPE, VDEV_TYPE_FILE) == 0);
VERIFY(nvlist_add_string(file, ZPOOL_CONFIG_PATH, path) == 0);
VERIFY(nvlist_add_uint64(file, ZPOOL_CONFIG_ASHIFT, ashift) == 0);
umem_free(pathbuf, MAXPATHLEN);
return (file);
}
static nvlist_t *
make_vdev_raidz(char *path, char *aux, char *pool, size_t size,
uint64_t ashift, int r)
{
nvlist_t *raidz, **child;
int c;
if (r < 2)
return (make_vdev_file(path, aux, pool, size, ashift));
child = umem_alloc(r * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < r; c++)
child[c] = make_vdev_file(path, aux, pool, size, ashift);
VERIFY(nvlist_alloc(&raidz, NV_UNIQUE_NAME, 0) == 0);
VERIFY(nvlist_add_string(raidz, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_RAIDZ) == 0);
VERIFY(nvlist_add_uint64(raidz, ZPOOL_CONFIG_NPARITY,
ztest_opts.zo_raidz_parity) == 0);
VERIFY(nvlist_add_nvlist_array(raidz, ZPOOL_CONFIG_CHILDREN,
child, r) == 0);
for (c = 0; c < r; c++)
nvlist_free(child[c]);
umem_free(child, r * sizeof (nvlist_t *));
return (raidz);
}
static nvlist_t *
make_vdev_mirror(char *path, char *aux, char *pool, size_t size,
uint64_t ashift, int r, int m)
{
nvlist_t *mirror, **child;
int c;
if (m < 1)
return (make_vdev_raidz(path, aux, pool, size, ashift, r));
child = umem_alloc(m * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < m; c++)
child[c] = make_vdev_raidz(path, aux, pool, size, ashift, r);
VERIFY(nvlist_alloc(&mirror, NV_UNIQUE_NAME, 0) == 0);
VERIFY(nvlist_add_string(mirror, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_MIRROR) == 0);
VERIFY(nvlist_add_nvlist_array(mirror, ZPOOL_CONFIG_CHILDREN,
child, m) == 0);
for (c = 0; c < m; c++)
nvlist_free(child[c]);
umem_free(child, m * sizeof (nvlist_t *));
return (mirror);
}
static nvlist_t *
make_vdev_root(char *path, char *aux, char *pool, size_t size, uint64_t ashift,
int log, int r, int m, int t)
{
nvlist_t *root, **child;
int c;
ASSERT(t > 0);
child = umem_alloc(t * sizeof (nvlist_t *), UMEM_NOFAIL);
for (c = 0; c < t; c++) {
child[c] = make_vdev_mirror(path, aux, pool, size, ashift,
r, m);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
log) == 0);
}
VERIFY(nvlist_alloc(&root, NV_UNIQUE_NAME, 0) == 0);
VERIFY(nvlist_add_string(root, ZPOOL_CONFIG_TYPE, VDEV_TYPE_ROOT) == 0);
VERIFY(nvlist_add_nvlist_array(root, aux ? aux : ZPOOL_CONFIG_CHILDREN,
child, t) == 0);
for (c = 0; c < t; c++)
nvlist_free(child[c]);
umem_free(child, t * sizeof (nvlist_t *));
return (root);
}
/*
* Find a random spa version. Returns back a random spa version in the
* range [initial_version, SPA_VERSION_FEATURES].
*/
static uint64_t
ztest_random_spa_version(uint64_t initial_version)
{
uint64_t version = initial_version;
if (version <= SPA_VERSION_BEFORE_FEATURES) {
version = version +
ztest_random(SPA_VERSION_BEFORE_FEATURES - version + 1);
}
if (version > SPA_VERSION_BEFORE_FEATURES)
version = SPA_VERSION_FEATURES;
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
return (version);
}
static int
ztest_random_blocksize(void)
{
/*
* Choose a block size >= the ashift.
* If the SPA supports new MAXBLOCKSIZE, test up to 1MB blocks.
*/
int maxbs = SPA_OLD_MAXBLOCKSHIFT;
if (spa_maxblocksize(ztest_spa) == SPA_MAXBLOCKSIZE)
maxbs = 20;
uint64_t block_shift =
ztest_random(maxbs - ztest_spa->spa_max_ashift + 1);
return (1 << (SPA_MINBLOCKSHIFT + block_shift));
}
static int
ztest_random_dnodesize(void)
{
int slots;
int max_slots = spa_maxdnodesize(ztest_spa) >> DNODE_SHIFT;
if (max_slots == DNODE_MIN_SLOTS)
return (DNODE_MIN_SIZE);
/*
* Weight the random distribution more heavily toward smaller
* dnode sizes since that is more likely to reflect real-world
* usage.
*/
ASSERT3U(max_slots, >, 4);
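	/*
	 * Roughly: a 10% chance of a large dnode (5 or more slots), a 40%
	 * chance of 2-4 slots, and a 50% chance of the minimum 512-byte
	 * dnode (1 slot).
	 */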
switch (ztest_random(10)) {
case 0:
slots = 5 + ztest_random(max_slots - 4);
break;
case 1 ... 4:
slots = 2 + ztest_random(3);
break;
default:
slots = 1;
break;
}
return (slots << DNODE_SHIFT);
}
static int
ztest_random_ibshift(void)
{
return (DN_MIN_INDBLKSHIFT +
ztest_random(DN_MAX_INDBLKSHIFT - DN_MIN_INDBLKSHIFT + 1));
}
static uint64_t
ztest_random_vdev_top(spa_t *spa, boolean_t log_ok)
{
uint64_t top;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *tvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
do {
top = ztest_random(rvd->vdev_children);
tvd = rvd->vdev_child[top];
- } while (tvd->vdev_ishole || (tvd->vdev_islog && !log_ok) ||
+ } while (!vdev_is_concrete(tvd) || (tvd->vdev_islog && !log_ok) ||
tvd->vdev_mg == NULL || tvd->vdev_mg->mg_class == NULL);
return (top);
}
static uint64_t
ztest_random_dsl_prop(zfs_prop_t prop)
{
uint64_t value;
do {
value = zfs_prop_random_value(prop, ztest_random(-1ULL));
} while (prop == ZFS_PROP_CHECKSUM && value == ZIO_CHECKSUM_OFF);
return (value);
}
static int
ztest_dsl_prop_set_uint64(char *osname, zfs_prop_t prop, uint64_t value,
boolean_t inherit)
{
const char *propname = zfs_prop_to_name(prop);
const char *valname;
char *setpoint;
uint64_t curval;
int error;
error = dsl_prop_set_int(osname, propname,
(inherit ? ZPROP_SRC_NONE : ZPROP_SRC_LOCAL), value);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (error);
}
ASSERT0(error);
setpoint = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
VERIFY0(dsl_prop_get_integer(osname, propname, &curval, setpoint));
if (ztest_opts.zo_verbose >= 6) {
int err;
err = zfs_prop_index_to_string(prop, curval, &valname);
if (err)
(void) printf("%s %s = %llu at '%s'\n", osname,
propname, (unsigned long long)curval, setpoint);
else
(void) printf("%s %s = %s at '%s'\n",
osname, propname, valname, setpoint);
}
umem_free(setpoint, MAXPATHLEN);
return (error);
}
static int
ztest_spa_prop_set_uint64(zpool_prop_t prop, uint64_t value)
{
spa_t *spa = ztest_spa;
nvlist_t *props = NULL;
int error;
VERIFY(nvlist_alloc(&props, NV_UNIQUE_NAME, 0) == 0);
VERIFY(nvlist_add_uint64(props, zpool_prop_to_name(prop), value) == 0);
error = spa_prop_set(spa, props);
nvlist_free(props);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (error);
}
ASSERT0(error);
return (error);
}
static int
ztest_dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
int err;
err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
if (decrypt && err == EACCES) {
char ddname[ZFS_MAX_DATASET_NAME_LEN];
dsl_crypto_params_t *dcp;
nvlist_t *crypto_args = fnvlist_alloc();
char *cp = NULL;
/* spa_keystore_load_wkey() expects a dsl dir name */
strcpy(ddname, name);
cp = strchr(ddname, '@');
if (cp != NULL)
*cp = '\0';
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL,
crypto_args, &dcp));
err = spa_keystore_load_wkey(ddname, dcp, B_FALSE);
dsl_crypto_params_free(dcp, B_FALSE);
fnvlist_free(crypto_args);
if (err != 0)
return (err);
err = dmu_objset_own(name, type, readonly, decrypt, tag, osp);
}
return (err);
}
/*
* Object and range lock mechanics
*/
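/*
 * Range locks are tracked per object: ztest_znode_get() finds or creates a
 * refcounted ztest_znode_t on a small hash of lists keyed by object number,
 * and ztest_zrl_t pairs the acquired zfs range lock with that znode so both
 * can be released together in ztest_range_unlock().
 */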
typedef struct {
list_node_t z_lnode;
refcount_t z_refcnt;
uint64_t z_object;
zfs_rlock_t z_range_lock;
} ztest_znode_t;
typedef struct {
rl_t *z_rl;
ztest_znode_t *z_ztznode;
} ztest_zrl_t;
static ztest_znode_t *
ztest_znode_init(uint64_t object)
{
ztest_znode_t *zp = umem_alloc(sizeof (*zp), UMEM_NOFAIL);
list_link_init(&zp->z_lnode);
refcount_create(&zp->z_refcnt);
zp->z_object = object;
zfs_rlock_init(&zp->z_range_lock);
return (zp);
}
static void
ztest_znode_fini(ztest_znode_t *zp)
{
ASSERT(refcount_is_zero(&zp->z_refcnt));
zfs_rlock_destroy(&zp->z_range_lock);
zp->z_object = 0;
refcount_destroy(&zp->z_refcnt);
list_link_init(&zp->z_lnode);
umem_free(zp, sizeof (*zp));
}
static void
ztest_zll_init(zll_t *zll)
{
mutex_init(&zll->z_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zll->z_list, sizeof (ztest_znode_t),
offsetof(ztest_znode_t, z_lnode));
}
static void
ztest_zll_destroy(zll_t *zll)
{
list_destroy(&zll->z_list);
mutex_destroy(&zll->z_lock);
}
#define RL_TAG "range_lock"
static ztest_znode_t *
ztest_znode_get(ztest_ds_t *zd, uint64_t object)
{
zll_t *zll = &zd->zd_range_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
ztest_znode_t *zp = NULL;
mutex_enter(&zll->z_lock);
for (zp = list_head(&zll->z_list); (zp);
zp = list_next(&zll->z_list, zp)) {
if (zp->z_object == object) {
refcount_add(&zp->z_refcnt, RL_TAG);
break;
}
}
if (zp == NULL) {
zp = ztest_znode_init(object);
refcount_add(&zp->z_refcnt, RL_TAG);
list_insert_head(&zll->z_list, zp);
}
mutex_exit(&zll->z_lock);
return (zp);
}
static void
ztest_znode_put(ztest_ds_t *zd, ztest_znode_t *zp)
{
zll_t *zll = NULL;
ASSERT3U(zp->z_object, !=, 0);
zll = &zd->zd_range_lock[zp->z_object & (ZTEST_OBJECT_LOCKS - 1)];
mutex_enter(&zll->z_lock);
refcount_remove(&zp->z_refcnt, RL_TAG);
if (refcount_is_zero(&zp->z_refcnt)) {
list_remove(&zll->z_list, zp);
ztest_znode_fini(zp);
}
mutex_exit(&zll->z_lock);
}
static void
ztest_rll_init(rll_t *rll)
{
rll->rll_writer = NULL;
rll->rll_readers = 0;
mutex_init(&rll->rll_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&rll->rll_cv, NULL, CV_DEFAULT, NULL);
}
static void
ztest_rll_destroy(rll_t *rll)
{
ASSERT(rll->rll_writer == NULL);
ASSERT(rll->rll_readers == 0);
mutex_destroy(&rll->rll_lock);
cv_destroy(&rll->rll_cv);
}
static void
ztest_rll_lock(rll_t *rll, rl_type_t type)
{
mutex_enter(&rll->rll_lock);
if (type == RL_READER) {
while (rll->rll_writer != NULL)
(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_readers++;
} else {
while (rll->rll_writer != NULL || rll->rll_readers)
(void) cv_wait(&rll->rll_cv, &rll->rll_lock);
rll->rll_writer = curthread;
}
mutex_exit(&rll->rll_lock);
}
static void
ztest_rll_unlock(rll_t *rll)
{
mutex_enter(&rll->rll_lock);
if (rll->rll_writer) {
ASSERT(rll->rll_readers == 0);
rll->rll_writer = NULL;
} else {
ASSERT(rll->rll_readers != 0);
ASSERT(rll->rll_writer == NULL);
rll->rll_readers--;
}
if (rll->rll_writer == NULL && rll->rll_readers == 0)
cv_broadcast(&rll->rll_cv);
mutex_exit(&rll->rll_lock);
}
static void
ztest_object_lock(ztest_ds_t *zd, uint64_t object, rl_type_t type)
{
rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
ztest_rll_lock(rll, type);
}
static void
ztest_object_unlock(ztest_ds_t *zd, uint64_t object)
{
rll_t *rll = &zd->zd_object_lock[object & (ZTEST_OBJECT_LOCKS - 1)];
ztest_rll_unlock(rll);
}
static ztest_zrl_t *
ztest_zrl_init(rl_t *rl, ztest_znode_t *zp)
{
ztest_zrl_t *zrl = umem_alloc(sizeof (*zrl), UMEM_NOFAIL);
zrl->z_rl = rl;
zrl->z_ztznode = zp;
return (zrl);
}
static void
ztest_zrl_fini(ztest_zrl_t *zrl)
{
umem_free(zrl, sizeof (*zrl));
}
static ztest_zrl_t *
ztest_range_lock(ztest_ds_t *zd, uint64_t object, uint64_t offset,
uint64_t size, rl_type_t type)
{
ztest_znode_t *zp = ztest_znode_get(zd, object);
rl_t *rl = zfs_range_lock(&zp->z_range_lock, offset,
size, type);
return (ztest_zrl_init(rl, zp));
}
static void
ztest_range_unlock(ztest_ds_t *zd, ztest_zrl_t *zrl)
{
zfs_range_unlock(zrl->z_rl);
ztest_znode_put(zd, zrl->z_ztznode);
ztest_zrl_fini(zrl);
}
static void
ztest_zd_init(ztest_ds_t *zd, ztest_shared_ds_t *szd, objset_t *os)
{
zd->zd_os = os;
zd->zd_zilog = dmu_objset_zil(os);
zd->zd_shared = szd;
dmu_objset_name(os, zd->zd_name);
int l;
if (zd->zd_shared != NULL)
zd->zd_shared->zd_seq = 0;
VERIFY(rwlock_init(&zd->zd_zilog_lock, USYNC_THREAD, NULL) == 0);
mutex_init(&zd->zd_dirobj_lock, NULL, MUTEX_DEFAULT, NULL);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_init(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
ztest_zll_init(&zd->zd_range_lock[l]);
}
static void
ztest_zd_fini(ztest_ds_t *zd)
{
int l;
mutex_destroy(&zd->zd_dirobj_lock);
(void) rwlock_destroy(&zd->zd_zilog_lock);
for (l = 0; l < ZTEST_OBJECT_LOCKS; l++)
ztest_rll_destroy(&zd->zd_object_lock[l]);
for (l = 0; l < ZTEST_RANGE_LOCKS; l++)
ztest_zll_destroy(&zd->zd_range_lock[l]);
}
#define TXG_MIGHTWAIT (ztest_random(10) == 0 ? TXG_NOWAIT : TXG_WAIT)
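/*
 * TXG_MIGHTWAIT picks TXG_NOWAIT roughly one time in ten so that the
 * ERESTART retry path gets exercised. ztest_tx_assign() returns the
 * assigned txg on success, or 0 after aborting the tx on ERESTART or
 * ENOSPC.
 */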
static uint64_t
ztest_tx_assign(dmu_tx_t *tx, uint64_t txg_how, const char *tag)
{
uint64_t txg;
int error;
/*
* Attempt to assign tx to some transaction group.
*/
error = dmu_tx_assign(tx, txg_how);
if (error) {
if (error == ERESTART) {
ASSERT(txg_how == TXG_NOWAIT);
dmu_tx_wait(tx);
} else {
ASSERT3U(error, ==, ENOSPC);
ztest_record_enospc(tag);
}
dmu_tx_abort(tx);
return (0);
}
txg = dmu_tx_get_txg(tx);
ASSERT(txg != 0);
return (txg);
}
static void
ztest_pattern_set(void *buf, uint64_t size, uint64_t value)
{
uint64_t *ip = buf;
uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
while (ip < ip_end)
*ip++ = value;
}
#ifndef NDEBUG
static boolean_t
ztest_pattern_match(void *buf, uint64_t size, uint64_t value)
{
uint64_t *ip = buf;
uint64_t *ip_end = (uint64_t *)((uintptr_t)buf + (uintptr_t)size);
uint64_t diff = 0;
while (ip < ip_end)
diff |= (value - *ip++);
return (diff == 0);
}
#endif
static void
ztest_bt_generate(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
uint64_t crtxg)
{
bt->bt_magic = BT_MAGIC;
bt->bt_objset = dmu_objset_id(os);
bt->bt_object = object;
bt->bt_dnodesize = dnodesize;
bt->bt_offset = offset;
bt->bt_gen = gen;
bt->bt_txg = txg;
bt->bt_crtxg = crtxg;
}
static void
ztest_bt_verify(ztest_block_tag_t *bt, objset_t *os, uint64_t object,
uint64_t dnodesize, uint64_t offset, uint64_t gen, uint64_t txg,
uint64_t crtxg)
{
ASSERT3U(bt->bt_magic, ==, BT_MAGIC);
ASSERT3U(bt->bt_objset, ==, dmu_objset_id(os));
ASSERT3U(bt->bt_object, ==, object);
ASSERT3U(bt->bt_dnodesize, ==, dnodesize);
ASSERT3U(bt->bt_offset, ==, offset);
ASSERT3U(bt->bt_gen, <=, gen);
ASSERT3U(bt->bt_txg, <=, txg);
ASSERT3U(bt->bt_crtxg, ==, crtxg);
}
static ztest_block_tag_t *
ztest_bt_bonus(dmu_buf_t *db)
{
dmu_object_info_t doi;
ztest_block_tag_t *bt;
dmu_object_info_from_db(db, &doi);
ASSERT3U(doi.doi_bonus_size, <=, db->db_size);
ASSERT3U(doi.doi_bonus_size, >=, sizeof (*bt));
bt = (void *)((char *)db->db_data + doi.doi_bonus_size - sizeof (*bt));
return (bt);
}
/*
* Generate a token to fill up unused bonus buffer space. Try to make
* it unique to the object, generation, and offset to verify that data
* is not getting overwritten by data from other dnodes.
*/
#define ZTEST_BONUS_FILL_TOKEN(obj, ds, gen, offset) \
(((ds) << 48) | ((gen) << 32) | ((obj) << 8) | (offset))
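/*
 * From the shifts above, the nominal layout of the token is: dataset id in
 * bits 48-63, generation in bits 32-47, object number in bits 8-31, and the
 * word offset in bits 0-7. Larger values simply spill into higher bits,
 * which is harmless because fill and verify compute the token the same way.
 */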
/*
* Fill up the unused bonus buffer region before the block tag with a
* verifiable pattern. Filling the whole bonus area with non-zero data
* helps ensure that all dnode traversal code properly skips the
* interior regions of large dnodes.
*/
void
ztest_fill_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
uint64_t *bonusp;
ASSERT(IS_P2ALIGNED((char *)end - (char *)db->db_data, 8));
for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
gen, bonusp - (uint64_t *)db->db_data);
*bonusp = token;
}
}
/*
* Verify that the unused area of a bonus buffer is filled with the
* expected tokens.
*/
void
ztest_verify_unused_bonus(dmu_buf_t *db, void *end, uint64_t obj,
objset_t *os, uint64_t gen)
{
uint64_t *bonusp;
for (bonusp = db->db_data; bonusp < (uint64_t *)end; bonusp++) {
uint64_t token = ZTEST_BONUS_FILL_TOKEN(obj, dmu_objset_id(os),
gen, bonusp - (uint64_t *)db->db_data);
VERIFY3U(*bonusp, ==, token);
}
}
/*
* ZIL logging ops
*/
#define lrz_type lr_mode
#define lrz_blocksize lr_uid
#define lrz_ibshift lr_gid
#define lrz_bonustype lr_rdev
#define lrz_dnodesize lr_crtime[1]
static void
ztest_log_create(ztest_ds_t *zd, dmu_tx_t *tx, lr_create_t *lr)
{
char *name = (void *)(lr + 1); /* name follows lr */
size_t namesize = strlen(name) + 1;
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_CREATE, sizeof (*lr) + namesize);
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_remove(ztest_ds_t *zd, dmu_tx_t *tx, lr_remove_t *lr, uint64_t object)
{
char *name = (void *)(lr + 1); /* name follows lr */
size_t namesize = strlen(name) + 1;
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_REMOVE, sizeof (*lr) + namesize);
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) + namesize - sizeof (lr_t));
itx->itx_oid = object;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_write(ztest_ds_t *zd, dmu_tx_t *tx, lr_write_t *lr)
{
itx_t *itx;
itx_wr_state_t write_state = ztest_random(WR_NUM_STATES);
if (zil_replaying(zd->zd_zilog, tx))
return;
if (lr->lr_length > ZIL_MAX_LOG_DATA)
write_state = WR_INDIRECT;
itx = zil_itx_create(TX_WRITE,
sizeof (*lr) + (write_state == WR_COPIED ? lr->lr_length : 0));
if (write_state == WR_COPIED &&
dmu_read(zd->zd_os, lr->lr_foid, lr->lr_offset, lr->lr_length,
((lr_write_t *)&itx->itx_lr) + 1, DMU_READ_NO_PREFETCH) != 0) {
zil_itx_destroy(itx);
itx = zil_itx_create(TX_WRITE, sizeof (*lr));
write_state = WR_NEED_COPY;
}
itx->itx_private = zd;
itx->itx_wr_state = write_state;
itx->itx_sync = (ztest_random(8) == 0);
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) - sizeof (lr_t));
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_truncate(ztest_ds_t *zd, dmu_tx_t *tx, lr_truncate_t *lr)
{
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_TRUNCATE, sizeof (*lr));
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
static void
ztest_log_setattr(ztest_ds_t *zd, dmu_tx_t *tx, lr_setattr_t *lr)
{
itx_t *itx;
if (zil_replaying(zd->zd_zilog, tx))
return;
itx = zil_itx_create(TX_SETATTR, sizeof (*lr));
bcopy(&lr->lr_common + 1, &itx->itx_lr + 1,
sizeof (*lr) - sizeof (lr_t));
itx->itx_sync = B_FALSE;
zil_itx_assign(zd->zd_zilog, itx, tx);
}
/*
* ZIL replay ops
*/
static int
ztest_replay_create(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_create_t *lr = arg2;
char *name = (void *)(lr + 1); /* name follows lr */
objset_t *os = zd->zd_os;
ztest_block_tag_t *bbt;
dmu_buf_t *db;
dmu_tx_t *tx;
uint64_t txg;
int error = 0;
int bonuslen;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ASSERT(lr->lr_doid == ZTEST_DIROBJ);
ASSERT(name[0] != '\0');
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, lr->lr_doid, B_TRUE, name);
if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
} else {
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
}
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0)
return (ENOSPC);
ASSERT(dmu_objset_zil(os)->zl_replay == !!lr->lr_foid);
bonuslen = DN_BONUS_SIZE(lr->lrz_dnodesize);
if (lr->lrz_type == DMU_OT_ZAP_OTHER) {
if (lr->lr_foid == 0) {
lr->lr_foid = zap_create_dnsize(os,
lr->lrz_type, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
} else {
error = zap_create_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
}
} else {
if (lr->lr_foid == 0) {
lr->lr_foid = dmu_object_alloc_dnsize(os,
lr->lrz_type, 0, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
} else {
error = dmu_object_claim_dnsize(os, lr->lr_foid,
lr->lrz_type, 0, lr->lrz_bonustype,
bonuslen, lr->lrz_dnodesize, tx);
}
}
if (error) {
ASSERT3U(error, ==, EEXIST);
ASSERT(zd->zd_zilog->zl_replay);
dmu_tx_commit(tx);
return (error);
}
ASSERT(lr->lr_foid != 0);
if (lr->lrz_type != DMU_OT_ZAP_OTHER)
VERIFY3U(0, ==, dmu_object_set_blocksize(os, lr->lr_foid,
lr->lrz_blocksize, lr->lrz_ibshift, tx));
VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
bbt = ztest_bt_bonus(db);
dmu_buf_will_dirty(db, tx);
ztest_bt_generate(bbt, os, lr->lr_foid, lr->lrz_dnodesize, -1ULL,
lr->lr_gen, txg, txg);
ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, lr->lr_gen);
dmu_buf_rele(db, FTAG);
VERIFY3U(0, ==, zap_add(os, lr->lr_doid, name, sizeof (uint64_t), 1,
&lr->lr_foid, tx));
(void) ztest_log_create(zd, tx, lr);
dmu_tx_commit(tx);
return (0);
}
static int
ztest_replay_remove(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_remove_t *lr = arg2;
char *name = (void *)(lr + 1); /* name follows lr */
objset_t *os = zd->zd_os;
dmu_object_info_t doi;
dmu_tx_t *tx;
uint64_t object, txg;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ASSERT(lr->lr_doid == ZTEST_DIROBJ);
ASSERT(name[0] != '\0');
VERIFY3U(0, ==,
zap_lookup(os, lr->lr_doid, name, sizeof (object), 1, &object));
ASSERT(object != 0);
ztest_object_lock(zd, object, RL_WRITER);
VERIFY3U(0, ==, dmu_object_info(os, object, &doi));
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, lr->lr_doid, B_FALSE, name);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_object_unlock(zd, object);
return (ENOSPC);
}
if (doi.doi_type == DMU_OT_ZAP_OTHER) {
VERIFY3U(0, ==, zap_destroy(os, object, tx));
} else {
VERIFY3U(0, ==, dmu_object_free(os, object, tx));
}
VERIFY3U(0, ==, zap_remove(os, lr->lr_doid, name, tx));
(void) ztest_log_remove(zd, tx, lr, object);
dmu_tx_commit(tx);
ztest_object_unlock(zd, object);
return (0);
}
static int
ztest_replay_write(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_write_t *lr = arg2;
objset_t *os = zd->zd_os;
void *data = lr + 1; /* data follows lr */
uint64_t offset, length;
ztest_block_tag_t *bt = data;
ztest_block_tag_t *bbt;
uint64_t gen, txg, lrtxg, crtxg;
dmu_object_info_t doi;
dmu_tx_t *tx;
dmu_buf_t *db;
arc_buf_t *abuf = NULL;
ztest_zrl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
offset = lr->lr_offset;
length = lr->lr_length;
/* If it's a dmu_sync() block, write the whole block */
if (lr->lr_common.lrc_reclen == sizeof (lr_write_t)) {
uint64_t blocksize = BP_GET_LSIZE(&lr->lr_blkptr);
if (length < blocksize) {
offset -= offset % blocksize;
length = blocksize;
}
}
if (bt->bt_magic == BSWAP_64(BT_MAGIC))
byteswap_uint64_array(bt, sizeof (*bt));
if (bt->bt_magic != BT_MAGIC)
bt = NULL;
ztest_object_lock(zd, lr->lr_foid, RL_READER);
rl = ztest_range_lock(zd, lr->lr_foid, offset, length, RL_WRITER);
VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
dmu_object_info_from_db(db, &doi);
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
gen = bbt->bt_gen;
crtxg = bbt->bt_crtxg;
lrtxg = lr->lr_common.lrc_txg;
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, lr->lr_foid, offset, length);
if (ztest_random(8) == 0 && length == doi.doi_data_block_size &&
P2PHASE(offset, length) == 0)
abuf = dmu_request_arcbuf(db, length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);
dmu_buf_rele(db, FTAG);
ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
if (bt != NULL) {
/*
* Usually, verify the old data before writing new data --
* but not always, because we also want to verify correct
* behavior when the data was not recently read into cache.
*/
ASSERT(offset % doi.doi_data_block_size == 0);
if (ztest_random(4) != 0) {
int prefetch = ztest_random(2) ?
DMU_READ_PREFETCH : DMU_READ_NO_PREFETCH;
ztest_block_tag_t rbt;
VERIFY(dmu_read(os, lr->lr_foid, offset,
sizeof (rbt), &rbt, prefetch) == 0);
if (rbt.bt_magic == BT_MAGIC) {
ztest_bt_verify(&rbt, os, lr->lr_foid, 0,
offset, gen, txg, crtxg);
}
}
/*
* Writes can appear to be newer than the bonus buffer because
* the ztest_get_data() callback does a dmu_read() of the
* open-context data, which may be different than the data
* as it was when the write was generated.
*/
if (zd->zd_zilog->zl_replay) {
ztest_bt_verify(bt, os, lr->lr_foid, 0, offset,
MAX(gen, bt->bt_gen), MAX(txg, lrtxg),
bt->bt_crtxg);
}
/*
* Set the bt's gen/txg to the bonus buffer's gen/txg
* so that all of the usual ASSERTs will work.
*/
ztest_bt_generate(bt, os, lr->lr_foid, 0, offset, gen, txg,
crtxg);
}
if (abuf == NULL) {
dmu_write(os, lr->lr_foid, offset, length, data, tx);
} else {
bcopy(data, abuf->b_data, length);
dmu_assign_arcbuf_by_dbuf(db, offset, abuf, tx);
}
(void) ztest_log_write(zd, tx, lr);
dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);
ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
static int
ztest_replay_truncate(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_truncate_t *lr = arg2;
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
ztest_zrl_t *rl;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ztest_object_lock(zd, lr->lr_foid, RL_READER);
rl = ztest_range_lock(zd, lr->lr_foid, lr->lr_offset, lr->lr_length,
RL_WRITER);
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, lr->lr_foid, lr->lr_offset, lr->lr_length);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
VERIFY(dmu_free_range(os, lr->lr_foid, lr->lr_offset,
lr->lr_length, tx) == 0);
(void) ztest_log_truncate(zd, tx, lr);
dmu_tx_commit(tx);
ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
static int
ztest_replay_setattr(void *arg1, void *arg2, boolean_t byteswap)
{
ztest_ds_t *zd = arg1;
lr_setattr_t *lr = arg2;
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
dmu_buf_t *db;
ztest_block_tag_t *bbt;
uint64_t txg, lrtxg, crtxg, dnodesize;
if (byteswap)
byteswap_uint64_array(lr, sizeof (*lr));
ztest_object_lock(zd, lr->lr_foid, RL_WRITER);
VERIFY3U(0, ==, dmu_bonus_hold(os, lr->lr_foid, FTAG, &db));
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, lr->lr_foid);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, lr->lr_foid);
return (ENOSPC);
}
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
crtxg = bbt->bt_crtxg;
lrtxg = lr->lr_common.lrc_txg;
dnodesize = bbt->bt_dnodesize;
if (zd->zd_zilog->zl_replay) {
ASSERT(lr->lr_size != 0);
ASSERT(lr->lr_mode != 0);
ASSERT(lrtxg != 0);
} else {
/*
* Randomly change the size and increment the generation.
*/
lr->lr_size = (ztest_random(db->db_size / sizeof (*bbt)) + 1) *
sizeof (*bbt);
lr->lr_mode = bbt->bt_gen + 1;
ASSERT(lrtxg == 0);
}
/*
* Verify that the current bonus buffer is not newer than our txg.
*/
ztest_bt_verify(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
MAX(txg, lrtxg), crtxg);
dmu_buf_will_dirty(db, tx);
ASSERT3U(lr->lr_size, >=, sizeof (*bbt));
ASSERT3U(lr->lr_size, <=, db->db_size);
VERIFY0(dmu_set_bonus(db, lr->lr_size, tx));
bbt = ztest_bt_bonus(db);
ztest_bt_generate(bbt, os, lr->lr_foid, dnodesize, -1ULL, lr->lr_mode,
txg, crtxg);
ztest_fill_unused_bonus(db, bbt, lr->lr_foid, os, bbt->bt_gen);
dmu_buf_rele(db, FTAG);
(void) ztest_log_setattr(zd, tx, lr);
dmu_tx_commit(tx);
ztest_object_unlock(zd, lr->lr_foid);
return (0);
}
zil_replay_func_t *ztest_replay_vector[TX_MAX_TYPE] = {
NULL, /* 0 no such transaction type */
ztest_replay_create, /* TX_CREATE */
NULL, /* TX_MKDIR */
NULL, /* TX_MKXATTR */
NULL, /* TX_SYMLINK */
ztest_replay_remove, /* TX_REMOVE */
NULL, /* TX_RMDIR */
NULL, /* TX_LINK */
NULL, /* TX_RENAME */
ztest_replay_write, /* TX_WRITE */
ztest_replay_truncate, /* TX_TRUNCATE */
ztest_replay_setattr, /* TX_SETATTR */
NULL, /* TX_ACL */
NULL, /* TX_CREATE_ACL */
NULL, /* TX_CREATE_ATTR */
NULL, /* TX_CREATE_ACL_ATTR */
NULL, /* TX_MKDIR_ACL */
NULL, /* TX_MKDIR_ATTR */
NULL, /* TX_MKDIR_ACL_ATTR */
NULL, /* TX_WRITE2 */
};
/*
* ZIL get_data callbacks
*/
typedef struct ztest_zgd_private {
ztest_ds_t *z_zd;
ztest_zrl_t *z_rl;
uint64_t z_object;
} ztest_zgd_private_t;
static void
ztest_get_done(zgd_t *zgd, int error)
{
ztest_zgd_private_t *zzp = zgd->zgd_private;
ztest_ds_t *zd = zzp->z_zd;
uint64_t object = zzp->z_object;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
ztest_range_unlock(zd, zzp->z_rl);
ztest_object_unlock(zd, object);
if (error == 0 && zgd->zgd_bp)
zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
umem_free(zgd, sizeof (*zgd));
umem_free(zzp, sizeof (*zzp));
}
static int
ztest_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb,
zio_t *zio)
{
ztest_ds_t *zd = arg;
objset_t *os = zd->zd_os;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
uint64_t txg = lr->lr_common.lrc_txg;
uint64_t crtxg;
dmu_object_info_t doi;
dmu_buf_t *db;
zgd_t *zgd;
int error;
ztest_zgd_private_t *zgd_private;
ASSERT3P(lwb, !=, NULL);
ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
ztest_object_lock(zd, object, RL_READER);
error = dmu_bonus_hold(os, object, FTAG, &db);
if (error) {
ztest_object_unlock(zd, object);
return (error);
}
crtxg = ztest_bt_bonus(db)->bt_crtxg;
if (crtxg == 0 || crtxg > txg) {
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, object);
return (ENOENT);
}
dmu_object_info_from_db(db, &doi);
dmu_buf_rele(db, FTAG);
db = NULL;
zgd = umem_zalloc(sizeof (*zgd), UMEM_NOFAIL);
zgd->zgd_lwb = lwb;
zgd_private = umem_zalloc(sizeof (ztest_zgd_private_t), UMEM_NOFAIL);
zgd_private->z_zd = zd;
zgd_private->z_object = object;
zgd->zgd_private = zgd_private;
if (buf != NULL) { /* immediate write */
zgd_private->z_rl = ztest_range_lock(zd, object, offset, size,
RL_READER);
zgd->zgd_rl = zgd_private->z_rl->z_rl;
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
ASSERT(error == 0);
} else {
size = doi.doi_data_block_size;
if (ISP2(size)) {
offset = P2ALIGN(offset, size);
} else {
ASSERT(offset < size);
offset = 0;
}
zgd_private->z_rl = ztest_range_lock(zd, object, offset, size,
RL_READER);
zgd->zgd_rl = zgd_private->z_rl->z_rl;
error = dmu_buf_hold(os, object, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
ztest_get_done, zgd);
if (error == 0)
return (0);
}
}
ztest_get_done(zgd, error);
return (error);
}
static void *
ztest_lr_alloc(size_t lrsize, char *name)
{
char *lr;
size_t namesize = name ? strlen(name) + 1 : 0;
lr = umem_zalloc(lrsize + namesize, UMEM_NOFAIL);
if (name)
bcopy(name, lr + lrsize, namesize);
return (lr);
}
void
ztest_lr_free(void *lr, size_t lrsize, char *name)
{
size_t namesize = name ? strlen(name) + 1 : 0;
umem_free(lr, lrsize + namesize);
}
/*
* Lookup a bunch of objects. Returns the number of objects not found.
*/
static int
ztest_lookup(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
od->od_object = 0;
error = zap_lookup(zd->zd_os, od->od_dir, od->od_name,
sizeof (uint64_t), 1, &od->od_object);
if (error) {
ASSERT(error == ENOENT);
ASSERT(od->od_object == 0);
missing++;
} else {
dmu_buf_t *db;
ztest_block_tag_t *bbt;
dmu_object_info_t doi;
ASSERT(od->od_object != 0);
ASSERT(missing == 0); /* there should be no gaps */
ztest_object_lock(zd, od->od_object, RL_READER);
VERIFY3U(0, ==, dmu_bonus_hold(zd->zd_os,
od->od_object, FTAG, &db));
dmu_object_info_from_db(db, &doi);
bbt = ztest_bt_bonus(db);
ASSERT3U(bbt->bt_magic, ==, BT_MAGIC);
od->od_type = doi.doi_type;
od->od_blocksize = doi.doi_data_block_size;
od->od_gen = bbt->bt_gen;
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, od->od_object);
}
}
return (missing);
}
static int
ztest_create(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
for (i = 0; i < count; i++, od++) {
if (missing) {
od->od_object = 0;
missing++;
continue;
}
lr_create_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
lr->lr_doid = od->od_dir;
lr->lr_foid = 0; /* 0 to allocate, > 0 to claim */
lr->lrz_type = od->od_crtype;
lr->lrz_blocksize = od->od_crblocksize;
lr->lrz_ibshift = ztest_random_ibshift();
lr->lrz_bonustype = DMU_OT_UINT64_OTHER;
lr->lrz_dnodesize = od->od_crdnodesize;
lr->lr_gen = od->od_crgen;
lr->lr_crtime[0] = time(NULL);
if (ztest_replay_create(zd, lr, B_FALSE) != 0) {
ASSERT(missing == 0);
od->od_object = 0;
missing++;
} else {
od->od_object = lr->lr_foid;
od->od_type = od->od_crtype;
od->od_blocksize = od->od_crblocksize;
od->od_gen = od->od_crgen;
ASSERT(od->od_object != 0);
}
ztest_lr_free(lr, sizeof (*lr), od->od_name);
}
return (missing);
}
static int
ztest_remove(ztest_ds_t *zd, ztest_od_t *od, int count)
{
int missing = 0;
int error;
int i;
ASSERT(MUTEX_HELD(&zd->zd_dirobj_lock));
od += count - 1;
for (i = count - 1; i >= 0; i--, od--) {
if (missing) {
missing++;
continue;
}
/*
* No object was found.
*/
if (od->od_object == 0)
continue;
lr_remove_t *lr = ztest_lr_alloc(sizeof (*lr), od->od_name);
lr->lr_doid = od->od_dir;
if ((error = ztest_replay_remove(zd, lr, B_FALSE)) != 0) {
ASSERT3U(error, ==, ENOSPC);
missing++;
} else {
od->od_object = 0;
}
ztest_lr_free(lr, sizeof (*lr), od->od_name);
}
return (missing);
}
static int
ztest_write(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size,
void *data)
{
lr_write_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr) + size, NULL);
lr->lr_foid = object;
lr->lr_offset = offset;
lr->lr_length = size;
lr->lr_blkoff = 0;
BP_ZERO(&lr->lr_blkptr);
bcopy(data, lr + 1, size);
error = ztest_replay_write(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr) + size, NULL);
return (error);
}
static int
ztest_truncate(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
lr_truncate_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr), NULL);
lr->lr_foid = object;
lr->lr_offset = offset;
lr->lr_length = size;
error = ztest_replay_truncate(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr), NULL);
return (error);
}
static int
ztest_setattr(ztest_ds_t *zd, uint64_t object)
{
lr_setattr_t *lr;
int error;
lr = ztest_lr_alloc(sizeof (*lr), NULL);
lr->lr_foid = object;
lr->lr_size = 0;
lr->lr_mode = 0;
error = ztest_replay_setattr(zd, lr, B_FALSE);
ztest_lr_free(lr, sizeof (*lr), NULL);
return (error);
}
static void
ztest_prealloc(ztest_ds_t *zd, uint64_t object, uint64_t offset, uint64_t size)
{
objset_t *os = zd->zd_os;
dmu_tx_t *tx;
uint64_t txg;
ztest_zrl_t *rl;
txg_wait_synced(dmu_objset_pool(os), 0);
ztest_object_lock(zd, object, RL_READER);
rl = ztest_range_lock(zd, object, offset, size, RL_WRITER);
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, object, offset, size);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg != 0) {
dmu_prealloc(os, object, offset, size, tx);
dmu_tx_commit(tx);
txg_wait_synced(dmu_objset_pool(os), txg);
} else {
(void) dmu_free_long_range(os, object, offset, size);
}
ztest_range_unlock(zd, rl);
ztest_object_unlock(zd, object);
}
static void
ztest_io(ztest_ds_t *zd, uint64_t object, uint64_t offset)
{
int err;
ztest_block_tag_t wbt;
dmu_object_info_t doi;
enum ztest_io_type io_type;
uint64_t blocksize;
void *data;
VERIFY(dmu_object_info(zd->zd_os, object, &doi) == 0);
blocksize = doi.doi_data_block_size;
data = umem_alloc(blocksize, UMEM_NOFAIL);
/*
* Pick an i/o type at random, biased toward writing block tags.
*/
io_type = ztest_random(ZTEST_IO_TYPES);
if (ztest_random(2) == 0)
io_type = ZTEST_IO_WRITE_TAG;
(void) rw_rdlock(&zd->zd_zilog_lock);
switch (io_type) {
case ZTEST_IO_WRITE_TAG:
ztest_bt_generate(&wbt, zd->zd_os, object, doi.doi_dnodesize,
offset, 0, 0, 0);
(void) ztest_write(zd, object, offset, sizeof (wbt), &wbt);
break;
case ZTEST_IO_WRITE_PATTERN:
(void) memset(data, 'a' + (object + offset) % 5, blocksize);
if (ztest_random(2) == 0) {
/*
* Induce fletcher2 collisions to ensure that
* zio_ddt_collision() detects and resolves them
* when using fletcher2-verify for deduplication.
*/
((uint64_t *)data)[0] ^= 1ULL << 63;
((uint64_t *)data)[4] ^= 1ULL << 63;
}
(void) ztest_write(zd, object, offset, blocksize, data);
break;
case ZTEST_IO_WRITE_ZEROES:
bzero(data, blocksize);
(void) ztest_write(zd, object, offset, blocksize, data);
break;
case ZTEST_IO_TRUNCATE:
(void) ztest_truncate(zd, object, offset, blocksize);
break;
case ZTEST_IO_SETATTR:
(void) ztest_setattr(zd, object);
break;
default:
break;
case ZTEST_IO_REWRITE:
(void) rw_rdlock(&ztest_name_lock);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_CHECKSUM, spa_dedup_checksum(ztest_spa),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
err = ztest_dsl_prop_set_uint64(zd->zd_name,
ZFS_PROP_COMPRESSION,
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION),
B_FALSE);
VERIFY(err == 0 || err == ENOSPC);
(void) rw_unlock(&ztest_name_lock);
VERIFY0(dmu_read(zd->zd_os, object, offset, blocksize, data,
DMU_READ_NO_PREFETCH));
(void) ztest_write(zd, object, offset, blocksize, data);
break;
}
(void) rw_unlock(&zd->zd_zilog_lock);
umem_free(data, blocksize);
}
/*
* Initialize an object description template.
*/
static void
ztest_od_init(ztest_od_t *od, uint64_t id, char *tag, uint64_t index,
dmu_object_type_t type, uint64_t blocksize, uint64_t dnodesize,
uint64_t gen)
{
od->od_dir = ZTEST_DIROBJ;
od->od_object = 0;
od->od_crtype = type;
od->od_crblocksize = blocksize ? blocksize : ztest_random_blocksize();
od->od_crdnodesize = dnodesize ? dnodesize : ztest_random_dnodesize();
od->od_crgen = gen;
od->od_type = DMU_OT_NONE;
od->od_blocksize = 0;
od->od_gen = 0;
(void) snprintf(od->od_name, sizeof (od->od_name), "%s(%lld)[%llu]",
tag, (longlong_t)id, (u_longlong_t)index);
}
/*
* Lookup or create the objects for a test using the od template.
* If the objects do not all exist, or if 'remove' is specified,
* remove any existing objects and create new ones. Otherwise,
* use the existing objects.
*/
static int
ztest_object_init(ztest_ds_t *zd, ztest_od_t *od, size_t size, boolean_t remove)
{
int count = size / sizeof (*od);
int rv = 0;
mutex_enter(&zd->zd_dirobj_lock);
if ((ztest_lookup(zd, od, count) != 0 || remove) &&
(ztest_remove(zd, od, count) != 0 ||
ztest_create(zd, od, count) != 0))
rv = -1;
zd->zd_od = od;
mutex_exit(&zd->zd_dirobj_lock);
return (rv);
}
/* ARGSUSED */
void
ztest_zil_commit(ztest_ds_t *zd, uint64_t id)
{
zilog_t *zilog = zd->zd_zilog;
(void) rw_rdlock(&zd->zd_zilog_lock);
zil_commit(zilog, ztest_random(ZTEST_OBJECTS));
/*
* Remember the committed values in zd, which is in parent/child
* shared memory. If we die, the next iteration of ztest_run()
* will verify that the log really does contain this record.
*/
mutex_enter(&zilog->zl_lock);
ASSERT(zd->zd_shared != NULL);
ASSERT3U(zd->zd_shared->zd_seq, <=, zilog->zl_commit_lr_seq);
zd->zd_shared->zd_seq = zilog->zl_commit_lr_seq;
mutex_exit(&zilog->zl_lock);
(void) rw_unlock(&zd->zd_zilog_lock);
}
/*
* This function is designed to simulate the operations that occur during a
* mount/unmount operation. We hold the dataset across these operations in an
* attempt to expose any implicit assumptions about ZIL management.
*/
/* ARGSUSED */
void
ztest_zil_remount(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
/*
* We grab the zd_dirobj_lock to ensure that no other thread is
* updating the zil (i.e. adding in-memory log records) and the
* zd_zilog_lock to block any I/O.
*/
mutex_enter(&zd->zd_dirobj_lock);
(void) rw_wrlock(&zd->zd_zilog_lock);
/* zfsvfs_teardown() */
zil_close(zd->zd_zilog);
/* zfsvfs_setup() */
VERIFY(zil_open(os, ztest_get_data) == zd->zd_zilog);
zil_replay(os, zd, ztest_replay_vector);
(void) rw_unlock(&zd->zd_zilog_lock);
mutex_exit(&zd->zd_dirobj_lock);
}
/*
* Verify that we can't destroy an active pool, create an existing pool,
* or create a pool with a bad vdev spec.
*/
/* ARGSUSED */
void
ztest_spa_create_destroy(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_opts_t *zo = &ztest_opts;
spa_t *spa;
nvlist_t *nvroot;
if (zo->zo_mmp_test)
return;
/*
* Attempt to create using a bad file.
*/
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
VERIFY3U(ENOENT, ==,
spa_create("ztest_bad_file", nvroot, NULL, NULL, NULL));
nvlist_free(nvroot);
/*
* Attempt to create using a bad mirror.
*/
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 2, 1);
VERIFY3U(ENOENT, ==,
spa_create("ztest_bad_mirror", nvroot, NULL, NULL, NULL));
nvlist_free(nvroot);
/*
* Attempt to create an existing pool. It shouldn't matter
* what's in the nvroot; we should fail with EEXIST.
*/
(void) rw_rdlock(&ztest_name_lock);
nvroot = make_vdev_root("/dev/bogus", NULL, NULL, 0, 0, 0, 0, 0, 1);
VERIFY3U(EEXIST, ==,
spa_create(zo->zo_pool, nvroot, NULL, NULL, NULL));
nvlist_free(nvroot);
VERIFY3U(0, ==, spa_open(zo->zo_pool, &spa, FTAG));
VERIFY3U(EBUSY, ==, spa_destroy(zo->zo_pool));
spa_close(spa, FTAG);
(void) rw_unlock(&ztest_name_lock);
}
/*
* Start and then stop the MMP threads to ensure the startup and shutdown code
* works properly. Actual protection and property-related code is tested
* via ZTS.
*/
/* ARGSUSED */
void
ztest_mmp_enable_disable(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_opts_t *zo = &ztest_opts;
spa_t *spa = ztest_spa;
if (zo->zo_mmp_test)
return;
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&spa->spa_props_lock);
if (!spa_multihost(spa)) {
spa->spa_multihost = B_TRUE;
mmp_thread_start(spa);
}
mutex_exit(&spa->spa_props_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
mmp_signal_all_threads();
txg_wait_synced(spa_get_dsl(spa), 0);
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
mutex_enter(&spa->spa_props_lock);
if (spa_multihost(spa)) {
mmp_thread_stop(spa);
spa->spa_multihost = B_FALSE;
}
mutex_exit(&spa->spa_props_lock);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/* ARGSUSED */
void
ztest_spa_upgrade(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa;
uint64_t initial_version = SPA_VERSION_INITIAL;
uint64_t version, newversion;
nvlist_t *nvroot, *props;
char *name;
if (ztest_opts.zo_mmp_test)
return;
mutex_enter(&ztest_vdev_lock);
name = kmem_asprintf("%s_upgrade", ztest_opts.zo_pool);
/*
* Clean up from previous runs.
*/
(void) spa_destroy(name);
nvroot = make_vdev_root(NULL, NULL, name, ztest_opts.zo_vdev_size, 0,
0, ztest_opts.zo_raidz, ztest_opts.zo_mirrors, 1);
/*
* If we're configuring a RAIDZ device then make sure that the
* initial version is capable of supporting that feature.
*/
switch (ztest_opts.zo_raidz_parity) {
case 0:
case 1:
initial_version = SPA_VERSION_INITIAL;
break;
case 2:
initial_version = SPA_VERSION_RAIDZ2;
break;
case 3:
initial_version = SPA_VERSION_RAIDZ3;
break;
}
/*
* Create a pool with a spa version that can be upgraded. Pick
* a value between initial_version and SPA_VERSION_BEFORE_FEATURES.
*/
do {
version = ztest_random_spa_version(initial_version);
} while (version > SPA_VERSION_BEFORE_FEATURES);
props = fnvlist_alloc();
fnvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), version);
VERIFY3S(spa_create(name, nvroot, props, NULL, NULL), ==, 0);
fnvlist_free(nvroot);
fnvlist_free(props);
VERIFY3S(spa_open(name, &spa, FTAG), ==, 0);
VERIFY3U(spa_version(spa), ==, version);
newversion = ztest_random_spa_version(version + 1);
if (ztest_opts.zo_verbose >= 4) {
(void) printf("upgrading spa version from %llu to %llu\n",
(u_longlong_t)version, (u_longlong_t)newversion);
}
spa_upgrade(spa, newversion);
VERIFY3U(spa_version(spa), >, version);
VERIFY3U(spa_version(spa), ==, fnvlist_lookup_uint64(spa->spa_config,
zpool_prop_to_name(ZPOOL_PROP_VERSION)));
spa_close(spa, FTAG);
strfree(name);
mutex_exit(&ztest_vdev_lock);
}
static vdev_t *
vdev_lookup_by_path(vdev_t *vd, const char *path)
{
vdev_t *mvd;
int c;
if (vd->vdev_path != NULL && strcmp(path, vd->vdev_path) == 0)
return (vd);
for (c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_path(vd->vdev_child[c], path)) !=
NULL)
return (mvd);
return (NULL);
}
/*
* Find the first available hole which can be used as a top-level.
*/
int
find_vdev_hole(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
int c;
ASSERT(spa_config_held(spa, SCL_VDEV, RW_READER) == SCL_VDEV);
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *cvd = rvd->vdev_child[c];
if (cvd->vdev_ishole)
break;
}
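/* If no hole exists, c ends up equal to rvd->vdev_children. */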
return (c);
}
/*
* Verify that vdev_add() works as expected.
*/
/* ARGSUSED */
void
ztest_vdev_add_remove(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
uint64_t leaves;
uint64_t guid;
nvlist_t *nvroot;
int error;
if (ztest_opts.zo_mmp_test)
return;
mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors + zs->zs_splits, 1) * ztest_opts.zo_raidz;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
ztest_shared->zs_vdev_next_leaf = find_vdev_hole(spa) * leaves;
/*
* If we have slogs then remove them 1/4 of the time.
*/
if (spa_has_slogs(spa) && ztest_random(4) == 0) {
/*
* Grab the guid from the head of the log class rotor.
*/
guid = spa_log_class(spa)->mc_rotor->mg_vd->vdev_guid;
spa_config_exit(spa, SCL_VDEV, FTAG);
/*
* We have to grab the ztest_name_lock as writer to
* prevent a race between removing a slog (dmu_objset_find)
* and destroying a dataset. Removing the slog will
* grab a reference on the dataset which may cause
* dsl_destroy_head() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
rw_wrlock(&ztest_name_lock);
error = spa_vdev_remove(spa, guid, B_FALSE);
rw_unlock(&ztest_name_lock);
if (error && error != EEXIST)
fatal(0, "spa_vdev_remove() = %d", error);
} else {
spa_config_exit(spa, SCL_VDEV, FTAG);
/*
* Make 1/4 of the devices be log devices.
*/
nvroot = make_vdev_root(NULL, NULL, NULL,
ztest_opts.zo_vdev_size, 0,
ztest_random(4) == 0, ztest_opts.zo_raidz,
zs->zs_mirrors, 1);
error = spa_vdev_add(spa, nvroot);
nvlist_free(nvroot);
if (error == ENOSPC)
ztest_record_enospc("spa_vdev_add");
else if (error != 0)
fatal(0, "spa_vdev_add() = %d", error);
}
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that adding/removing aux devices (l2arc, hot spare) works as expected.
*/
/* ARGSUSED */
void
ztest_vdev_aux_add_remove(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
vdev_t *rvd = spa->spa_root_vdev;
spa_aux_vdev_t *sav;
char *aux;
char *path;
uint64_t guid = 0;
int error;
if (ztest_opts.zo_mmp_test)
return;
path = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
if (ztest_random(2) == 0) {
sav = &spa->spa_spares;
aux = ZPOOL_CONFIG_SPARES;
} else {
sav = &spa->spa_l2cache;
aux = ZPOOL_CONFIG_L2CACHE;
}
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
if (sav->sav_count != 0 && ztest_random(4) == 0) {
/*
* Pick a random device to remove.
*/
guid = sav->sav_vdevs[ztest_random(sav->sav_count)]->vdev_guid;
} else {
/*
* Find an unused device we can add.
*/
zs->zs_vdev_aux = 0;
for (;;) {
int c;
(void) snprintf(path, MAXPATHLEN, ztest_aux_template,
ztest_opts.zo_dir, ztest_opts.zo_pool, aux,
zs->zs_vdev_aux);
for (c = 0; c < sav->sav_count; c++)
if (strcmp(sav->sav_vdevs[c]->vdev_path,
path) == 0)
break;
if (c == sav->sav_count &&
vdev_lookup_by_path(rvd, path) == NULL)
break;
zs->zs_vdev_aux++;
}
}
spa_config_exit(spa, SCL_VDEV, FTAG);
if (guid == 0) {
/*
* Add a new device.
*/
nvlist_t *nvroot = make_vdev_root(NULL, aux, NULL,
(ztest_opts.zo_vdev_size * 5) / 4, 0, 0, 0, 0, 1);
error = spa_vdev_add(spa, nvroot);
if (error != 0)
fatal(0, "spa_vdev_add(%p) = %d", nvroot, error);
nvlist_free(nvroot);
} else {
/*
* Remove an existing device. Sometimes, dirty its
* vdev state first to make sure we handle removal
* of devices that have pending state changes.
*/
if (ztest_random(2) == 0)
(void) vdev_online(spa, guid, 0, NULL);
error = spa_vdev_remove(spa, guid, B_FALSE);
if (error != 0 && error != EBUSY)
fatal(0, "spa_vdev_remove(%llu) = %d", guid, error);
}
mutex_exit(&ztest_vdev_lock);
umem_free(path, MAXPATHLEN);
}
/*
* Split a pool if it has mirror top-level vdevs.
*/
/* ARGSUSED */
void
ztest_split_pool(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
vdev_t *rvd = spa->spa_root_vdev;
nvlist_t *tree, **child, *config, *split, **schild;
uint_t c, children, schildren = 0, lastlogid = 0;
int error = 0;
if (ztest_opts.zo_mmp_test)
return;
mutex_enter(&ztest_vdev_lock);
/* ensure we have a usable config; mirrors of raidz aren't supported */
if (zs->zs_mirrors < 3 || ztest_opts.zo_raidz > 1) {
mutex_exit(&ztest_vdev_lock);
return;
}
/* clean up the old pool, if any */
(void) spa_destroy("splitp");
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
/* generate a config from the existing config */
mutex_enter(&spa->spa_props_lock);
VERIFY(nvlist_lookup_nvlist(spa->spa_config, ZPOOL_CONFIG_VDEV_TREE,
&tree) == 0);
mutex_exit(&spa->spa_props_lock);
VERIFY(nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0);
schild = malloc(rvd->vdev_children * sizeof (nvlist_t *));
for (c = 0; c < children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
nvlist_t **mchild;
uint_t mchildren;
if (tvd->vdev_islog || tvd->vdev_ops == &vdev_hole_ops) {
VERIFY(nvlist_alloc(&schild[schildren], NV_UNIQUE_NAME,
0) == 0);
VERIFY(nvlist_add_string(schild[schildren],
ZPOOL_CONFIG_TYPE, VDEV_TYPE_HOLE) == 0);
VERIFY(nvlist_add_uint64(schild[schildren],
ZPOOL_CONFIG_IS_HOLE, 1) == 0);
if (lastlogid == 0)
lastlogid = schildren;
++schildren;
continue;
}
lastlogid = 0;
VERIFY(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
VERIFY(nvlist_dup(mchild[0], &schild[schildren++], 0) == 0);
}
/* OK, create a config that can be used to split */
VERIFY(nvlist_alloc(&split, NV_UNIQUE_NAME, 0) == 0);
VERIFY(nvlist_add_string(split, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) == 0);
VERIFY(nvlist_add_nvlist_array(split, ZPOOL_CONFIG_CHILDREN, schild,
lastlogid != 0 ? lastlogid : schildren) == 0);
VERIFY(nvlist_alloc(&config, NV_UNIQUE_NAME, 0) == 0);
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, split) == 0);
for (c = 0; c < schildren; c++)
nvlist_free(schild[c]);
free(schild);
nvlist_free(split);
spa_config_exit(spa, SCL_VDEV, FTAG);
(void) rw_wrlock(&ztest_name_lock);
error = spa_vdev_split_mirror(spa, "splitp", config, NULL, B_FALSE);
(void) rw_unlock(&ztest_name_lock);
nvlist_free(config);
if (error == 0) {
(void) printf("successful split - results:\n");
mutex_enter(&spa_namespace_lock);
show_pool_stats(spa);
show_pool_stats(spa_lookup("splitp"));
mutex_exit(&spa_namespace_lock);
++zs->zs_splits;
--zs->zs_mirrors;
}
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that we can attach and detach devices.
*/
/* ARGSUSED */
void
ztest_vdev_attach_detach(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
spa_aux_vdev_t *sav = &spa->spa_spares;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *oldvd, *newvd, *pvd;
nvlist_t *root;
uint64_t leaves;
uint64_t leaf, top;
uint64_t ashift = ztest_get_ashift();
uint64_t oldguid, pguid;
uint64_t oldsize, newsize;
char *oldpath, *newpath;
int replacing;
int oldvd_has_siblings = B_FALSE;
int newvd_is_spare = B_FALSE;
int oldvd_is_log;
int error, expected_error;
if (ztest_opts.zo_mmp_test)
return;
oldpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
newpath = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
mutex_enter(&ztest_vdev_lock);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
- spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
+
+ /*
+ * If a vdev is in the process of being removed, its removal may
+ * finish while we are in progress, leading to an unexpected error
+ * value. Don't bother trying to attach while we are in the middle
+ * of removal.
+ */
+ if (spa->spa_vdev_removal != NULL) {
+ spa_config_exit(spa, SCL_ALL, FTAG);
+ mutex_exit(&ztest_vdev_lock);
+ return;
+ }
/*
* Decide whether to do an attach or a replace.
*/
replacing = ztest_random(2);
/*
* Pick a random top-level vdev.
*/
top = ztest_random_vdev_top(spa, B_TRUE);
/*
* Pick a random leaf within it.
*/
leaf = ztest_random(leaves);
/*
* Locate this vdev.
*/
oldvd = rvd->vdev_child[top];
if (zs->zs_mirrors >= 1) {
ASSERT(oldvd->vdev_ops == &vdev_mirror_ops);
ASSERT(oldvd->vdev_children >= zs->zs_mirrors);
oldvd = oldvd->vdev_child[leaf / ztest_opts.zo_raidz];
}
if (ztest_opts.zo_raidz > 1) {
ASSERT(oldvd->vdev_ops == &vdev_raidz_ops);
ASSERT(oldvd->vdev_children == ztest_opts.zo_raidz);
oldvd = oldvd->vdev_child[leaf % ztest_opts.zo_raidz];
}
/*
* If we're already doing an attach or replace, oldvd may be a
* mirror vdev -- in which case, pick a random child.
*/
while (oldvd->vdev_children != 0) {
oldvd_has_siblings = B_TRUE;
ASSERT(oldvd->vdev_children >= 2);
oldvd = oldvd->vdev_child[ztest_random(oldvd->vdev_children)];
}
oldguid = oldvd->vdev_guid;
oldsize = vdev_get_min_asize(oldvd);
oldvd_is_log = oldvd->vdev_top->vdev_islog;
(void) strcpy(oldpath, oldvd->vdev_path);
pvd = oldvd->vdev_parent;
pguid = pvd->vdev_guid;
/*
* If oldvd has siblings, then half of the time, detach it.
*/
if (oldvd_has_siblings && ztest_random(2) == 0) {
- spa_config_exit(spa, SCL_VDEV, FTAG);
+ spa_config_exit(spa, SCL_ALL, FTAG);
error = spa_vdev_detach(spa, oldguid, pguid, B_FALSE);
if (error != 0 && error != ENODEV && error != EBUSY &&
error != ENOTSUP)
fatal(0, "detach (%s) returned %d", oldpath, error);
goto out;
}
/*
* For the new vdev, choose with equal probability between the two
* standard paths (ending in either 'a' or 'b') or a random hot spare.
*/
if (sav->sav_count != 0 && ztest_random(3) == 0) {
newvd = sav->sav_vdevs[ztest_random(sav->sav_count)];
newvd_is_spare = B_TRUE;
(void) strcpy(newpath, newvd->vdev_path);
} else {
(void) snprintf(newpath, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + leaf);
if (ztest_random(2) == 0)
newpath[strlen(newpath) - 1] = 'b';
newvd = vdev_lookup_by_path(rvd, newpath);
}
if (newvd) {
+ /*
+ * Reopen to ensure the vdev's asize field isn't stale.
+ */
+ vdev_reopen(newvd);
newsize = vdev_get_min_asize(newvd);
} else {
/*
* Make newsize a little bigger or smaller than oldsize.
* If it's smaller, the attach should fail.
* If it's larger, and we're doing a replace,
* we should get dynamic LUN growth when we're done.
*/
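/*
 * Concretely, ztest_random(3) yields 0, 1, or 2, so newsize comes
 * out roughly 11% larger than, equal to, or roughly 9% smaller
 * than oldsize.
 */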
newsize = 10 * oldsize / (9 + ztest_random(3));
}
/*
* If pvd is not a mirror or root, the attach should fail with ENOTSUP,
* unless it's a replace; in that case any non-replacing parent is OK.
*
* If newvd is already part of the pool, it should fail with EBUSY.
*
* If newvd is too small, it should fail with EOVERFLOW.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops && (!replacing ||
pvd->vdev_ops == &vdev_replacing_ops ||
pvd->vdev_ops == &vdev_spare_ops))
expected_error = ENOTSUP;
else if (newvd_is_spare && (!replacing || oldvd_is_log))
expected_error = ENOTSUP;
else if (newvd == oldvd)
expected_error = replacing ? 0 : EBUSY;
else if (vdev_lookup_by_path(rvd, newpath) != NULL)
expected_error = EBUSY;
else if (newsize < oldsize)
expected_error = EOVERFLOW;
else if (ashift > oldvd->vdev_top->vdev_ashift)
expected_error = EDOM;
else
expected_error = 0;
- spa_config_exit(spa, SCL_VDEV, FTAG);
+ spa_config_exit(spa, SCL_ALL, FTAG);
/*
* Build the nvlist describing newpath.
*/
root = make_vdev_root(newpath, NULL, NULL, newvd == NULL ? newsize : 0,
ashift, 0, 0, 0, 1);
error = spa_vdev_attach(spa, oldguid, root, replacing);
nvlist_free(root);
/*
* If our parent was the replacing vdev, but the replace completed,
* then instead of failing with ENOTSUP we may either succeed,
* fail with ENODEV, or fail with EOVERFLOW.
*/
if (expected_error == ENOTSUP &&
(error == 0 || error == ENODEV || error == EOVERFLOW))
expected_error = error;
/*
* If someone grew the LUN, the replacement may be too small.
*/
if (error == EOVERFLOW || error == EBUSY)
expected_error = error;
/* XXX workaround 6690467 */
if (error != expected_error && expected_error != EBUSY) {
fatal(0, "attach (%s %llu, %s %llu, %d) "
"returned %d, expected %d",
oldpath, oldsize, newpath,
newsize, replacing, error, expected_error);
}
out:
mutex_exit(&ztest_vdev_lock);
umem_free(oldpath, MAXPATHLEN);
umem_free(newpath, MAXPATHLEN);
}
+/* ARGSUSED */
+void
+ztest_device_removal(ztest_ds_t *zd, uint64_t id)
+{
+ spa_t *spa = ztest_spa;
+ vdev_t *vd;
+ uint64_t guid;
+
+ mutex_enter(&ztest_vdev_lock);
+
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ vd = vdev_lookup_top(spa, ztest_random_vdev_top(spa, B_FALSE));
+ guid = vd->vdev_guid;
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ (void) spa_vdev_remove(spa, guid, B_FALSE);
+
+ mutex_exit(&ztest_vdev_lock);
+}
+
/*
* Callback function which expands the physical size of the vdev.
*/
vdev_t *
grow_vdev(vdev_t *vd, void *arg)
{
ASSERTV(spa_t *spa = vd->vdev_spa);
size_t *newsize = arg;
size_t fsize;
int fd;
ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
if ((fd = open(vd->vdev_path, O_RDWR)) == -1)
return (vd);
fsize = lseek(fd, 0, SEEK_END);
VERIFY(ftruncate(fd, *newsize) == 0);
if (ztest_opts.zo_verbose >= 6) {
(void) printf("%s grew from %lu to %lu bytes\n",
vd->vdev_path, (ulong_t)fsize, (ulong_t)*newsize);
}
(void) close(fd);
return (NULL);
}
/*
* Callback function which expands a given vdev by calling vdev_online().
*/
/* ARGSUSED */
vdev_t *
online_vdev(vdev_t *vd, void *arg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint64_t guid = vd->vdev_guid;
uint64_t generation = spa->spa_config_generation + 1;
vdev_state_t newstate = VDEV_STATE_UNKNOWN;
int error;
ASSERT(spa_config_held(spa, SCL_STATE, RW_READER) == SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
/* Calling vdev_online will initialize the new metaslabs */
spa_config_exit(spa, SCL_STATE, spa);
error = vdev_online(spa, guid, ZFS_ONLINE_EXPAND, &newstate);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
/*
* If vdev_online returned an error or the underlying vdev_open
* failed then we abort the expand. The only way to know that
* vdev_open fails is by checking the returned newstate.
*/
if (error || newstate != VDEV_STATE_HEALTHY) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Unable to expand vdev, state %llu, "
"error %d\n", (u_longlong_t)newstate, error);
}
return (vd);
}
ASSERT3U(newstate, ==, VDEV_STATE_HEALTHY);
/*
* Since we dropped the lock we need to ensure that we're
* still talking to the original vdev. It's possible this
* vdev may have been detached/replaced while we were
* trying to online it.
*/
if (generation != spa->spa_config_generation) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("vdev configuration has changed, "
"guid %llu, state %llu, expected gen %llu, "
"got gen %llu\n",
(u_longlong_t)guid,
(u_longlong_t)tvd->vdev_state,
(u_longlong_t)generation,
(u_longlong_t)spa->spa_config_generation);
}
return (vd);
}
return (NULL);
}
/*
* Traverse the vdev tree calling the supplied function.
* We continue to walk the tree until we have either visited every
* child or received a non-NULL return from the callback.
* If a NULL callback is passed, we simply return the first
* leaf vdev we encounter.
*/
vdev_t *
vdev_walk_tree(vdev_t *vd, vdev_t *(*func)(vdev_t *, void *), void *arg)
{
uint_t c;
if (vd->vdev_ops->vdev_op_leaf) {
if (func == NULL)
return (vd);
else
return (func(vd, arg));
}
for (c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if ((cvd = vdev_walk_tree(cvd, func, arg)) != NULL)
return (cvd);
}
return (NULL);
}
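/*
 * Example callback (illustrative only, not used by any test): return
 * non-NULL to stop the walk at a matching leaf, NULL to keep walking.
 * This one would find the first leaf whose path starts with a given
 * prefix, much like vdev_lookup_by_path() but with a partial match.
 *
 *	static vdev_t *
 *	ztest_match_path_prefix(vdev_t *vd, void *arg)
 *	{
 *		const char *prefix = arg;
 *
 *		if (vd->vdev_path != NULL &&
 *		    strncmp(vd->vdev_path, prefix, strlen(prefix)) == 0)
 *			return (vd);
 *		return (NULL);
 *	}
 *
 *	leaf = vdev_walk_tree(tvd, ztest_match_path_prefix, "/some/prefix");
 */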
/*
* Verify that dynamic LUN growth works as expected.
*/
/* ARGSUSED */
void
ztest_vdev_LUN_growth(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
vdev_t *vd, *tvd;
metaslab_class_t *mc;
metaslab_group_t *mg;
size_t psize, newsize;
uint64_t top;
uint64_t old_class_space, new_class_space, old_ms_count, new_ms_count;
mutex_enter(&ztest_vdev_lock);
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
+ /*
+ * If there is a vdev removal in progress, it could complete while
+ * we are running, in which case we would not be able to verify
+ * that the metaslab_class space increased (because it decreases
+ * when the device removal completes).
+ */
+ if (spa->spa_vdev_removal != NULL) {
+ spa_config_exit(spa, SCL_STATE, FTAG);
+ mutex_exit(&ztest_vdev_lock);
+ return;
+ }
+
top = ztest_random_vdev_top(spa, B_TRUE);
tvd = spa->spa_root_vdev->vdev_child[top];
mg = tvd->vdev_mg;
mc = mg->mg_class;
old_ms_count = tvd->vdev_ms_count;
old_class_space = metaslab_class_get_space(mc);
/*
* Determine the size of the first leaf vdev associated with
* our top-level device.
*/
vd = vdev_walk_tree(tvd, NULL, NULL);
ASSERT3P(vd, !=, NULL);
ASSERT(vd->vdev_ops->vdev_op_leaf);
psize = vd->vdev_psize;
/*
* We only try to expand the vdev if it's healthy, less than 4x its
* original size, and it has a valid psize.
*/
if (tvd->vdev_state != VDEV_STATE_HEALTHY ||
psize == 0 || psize >= 4 * ztest_opts.zo_vdev_size) {
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
return;
}
ASSERT(psize > 0);
newsize = psize + psize / 8;
ASSERT3U(newsize, >, psize);
if (ztest_opts.zo_verbose >= 6) {
(void) printf("Expanding LUN %s from %lu to %lu\n",
vd->vdev_path, (ulong_t)psize, (ulong_t)newsize);
}
/*
* Growing the vdev is a two-step process:
* 1) expand the physical size (i.e. relabel)
* 2) online the vdev to create the new metaslabs
*/
if (vdev_walk_tree(tvd, grow_vdev, &newsize) != NULL ||
vdev_walk_tree(tvd, online_vdev, NULL) != NULL ||
tvd->vdev_state != VDEV_STATE_HEALTHY) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not expand LUN because "
"the vdev configuration changed.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
return;
}
spa_config_exit(spa, SCL_STATE, spa);
/*
* Expanding the LUN will update the config asynchronously,
* thus we must wait for the async thread to complete any
* pending tasks before proceeding.
*/
for (;;) {
boolean_t done;
mutex_enter(&spa->spa_async_lock);
done = (spa->spa_async_thread == NULL && !spa->spa_async_tasks);
mutex_exit(&spa->spa_async_lock);
if (done)
break;
txg_wait_synced(spa_get_dsl(spa), 0);
(void) poll(NULL, 0, 100);
}
spa_config_enter(spa, SCL_STATE, spa, RW_READER);
tvd = spa->spa_root_vdev->vdev_child[top];
new_ms_count = tvd->vdev_ms_count;
new_class_space = metaslab_class_get_space(mc);
if (tvd->vdev_mg != mg || mg->mg_class != mc) {
if (ztest_opts.zo_verbose >= 5) {
(void) printf("Could not verify LUN expansion due to "
"intervening vdev offline or remove.\n");
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
return;
}
/*
* Make sure we were able to grow the vdev.
*/
- if (new_ms_count <= old_ms_count)
- fatal(0, "LUN expansion failed: ms_count %llu <= %llu\n",
+ if (new_ms_count <= old_ms_count) {
+ fatal(0, "LUN expansion failed: ms_count %llu < %llu\n",
old_ms_count, new_ms_count);
+ }
/*
* Make sure we were able to grow the pool.
*/
- if (new_class_space <= old_class_space)
- fatal(0, "LUN expansion failed: class_space %llu <= %llu\n",
+ if (new_class_space <= old_class_space) {
+ fatal(0, "LUN expansion failed: class_space %llu < %llu\n",
old_class_space, new_class_space);
+ }
if (ztest_opts.zo_verbose >= 5) {
char oldnumbuf[NN_NUMBUF_SZ], newnumbuf[NN_NUMBUF_SZ];
nicenum(old_class_space, oldnumbuf, sizeof (oldnumbuf));
nicenum(new_class_space, newnumbuf, sizeof (newnumbuf));
(void) printf("%s grew from %s to %s\n",
spa->spa_name, oldnumbuf, newnumbuf);
}
spa_config_exit(spa, SCL_STATE, spa);
mutex_exit(&ztest_vdev_lock);
}
/*
* Verify that dmu_objset_{create,destroy,open,close} work as expected.
*/
/* ARGSUSED */
static void
ztest_objset_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
/*
* Create the objects common to all ztest datasets.
*/
VERIFY(zap_create_claim(os, ZTEST_DIROBJ,
DMU_OT_ZAP_OTHER, DMU_OT_NONE, 0, tx) == 0);
}
static int
ztest_dataset_create(char *dsname)
{
int err;
uint64_t rand;
dsl_crypto_params_t *dcp = NULL;
/*
* 50% of the time, we create encrypted datasets
* using a random cipher suite and a hard-coded
* wrapping key.
*/
rand = ztest_random(2);
if (rand != 0) {
nvlist_t *crypto_args = fnvlist_alloc();
nvlist_t *props = fnvlist_alloc();
/* slight bias towards the default cipher suite */
rand = ztest_random(ZIO_CRYPT_FUNCTIONS);
if (rand < ZIO_CRYPT_AES_128_CCM)
rand = ZIO_CRYPT_ON;
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_ENCRYPTION), rand);
fnvlist_add_uint8_array(crypto_args, "wkeydata",
(uint8_t *)ztest_wkeydata, WRAPPING_KEY_LEN);
/*
* These parameters aren't really used by the kernel. They
* are simply stored so that userspace knows how to load
* the wrapping key.
*/
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_KEYFORMAT), ZFS_KEYFORMAT_RAW);
fnvlist_add_string(props,
zfs_prop_to_name(ZFS_PROP_KEYLOCATION), "prompt");
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_SALT), 0ULL);
fnvlist_add_uint64(props,
zfs_prop_to_name(ZFS_PROP_PBKDF2_ITERS), 0ULL);
VERIFY0(dsl_crypto_params_create_nvlist(DCP_CMD_NONE, props,
crypto_args, &dcp));
fnvlist_free(crypto_args);
fnvlist_free(props);
}
err = dmu_objset_create(dsname, DMU_OST_OTHER, 0, dcp,
ztest_objset_create_cb, NULL);
dsl_crypto_params_free(dcp, !!err);
rand = ztest_random(100);
if (err || rand < 80)
return (err);
if (ztest_opts.zo_verbose >= 5)
(void) printf("Setting dataset %s to sync always\n", dsname);
return (ztest_dsl_prop_set_uint64(dsname, ZFS_PROP_SYNC,
ZFS_SYNC_ALWAYS, B_FALSE));
}
/* ARGSUSED */
static int
ztest_objset_destroy_cb(const char *name, void *arg)
{
objset_t *os;
dmu_object_info_t doi;
int error;
/*
* Verify that the dataset contains a directory object.
*/
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
B_TRUE, FTAG, &os));
error = dmu_object_info(os, ZTEST_DIROBJ, &doi);
if (error != ENOENT) {
/* We could have crashed in the middle of destroying it */
ASSERT0(error);
ASSERT3U(doi.doi_type, ==, DMU_OT_ZAP_OTHER);
ASSERT3S(doi.doi_physical_blocks_512, >=, 0);
}
dmu_objset_disown(os, B_TRUE, FTAG);
/*
* Destroy the dataset.
*/
if (strchr(name, '@') != NULL) {
VERIFY0(dsl_destroy_snapshot(name, B_TRUE));
} else {
error = dsl_destroy_head(name);
/* There could be a hold on this dataset */
if (error != EBUSY)
ASSERT0(error);
}
return (0);
}
static boolean_t
ztest_snapshot_create(char *osname, uint64_t id)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int error;
(void) snprintf(snapname, sizeof (snapname), "%llu", (u_longlong_t)id);
error = dmu_objset_snapshot_one(osname, snapname);
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
return (B_FALSE);
}
if (error != 0 && error != EEXIST) {
fatal(0, "ztest_snapshot_create(%s@%s) = %d", osname,
snapname, error);
}
return (B_TRUE);
}
static boolean_t
ztest_snapshot_destroy(char *osname, uint64_t id)
{
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int error;
(void) snprintf(snapname, sizeof (snapname), "%s@%llu", osname,
(u_longlong_t)id);
error = dsl_destroy_snapshot(snapname, B_FALSE);
if (error != 0 && error != ENOENT)
fatal(0, "ztest_snapshot_destroy(%s) = %d", snapname, error);
return (B_TRUE);
}
/* ARGSUSED */
void
ztest_dmu_objset_create_destroy(ztest_ds_t *zd, uint64_t id)
{
ztest_ds_t *zdtmp;
int iters;
int error;
objset_t *os, *os2;
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog_t *zilog;
int i;
zdtmp = umem_alloc(sizeof (ztest_ds_t), UMEM_NOFAIL);
(void) rw_rdlock(&ztest_name_lock);
(void) snprintf(name, sizeof (name), "%s/temp_%llu",
ztest_opts.zo_pool, (u_longlong_t)id);
/*
* If this dataset exists from a previous run, process its replay log
* half of the time. If we don't replay it, then dsl_destroy_head()
* (invoked from ztest_objset_destroy_cb()) should just throw it away.
*/
if (ztest_random(2) == 0 &&
ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE,
B_TRUE, FTAG, &os) == 0) {
ztest_zd_init(zdtmp, NULL, os);
zil_replay(os, zdtmp, ztest_replay_vector);
ztest_zd_fini(zdtmp);
txg_wait_synced(dmu_objset_pool(os), 0);
dmu_objset_disown(os, B_TRUE, FTAG);
}
/*
* There may be an old instance of the dataset we're about to
* create lying around from a previous run. If so, destroy it
* and all of its snapshots.
*/
(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
/*
* Verify that the destroyed dataset is no longer in the namespace.
*/
VERIFY3U(ENOENT, ==, ztest_dmu_objset_own(name, DMU_OST_OTHER, B_TRUE,
B_TRUE, FTAG, &os));
/*
* Verify that we can create a new dataset.
*/
error = ztest_dataset_create(name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_objset_create(%s) = %d", name, error);
}
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE, B_TRUE,
FTAG, &os));
ztest_zd_init(zdtmp, NULL, os);
/*
* Open the intent log for it.
*/
zilog = zil_open(os, ztest_get_data);
/*
* Put some objects in there, do a little I/O to them,
* and randomly take a couple of snapshots along the way.
*/
iters = ztest_random(5);
for (i = 0; i < iters; i++) {
ztest_dmu_object_alloc_free(zdtmp, id);
if (ztest_random(iters) == 0)
(void) ztest_snapshot_create(name, i);
}
/*
* Verify that we cannot create an existing dataset.
*/
VERIFY3U(EEXIST, ==,
dmu_objset_create(name, DMU_OST_OTHER, 0, NULL, NULL, NULL));
/*
* Verify that we can hold an objset that is also owned.
*/
VERIFY3U(0, ==, dmu_objset_hold(name, FTAG, &os2));
dmu_objset_rele(os2, FTAG);
/*
* Verify that we cannot own an objset that is already owned.
*/
VERIFY3U(EBUSY, ==, ztest_dmu_objset_own(name, DMU_OST_OTHER,
B_FALSE, B_TRUE, FTAG, &os2));
zil_close(zilog);
txg_wait_synced(spa_get_dsl(os->os_spa), 0);
dmu_objset_disown(os, B_TRUE, FTAG);
ztest_zd_fini(zdtmp);
out:
(void) rw_unlock(&ztest_name_lock);
umem_free(zdtmp, sizeof (ztest_ds_t));
}
/*
* Verify that dmu_snapshot_{create,destroy,open,close} work as expected.
*/
void
ztest_dmu_snapshot_create_destroy(ztest_ds_t *zd, uint64_t id)
{
(void) rw_rdlock(&ztest_name_lock);
(void) ztest_snapshot_destroy(zd->zd_name, id);
(void) ztest_snapshot_create(zd->zd_name, id);
(void) rw_unlock(&ztest_name_lock);
}
/*
* Cleanup non-standard snapshots and clones.
*/
void
ztest_dsl_dataset_cleanup(char *osname, uint64_t id)
{
char *snap1name;
char *clone1name;
char *snap2name;
char *clone2name;
char *snap3name;
int error;
snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
(void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN,
"%s@s1_%llu", osname, (u_longlong_t)id);
(void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN,
"%s/c1_%llu", osname, (u_longlong_t)id);
(void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN,
"%s@s2_%llu", clone1name, (u_longlong_t)id);
(void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN,
"%s/c2_%llu", osname, (u_longlong_t)id);
(void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN,
"%s@s3_%llu", clone1name, (u_longlong_t)id);
error = dsl_destroy_head(clone2name);
if (error && error != ENOENT)
fatal(0, "dsl_destroy_head(%s) = %d", clone2name, error);
error = dsl_destroy_snapshot(snap3name, B_FALSE);
if (error && error != ENOENT)
fatal(0, "dsl_destroy_snapshot(%s) = %d", snap3name, error);
error = dsl_destroy_snapshot(snap2name, B_FALSE);
if (error && error != ENOENT)
fatal(0, "dsl_destroy_snapshot(%s) = %d", snap2name, error);
error = dsl_destroy_head(clone1name);
if (error && error != ENOENT)
fatal(0, "dsl_destroy_head(%s) = %d", clone1name, error);
error = dsl_destroy_snapshot(snap1name, B_FALSE);
if (error && error != ENOENT)
fatal(0, "dsl_destroy_snapshot(%s) = %d", snap1name, error);
umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN);
}
/*
* Verify dsl_dataset_promote handles EBUSY
*/
void
ztest_dsl_dataset_promote_busy(ztest_ds_t *zd, uint64_t id)
{
objset_t *os;
char *snap1name;
char *clone1name;
char *snap2name;
char *clone2name;
char *snap3name;
char *osname = zd->zd_name;
int error;
snap1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone1name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
clone2name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
snap3name = umem_alloc(ZFS_MAX_DATASET_NAME_LEN, UMEM_NOFAIL);
(void) rw_rdlock(&ztest_name_lock);
ztest_dsl_dataset_cleanup(osname, id);
(void) snprintf(snap1name, ZFS_MAX_DATASET_NAME_LEN,
"%s@s1_%llu", osname, (u_longlong_t)id);
(void) snprintf(clone1name, ZFS_MAX_DATASET_NAME_LEN,
"%s/c1_%llu", osname, (u_longlong_t)id);
(void) snprintf(snap2name, ZFS_MAX_DATASET_NAME_LEN,
"%s@s2_%llu", clone1name, (u_longlong_t)id);
(void) snprintf(clone2name, ZFS_MAX_DATASET_NAME_LEN,
"%s/c2_%llu", osname, (u_longlong_t)id);
(void) snprintf(snap3name, ZFS_MAX_DATASET_NAME_LEN,
"%s@s3_%llu", clone1name, (u_longlong_t)id);
error = dmu_objset_snapshot_one(osname, strchr(snap1name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_take_snapshot(%s) = %d", snap1name, error);
}
error = dmu_objset_clone(clone1name, snap1name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_objset_create(%s) = %d", clone1name, error);
}
error = dmu_objset_snapshot_one(clone1name, strchr(snap2name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_open_snapshot(%s) = %d", snap2name, error);
}
error = dmu_objset_snapshot_one(clone1name, strchr(snap3name, '@') + 1);
if (error && error != EEXIST) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_open_snapshot(%s) = %d", snap3name, error);
}
error = dmu_objset_clone(clone2name, snap3name);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc(FTAG);
goto out;
}
fatal(0, "dmu_objset_create(%s) = %d", clone2name, error);
}
error = ztest_dmu_objset_own(snap2name, DMU_OST_ANY, B_TRUE, B_TRUE,
FTAG, &os);
if (error)
fatal(0, "dmu_objset_own(%s) = %d", snap2name, error);
error = dsl_dataset_promote(clone2name, NULL);
if (error == ENOSPC) {
dmu_objset_disown(os, B_TRUE, FTAG);
ztest_record_enospc(FTAG);
goto out;
}
if (error != EBUSY)
fatal(0, "dsl_dataset_promote(%s), %d, not EBUSY", clone2name,
error);
dmu_objset_disown(os, B_TRUE, FTAG);
out:
ztest_dsl_dataset_cleanup(osname, id);
(void) rw_unlock(&ztest_name_lock);
umem_free(snap1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone1name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(clone2name, ZFS_MAX_DATASET_NAME_LEN);
umem_free(snap3name, ZFS_MAX_DATASET_NAME_LEN);
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 4
/*
* Verify that dmu_object_{alloc,free} work as expected.
*/
void
ztest_dmu_object_alloc_free(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
int batchsize;
int size;
int b;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
batchsize = OD_ARRAY_SIZE;
for (b = 0; b < batchsize; b++)
ztest_od_init(od + b, id, FTAG, b, DMU_OT_UINT64_OTHER,
0, 0, 0);
/*
* Destroy the previous batch of objects, create a new batch,
* and do some I/O on the new objects.
*/
if (ztest_object_init(zd, od, size, B_TRUE) != 0)
return;
while (ztest_random(4 * batchsize) != 0)
ztest_io(zd, od[ztest_random(batchsize)].od_object,
ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
umem_free(od, size);
}
/*
* Rewind the global allocator to verify object allocation backfilling.
*/
void
ztest_dmu_object_next_chunk(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
int dnodes_per_chunk = 1 << dmu_object_alloc_chunk_shift;
uint64_t object;
/*
* Rewind the global allocator randomly back to a lower object number
* to force backfilling and reclamation of recently freed dnodes.
*/
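/*
 * P2ALIGN() rounds its first argument down to a multiple of its second
 * (a power of two).  For example, if dmu_object_alloc_chunk_shift is 7,
 * dnodes_per_chunk is 128 and a random object number of 1000 is rewound
 * to P2ALIGN(1000, 128) == 896, the first dnode of that chunk.
 */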
mutex_enter(&os->os_obj_lock);
object = ztest_random(os->os_obj_next_chunk);
os->os_obj_next_chunk = P2ALIGN(object, dnodes_per_chunk);
mutex_exit(&os->os_obj_lock);
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 2
/*
* Verify that dmu_{read,write} work as expected.
*/
void
ztest_dmu_read_write(ztest_ds_t *zd, uint64_t id)
{
int size;
ztest_od_t *od;
objset_t *os = zd->zd_os;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
dmu_tx_t *tx;
int i, freeit, error;
uint64_t n, s, txg;
bufwad_t *packbuf, *bigbuf, *pack, *bigH, *bigT;
uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
uint64_t chunksize = (1000 + ztest_random(1000)) * sizeof (uint64_t);
uint64_t regions = 997;
uint64_t stride = 123456789ULL;
uint64_t width = 40;
int free_percent = 5;
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is arbitrary. It doesn't have to be a power of two,
* and it doesn't have any relation to the object blocksize.
* The only requirement is that it can hold at least two bufwads.
*
* Normally, we write the bufwad to each of these locations.
* However, free_percent of the time we instead write zeroes to
* packobj and perform a dmu_free_range() on bigobj. By comparing
* bigobj to packobj, we can verify that the DMU is correctly
* tracking which parts of an object are allocated and free,
* and that the contents of the allocated blocks are correct.
*/
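/*
 * As a concrete illustration (the numbers are arbitrary): with a
 * chunksize of 8192 bytes, the three copies of bufwad n = 3 live at
 *
 *	packobj offset 3 * sizeof (bufwad_t)
 *	bigobj offset 3 * 8192				(head of chunk 3)
 *	bigobj offset 4 * 8192 - sizeof (bufwad_t)	(tail of chunk 3)
 *
 * which is exactly how bigH and bigT are computed in the loop below.
 */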
/*
* Read the directory info. If it's the first time, set things up.
*/
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, chunksize);
ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
return;
}
bigobj = od[0].od_object;
packobj = od[1].od_object;
chunksize = od[0].od_gen;
ASSERT(chunksize == od[1].od_gen);
/*
* Prefetch a random chunk of the big object.
* Our aim here is to get some async reads in flight
* for blocks that we may free below; the DMU should
* handle this race correctly.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(2 * width - 1);
dmu_prefetch(os, bigobj, 0, n * chunksize, s * chunksize,
ZIO_PRIORITY_SYNC_READ);
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(width - 1);
packoff = n * sizeof (bufwad_t);
packsize = s * sizeof (bufwad_t);
bigoff = n * chunksize;
bigsize = s * chunksize;
packbuf = umem_alloc(packsize, UMEM_NOFAIL);
bigbuf = umem_alloc(bigsize, UMEM_NOFAIL);
/*
* free_percent of the time, free a range of bigobj rather than
* overwriting it.
*/
freeit = (ztest_random(100) < free_percent);
/*
* Read the current contents of our objects.
*/
error = dmu_read(os, packobj, packoff, packsize, packbuf,
DMU_READ_PREFETCH);
ASSERT0(error);
error = dmu_read(os, bigobj, bigoff, bigsize, bigbuf,
DMU_READ_PREFETCH);
ASSERT0(error);
/*
* Get a tx for the mods to both packobj and bigobj.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, packobj, packoff, packsize);
if (freeit)
dmu_tx_hold_free(tx, bigobj, bigoff, bigsize);
else
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
/* This accounts for setting the checksum/compression. */
dmu_tx_hold_bonus(tx, bigobj);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(od, size);
return;
}
enum zio_checksum cksum;
do {
cksum = (enum zio_checksum)
ztest_random_dsl_prop(ZFS_PROP_CHECKSUM);
} while (cksum >= ZIO_CHECKSUM_LEGACY_FUNCTIONS);
dmu_object_set_checksum(os, bigobj, cksum, tx);
enum zio_compress comp;
do {
comp = (enum zio_compress)
ztest_random_dsl_prop(ZFS_PROP_COMPRESSION);
} while (comp >= ZIO_COMPRESS_LEGACY_FUNCTIONS);
dmu_object_set_compress(os, bigobj, comp, tx);
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
/* LINTED */
bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
/* LINTED */
bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
if (pack->bw_txg > txg)
fatal(0, "future leak: got %llx, open txg is %llx",
pack->bw_txg, txg);
if (pack->bw_data != 0 && pack->bw_index != n + i)
fatal(0, "wrong index: got %llx, wanted %llx+%llx",
pack->bw_index, n, i);
if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
if (freeit) {
bzero(pack, sizeof (bufwad_t));
} else {
pack->bw_index = n + i;
pack->bw_txg = txg;
pack->bw_data = 1 + ztest_random(-2ULL);
}
*bigH = *pack;
*bigT = *pack;
}
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
dmu_write(os, packobj, packoff, packsize, packbuf, tx);
if (freeit) {
if (ztest_opts.zo_verbose >= 7) {
(void) printf("freeing offset %llx size %llx"
" txg %llx\n",
(u_longlong_t)bigoff,
(u_longlong_t)bigsize,
(u_longlong_t)txg);
}
VERIFY(0 == dmu_free_range(os, bigobj, bigoff, bigsize, tx));
} else {
if (ztest_opts.zo_verbose >= 7) {
(void) printf("writing offset %llx size %llx"
" txg %llx\n",
(u_longlong_t)bigoff,
(u_longlong_t)bigsize,
(u_longlong_t)txg);
}
dmu_write(os, bigobj, bigoff, bigsize, bigbuf, tx);
}
dmu_tx_commit(tx);
/*
* Sanity check the stuff we just wrote.
*/
{
void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, packobj, packoff,
packsize, packcheck, DMU_READ_PREFETCH));
VERIFY(0 == dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
}
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(od, size);
}
void
compare_and_update_pbbufs(uint64_t s, bufwad_t *packbuf, bufwad_t *bigbuf,
uint64_t bigsize, uint64_t n, uint64_t chunksize, uint64_t txg)
{
uint64_t i;
bufwad_t *pack;
bufwad_t *bigH;
bufwad_t *bigT;
/*
* For each index from n to n + s, verify that the existing bufwad
* in packobj matches the bufwads at the head and tail of the
* corresponding chunk in bigobj. Then update all three bufwads
* with the new values we want to write out.
*/
for (i = 0; i < s; i++) {
/* LINTED */
pack = (bufwad_t *)((char *)packbuf + i * sizeof (bufwad_t));
/* LINTED */
bigH = (bufwad_t *)((char *)bigbuf + i * chunksize);
/* LINTED */
bigT = (bufwad_t *)((char *)bigH + chunksize) - 1;
ASSERT((uintptr_t)bigH - (uintptr_t)bigbuf < bigsize);
ASSERT((uintptr_t)bigT - (uintptr_t)bigbuf < bigsize);
if (pack->bw_txg > txg)
fatal(0, "future leak: got %llx, open txg is %llx",
pack->bw_txg, txg);
if (pack->bw_data != 0 && pack->bw_index != n + i)
fatal(0, "wrong index: got %llx, wanted %llx+%llx",
pack->bw_index, n, i);
if (bcmp(pack, bigH, sizeof (bufwad_t)) != 0)
fatal(0, "pack/bigH mismatch in %p/%p", pack, bigH);
if (bcmp(pack, bigT, sizeof (bufwad_t)) != 0)
fatal(0, "pack/bigT mismatch in %p/%p", pack, bigT);
pack->bw_index = n + i;
pack->bw_txg = txg;
pack->bw_data = 1 + ztest_random(-2ULL);
*bigH = *pack;
*bigT = *pack;
}
}
#undef OD_ARRAY_SIZE
#define OD_ARRAY_SIZE 2
void
ztest_dmu_read_write_zcopy(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
dmu_tx_t *tx;
uint64_t i;
int error;
int size;
uint64_t n, s, txg;
bufwad_t *packbuf, *bigbuf;
uint64_t packobj, packoff, packsize, bigobj, bigoff, bigsize;
uint64_t blocksize = ztest_random_blocksize();
uint64_t chunksize = blocksize;
uint64_t regions = 997;
uint64_t stride = 123456789ULL;
uint64_t width = 9;
dmu_buf_t *bonus_db;
arc_buf_t **bigbuf_arcbufs;
dmu_object_info_t doi;
size = sizeof (ztest_od_t) * OD_ARRAY_SIZE;
od = umem_alloc(size, UMEM_NOFAIL);
/*
* This test uses two objects, packobj and bigobj, that are always
* updated together (i.e. in the same tx) so that their contents are
* in sync and can be compared. Their contents relate to each other
* in a simple way: packobj is a dense array of 'bufwad' structures,
* while bigobj is a sparse array of the same bufwads. Specifically,
* for any index n, there are three bufwads that should be identical:
*
* packobj, at offset n * sizeof (bufwad_t)
* bigobj, at the head of the nth chunk
* bigobj, at the tail of the nth chunk
*
* The chunk size is set equal to bigobj block size so that
* dmu_assign_arcbuf_by_dbuf() can be tested for object updates.
*/
/*
* Read the directory info. If it's the first time, set things up.
*/
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
ztest_od_init(od + 1, id, FTAG, 1, DMU_OT_UINT64_OTHER, 0, 0,
chunksize);
if (ztest_object_init(zd, od, size, B_FALSE) != 0) {
umem_free(od, size);
return;
}
bigobj = od[0].od_object;
packobj = od[1].od_object;
blocksize = od[0].od_blocksize;
chunksize = blocksize;
ASSERT(chunksize == od[1].od_gen);
VERIFY(dmu_object_info(os, bigobj, &doi) == 0);
VERIFY(ISP2(doi.doi_data_block_size));
VERIFY(chunksize == doi.doi_data_block_size);
VERIFY(chunksize >= 2 * sizeof (bufwad_t));
/*
* Pick a random index and compute the offsets into packobj and bigobj.
*/
n = ztest_random(regions) * stride + ztest_random(width);
s = 1 + ztest_random(width - 1);
packoff = n * sizeof (bufwad_t);
packsize = s * sizeof (bufwad_t);
bigoff = n * chunksize;
bigsize = s * chunksize;
packbuf = umem_zalloc(packsize, UMEM_NOFAIL);
bigbuf = umem_zalloc(bigsize, UMEM_NOFAIL);
VERIFY3U(0, ==, dmu_bonus_hold(os, bigobj, FTAG, &bonus_db));
bigbuf_arcbufs = umem_zalloc(2 * s * sizeof (arc_buf_t *), UMEM_NOFAIL);
/*
* Iteration 0 tests zcopy for DB_UNCACHED dbufs.
* Iteration 1 tests zcopy to already referenced dbufs.
* Iteration 2 tests zcopy to a dbuf dirtied in the same txg.
* Iteration 3 tests zcopy to a dbuf dirtied in the previous txg.
* Iteration 4 tests zcopy when the dbuf is no longer dirty.
* Iteration 5 tests zcopy when it can't be done.
* Iteration 6 does one more zcopy write.
*/
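/*
 * The dirty-state setup for iterations 2-4 comes from the txg_wait_open()
 * and txg_wait_synced() calls at the bottom of this loop: after pass 2 we
 * wait for a new txg to open, so the dbufs dirtied in pass 2 are dirty in
 * the previous txg when pass 3 runs, and after pass 3 we wait for the txg
 * to sync so the dbufs are clean again by pass 4.
 */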
for (i = 0; i < 7; i++) {
uint64_t j;
uint64_t off;
/*
* In iteration 5 (i == 5) use arcbufs
* that don't match bigobj blksz to test
* dmu_assign_arcbuf_by_dbuf() when it can't directly
* assign an arcbuf to a dbuf.
*/
for (j = 0; j < s; j++) {
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
bigbuf_arcbufs[j] =
dmu_request_arcbuf(bonus_db, chunksize);
} else {
bigbuf_arcbufs[2 * j] =
dmu_request_arcbuf(bonus_db, chunksize / 2);
bigbuf_arcbufs[2 * j + 1] =
dmu_request_arcbuf(bonus_db, chunksize / 2);
}
}
/*
* Get a tx for the mods to both packobj and bigobj.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, packobj, packoff, packsize);
dmu_tx_hold_write(tx, bigobj, bigoff, bigsize);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
for (j = 0; j < s; j++) {
if (i != 5 ||
chunksize < (SPA_MINBLOCKSIZE * 2)) {
dmu_return_arcbuf(bigbuf_arcbufs[j]);
} else {
dmu_return_arcbuf(
bigbuf_arcbufs[2 * j]);
dmu_return_arcbuf(
bigbuf_arcbufs[2 * j + 1]);
}
}
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
umem_free(od, size);
dmu_buf_rele(bonus_db, FTAG);
return;
}
/*
* 50% of the time don't read objects in the 1st iteration to
* test dmu_assign_arcbuf_by_dbuf() for the case when there are
* no existing dbufs for the specified offsets.
*/
if (i != 0 || ztest_random(2) != 0) {
error = dmu_read(os, packobj, packoff,
packsize, packbuf, DMU_READ_PREFETCH);
ASSERT0(error);
error = dmu_read(os, bigobj, bigoff, bigsize,
bigbuf, DMU_READ_PREFETCH);
ASSERT0(error);
}
compare_and_update_pbbufs(s, packbuf, bigbuf, bigsize,
n, chunksize, txg);
/*
* We've verified all the old bufwads, and made new ones.
* Now write them out.
*/
dmu_write(os, packobj, packoff, packsize, packbuf, tx);
if (ztest_opts.zo_verbose >= 7) {
(void) printf("writing offset %llx size %llx"
" txg %llx\n",
(u_longlong_t)bigoff,
(u_longlong_t)bigsize,
(u_longlong_t)txg);
}
for (off = bigoff, j = 0; j < s; j++, off += chunksize) {
dmu_buf_t *dbt;
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
bcopy((caddr_t)bigbuf + (off - bigoff),
bigbuf_arcbufs[j]->b_data, chunksize);
} else {
bcopy((caddr_t)bigbuf + (off - bigoff),
bigbuf_arcbufs[2 * j]->b_data,
chunksize / 2);
bcopy((caddr_t)bigbuf + (off - bigoff) +
chunksize / 2,
bigbuf_arcbufs[2 * j + 1]->b_data,
chunksize / 2);
}
if (i == 1) {
VERIFY(dmu_buf_hold(os, bigobj, off,
FTAG, &dbt, DMU_READ_NO_PREFETCH) == 0);
}
if (i != 5 || chunksize < (SPA_MINBLOCKSIZE * 2)) {
dmu_assign_arcbuf_by_dbuf(bonus_db, off,
bigbuf_arcbufs[j], tx);
} else {
dmu_assign_arcbuf_by_dbuf(bonus_db, off,
bigbuf_arcbufs[2 * j], tx);
dmu_assign_arcbuf_by_dbuf(bonus_db,
off + chunksize / 2,
bigbuf_arcbufs[2 * j + 1], tx);
}
if (i == 1) {
dmu_buf_rele(dbt, FTAG);
}
}
dmu_tx_commit(tx);
/*
* Sanity check the stuff we just wrote.
*/
{
void *packcheck = umem_alloc(packsize, UMEM_NOFAIL);
void *bigcheck = umem_alloc(bigsize, UMEM_NOFAIL);
VERIFY(0 == dmu_read(os, packobj, packoff,
packsize, packcheck, DMU_READ_PREFETCH));
VERIFY(0 == dmu_read(os, bigobj, bigoff,
bigsize, bigcheck, DMU_READ_PREFETCH));
ASSERT(bcmp(packbuf, packcheck, packsize) == 0);
ASSERT(bcmp(bigbuf, bigcheck, bigsize) == 0);
umem_free(packcheck, packsize);
umem_free(bigcheck, bigsize);
}
if (i == 2) {
txg_wait_open(dmu_objset_pool(os), 0);
} else if (i == 3) {
txg_wait_synced(dmu_objset_pool(os), 0);
}
}
dmu_buf_rele(bonus_db, FTAG);
umem_free(packbuf, packsize);
umem_free(bigbuf, bigsize);
umem_free(bigbuf_arcbufs, 2 * s * sizeof (arc_buf_t *));
umem_free(od, size);
}
/* ARGSUSED */
void
ztest_dmu_write_parallel(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
uint64_t offset = (1ULL << (ztest_random(20) + 43)) +
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
/*
* Have multiple threads write to large offsets in an object
* to verify that parallel writes to an object -- even to the
* same blocks within the object -- don't cause any trouble.
*/
ztest_od_init(od, ID_PARALLEL, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0)
return;
while (ztest_random(10) != 0)
ztest_io(zd, od->od_object, offset);
umem_free(od, sizeof (ztest_od_t));
}
void
ztest_dmu_prealloc(ztest_ds_t *zd, uint64_t id)
{
ztest_od_t *od;
uint64_t offset = (1ULL << (ztest_random(4) + SPA_MAXBLOCKSHIFT)) +
(ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
uint64_t count = ztest_random(20) + 1;
uint64_t blocksize = ztest_random_blocksize();
void *data;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
if (ztest_truncate(zd, od->od_object, offset, count * blocksize) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
ztest_prealloc(zd, od->od_object, offset, count * blocksize);
data = umem_zalloc(blocksize, UMEM_NOFAIL);
while (ztest_random(count) != 0) {
uint64_t randoff = offset + (ztest_random(count) * blocksize);
if (ztest_write(zd, od->od_object, randoff, blocksize,
data) != 0)
break;
while (ztest_random(4) != 0)
ztest_io(zd, od->od_object, randoff);
}
umem_free(data, blocksize);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Verify that zap_{create,destroy,add,remove,update} work as expected.
*/
#define ZTEST_ZAP_MIN_INTS 1
#define ZTEST_ZAP_MAX_INTS 4
#define ZTEST_ZAP_MAX_PROPS 1000
void
ztest_zap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t object;
uint64_t txg, last_txg;
uint64_t value[ZTEST_ZAP_MAX_INTS];
uint64_t zl_ints, zl_intsize, prop;
int i, ints;
dmu_tx_t *tx;
char propname[100], txgname[100];
int error;
char *hc[2] = { "s.acl.h", ".s.open.h.hyLZlg" };
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
goto out;
object = od->od_object;
/*
* Generate a known hash collision, and verify that
* we can look up and remove both entries.
*/
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
for (i = 0; i < 2; i++) {
value[i] = i;
VERIFY3U(0, ==, zap_add(os, object, hc[i], sizeof (uint64_t),
1, &value[i], tx));
}
for (i = 0; i < 2; i++) {
VERIFY3U(EEXIST, ==, zap_add(os, object, hc[i],
sizeof (uint64_t), 1, &value[i], tx));
VERIFY3U(0, ==,
zap_length(os, object, hc[i], &zl_intsize, &zl_ints));
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, 1);
}
for (i = 0; i < 2; i++) {
VERIFY3U(0, ==, zap_remove(os, object, hc[i], tx));
}
dmu_tx_commit(tx);
/*
* Generate a bunch of random entries.
*/
ints = MAX(ZTEST_ZAP_MIN_INTS, object % ZTEST_ZAP_MAX_INTS);
prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
(void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
(void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
bzero(value, sizeof (value));
last_txg = 0;
/*
* If these zap entries already exist, validate their contents.
*/
error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == 0) {
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, 1);
VERIFY(zap_lookup(os, object, txgname, zl_intsize,
zl_ints, &last_txg) == 0);
VERIFY(zap_length(os, object, propname, &zl_intsize,
&zl_ints) == 0);
ASSERT3U(zl_intsize, ==, sizeof (uint64_t));
ASSERT3U(zl_ints, ==, ints);
VERIFY(zap_lookup(os, object, propname, zl_intsize,
zl_ints, value) == 0);
for (i = 0; i < ints; i++) {
ASSERT3U(value[i], ==, last_txg + object + i);
}
} else {
ASSERT3U(error, ==, ENOENT);
}
/*
* Atomically update two entries in our zap object.
* The first is named txg_%llu, and contains the txg
* in which the property was last updated. The second
* is named prop_%llu, and the nth element of its value
* should be txg + object + n.
*/
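/*
 * For example (hypothetical values): if prop == 17, the tx is assigned
 * txg 42, and the zap object number is 5, then "txg_17" is set to 42 and
 * "prop_17" to { 47, 48, ... }, i.e. txg + object + n for each element.
 */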
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
if (last_txg > txg)
fatal(0, "zap future leak: old %llu new %llu", last_txg, txg);
for (i = 0; i < ints; i++)
value[i] = txg + object + i;
VERIFY3U(0, ==, zap_update(os, object, txgname, sizeof (uint64_t),
1, &txg, tx));
VERIFY3U(0, ==, zap_update(os, object, propname, sizeof (uint64_t),
ints, value, tx));
dmu_tx_commit(tx);
/*
* Remove a random pair of entries.
*/
prop = ztest_random(ZTEST_ZAP_MAX_PROPS);
(void) sprintf(propname, "prop_%llu", (u_longlong_t)prop);
(void) sprintf(txgname, "txg_%llu", (u_longlong_t)prop);
error = zap_length(os, object, txgname, &zl_intsize, &zl_ints);
if (error == ENOENT)
goto out;
ASSERT0(error);
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
VERIFY3U(0, ==, zap_remove(os, object, txgname, tx));
VERIFY3U(0, ==, zap_remove(os, object, propname, tx));
dmu_tx_commit(tx);
out:
umem_free(od, sizeof (ztest_od_t));
}
/*
* Test the upgrade of a microzap to a fatzap.
*/
void
ztest_fzap(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t object, txg;
int i;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t),
!ztest_random(2)) != 0)
goto out;
object = od->od_object;
/*
* Add entries to this ZAP and make sure it spills over
* and gets upgraded to a fatzap. Also, since we are adding
* 2050 entries we should see ptrtbl growth and leaf-block split.
*/
for (i = 0; i < 2050; i++) {
char name[ZFS_MAX_DATASET_NAME_LEN];
uint64_t value = i;
dmu_tx_t *tx;
int error;
(void) snprintf(name, sizeof (name), "fzap-%llu-%llu",
(u_longlong_t)id, (u_longlong_t)value);
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, name);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0)
goto out;
error = zap_add(os, object, name, sizeof (uint64_t), 1,
&value, tx);
ASSERT(error == 0 || error == EEXIST);
dmu_tx_commit(tx);
}
out:
umem_free(od, sizeof (ztest_od_t));
}
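/*
 * Illustrative sketch, not part of the original change: once the loop above
 * has run to completion, the total number of entries can be sanity-checked
 * with zap_count(), the same interface used by ztest_zap_parallel() below.
 * The helper name is hypothetical and the block is left disabled.
 */
#if 0
static void
ztest_fzap_verify_count(objset_t *os, uint64_t object)
{
	uint64_t count = 0;

	/* Each of the 2050 names was either added or already present. */
	VERIFY0(zap_count(os, object, &count));
	ASSERT3U(count, >=, 2050);
}
#endif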
/* ARGSUSED */
void
ztest_zap_parallel(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t txg, object, count, wsize, wc, zl_wsize, zl_wc;
dmu_tx_t *tx;
int i, namelen, error;
int micro = ztest_random(2);
char name[20], string_value[20];
void *data;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, ID_PARALLEL, FTAG, micro, DMU_OT_ZAP_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
object = od->od_object;
/*
* Generate a random name of the form 'xxx.....' where each
* x is a random printable character and the dots are dots.
* There are 94 such characters, and the name length goes from
* 6 to 20, so there are 94^3 * 15 = 12,458,760 possible names.
*/
namelen = ztest_random(sizeof (name) - 5) + 5 + 1;
for (i = 0; i < 3; i++)
name[i] = '!' + ztest_random('~' - '!' + 1);
for (; i < namelen - 1; i++)
name[i] = '.';
name[i] = '\0';
if ((namelen & 1) || micro) {
wsize = sizeof (txg);
wc = 1;
data = &txg;
} else {
wsize = 1;
wc = namelen;
data = string_value;
}
count = -1ULL;
VERIFY0(zap_count(os, object, &count));
ASSERT(count != -1ULL);
/*
* Select an operation: length, lookup, add, update, remove.
*/
i = ztest_random(5);
if (i >= 2) {
tx = dmu_tx_create(os);
dmu_tx_hold_zap(tx, object, B_TRUE, NULL);
txg = ztest_tx_assign(tx, TXG_MIGHTWAIT, FTAG);
if (txg == 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
bcopy(name, string_value, namelen);
} else {
tx = NULL;
txg = 0;
bzero(string_value, namelen);
}
switch (i) {
case 0:
error = zap_length(os, object, name, &zl_wsize, &zl_wc);
if (error == 0) {
ASSERT3U(wsize, ==, zl_wsize);
ASSERT3U(wc, ==, zl_wc);
} else {
ASSERT3U(error, ==, ENOENT);
}
break;
case 1:
error = zap_lookup(os, object, name, wsize, wc, data);
if (error == 0) {
if (data == string_value &&
bcmp(name, data, namelen) != 0)
fatal(0, "name '%s' != val '%s' len %d",
name, data, namelen);
} else {
ASSERT3U(error, ==, ENOENT);
}
break;
case 2:
error = zap_add(os, object, name, wsize, wc, data, tx);
ASSERT(error == 0 || error == EEXIST);
break;
case 3:
VERIFY(zap_update(os, object, name, wsize, wc, data, tx) == 0);
break;
case 4:
error = zap_remove(os, object, name, tx);
ASSERT(error == 0 || error == ENOENT);
break;
}
if (tx != NULL)
dmu_tx_commit(tx);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Commit callback data.
*/
typedef struct ztest_cb_data {
list_node_t zcd_node;
uint64_t zcd_txg;
int zcd_expected_err;
boolean_t zcd_added;
boolean_t zcd_called;
spa_t *zcd_spa;
} ztest_cb_data_t;
/* This is the actual commit callback function */
static void
ztest_commit_callback(void *arg, int error)
{
ztest_cb_data_t *data = arg;
uint64_t synced_txg;
VERIFY(data != NULL);
VERIFY3S(data->zcd_expected_err, ==, error);
VERIFY(!data->zcd_called);
synced_txg = spa_last_synced_txg(data->zcd_spa);
if (data->zcd_txg > synced_txg)
fatal(0, "commit callback of txg %" PRIu64 " called prematurely"
", last synced txg = %" PRIu64 "\n", data->zcd_txg,
synced_txg);
data->zcd_called = B_TRUE;
if (error == ECANCELED) {
ASSERT0(data->zcd_txg);
ASSERT(!data->zcd_added);
/*
* The private callback data should be destroyed here, but
* since we are going to check the zcd_called field after
* dmu_tx_abort(), we will destroy it there.
*/
return;
}
ASSERT(data->zcd_added);
ASSERT3U(data->zcd_txg, !=, 0);
(void) mutex_enter(&zcl.zcl_callbacks_lock);
/* Track the smallest txg delay seen for any commit callback */
if ((synced_txg - data->zcd_txg) < zc_min_txg_delay)
zc_min_txg_delay = synced_txg - data->zcd_txg;
/* Remove our callback from the list */
list_remove(&zcl.zcl_callbacks, data);
(void) mutex_exit(&zcl.zcl_callbacks_lock);
umem_free(data, sizeof (ztest_cb_data_t));
}
/* Allocate and initialize callback data structure */
static ztest_cb_data_t *
ztest_create_cb_data(objset_t *os, uint64_t txg)
{
ztest_cb_data_t *cb_data;
cb_data = umem_zalloc(sizeof (ztest_cb_data_t), UMEM_NOFAIL);
cb_data->zcd_txg = txg;
cb_data->zcd_spa = dmu_objset_spa(os);
list_link_init(&cb_data->zcd_node);
return (cb_data);
}
/*
* Commit callback test.
*/
void
ztest_dmu_commit_callbacks(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
ztest_od_t *od;
dmu_tx_t *tx;
ztest_cb_data_t *cb_data[3], *tmp_cb;
uint64_t old_txg, txg;
int i, error = 0;
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
tx = dmu_tx_create(os);
cb_data[0] = ztest_create_cb_data(os, 0);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[0]);
dmu_tx_hold_write(tx, od->od_object, 0, sizeof (uint64_t));
/* Every once in a while, abort the transaction on purpose */
if (ztest_random(100) == 0)
error = -1;
if (!error)
error = dmu_tx_assign(tx, TXG_NOWAIT);
txg = error ? 0 : dmu_tx_get_txg(tx);
cb_data[0]->zcd_txg = txg;
cb_data[1] = ztest_create_cb_data(os, txg);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[1]);
if (error) {
/*
* It's not a strict requirement to call the registered
* callbacks from inside dmu_tx_abort(), but that's what
* is supposed to happen in the current implementation,
* so we will check for that.
*/
for (i = 0; i < 2; i++) {
cb_data[i]->zcd_expected_err = ECANCELED;
VERIFY(!cb_data[i]->zcd_called);
}
dmu_tx_abort(tx);
for (i = 0; i < 2; i++) {
VERIFY(cb_data[i]->zcd_called);
umem_free(cb_data[i], sizeof (ztest_cb_data_t));
}
umem_free(od, sizeof (ztest_od_t));
return;
}
cb_data[2] = ztest_create_cb_data(os, txg);
dmu_tx_callback_register(tx, ztest_commit_callback, cb_data[2]);
/*
* Read existing data to make sure there isn't a future leak.
*/
VERIFY(0 == dmu_read(os, od->od_object, 0, sizeof (uint64_t),
&old_txg, DMU_READ_PREFETCH));
if (old_txg > txg)
fatal(0, "future leak: got %" PRIu64 ", open txg is %" PRIu64,
old_txg, txg);
dmu_write(os, od->od_object, 0, sizeof (uint64_t), &txg, tx);
(void) mutex_enter(&zcl.zcl_callbacks_lock);
/*
* Since commit callbacks don't have any ordering requirement and since
* it is theoretically possible for a commit callback to be called
* after an arbitrary amount of time has elapsed since its txg has been
* synced, it is difficult to reliably determine whether a commit
* callback hasn't been called due to high load or due to a flawed
* implementation.
*
* In practice, we will assume that if after a certain number of txgs a
* commit callback hasn't been called, then most likely there's an
* implementation bug.
*/
tmp_cb = list_head(&zcl.zcl_callbacks);
if (tmp_cb != NULL &&
tmp_cb->zcd_txg + ZTEST_COMMIT_CB_THRESH < txg) {
fatal(0, "Commit callback threshold exceeded, oldest txg: %"
PRIu64 ", open txg: %" PRIu64 "\n", tmp_cb->zcd_txg, txg);
}
/*
* Let's find the place to insert our callbacks.
*
* Even though the list is ordered by txg, it is possible for the
* insertion point to not be the end because our txg may already be
* quiescing at this point and other callbacks in the open txg
* (from other objsets) may have sneaked in.
*/
tmp_cb = list_tail(&zcl.zcl_callbacks);
while (tmp_cb != NULL && tmp_cb->zcd_txg > txg)
tmp_cb = list_prev(&zcl.zcl_callbacks, tmp_cb);
/* Add the 3 callbacks to the list */
for (i = 0; i < 3; i++) {
if (tmp_cb == NULL)
list_insert_head(&zcl.zcl_callbacks, cb_data[i]);
else
list_insert_after(&zcl.zcl_callbacks, tmp_cb,
cb_data[i]);
cb_data[i]->zcd_added = B_TRUE;
VERIFY(!cb_data[i]->zcd_called);
tmp_cb = cb_data[i];
}
zc_cb_counter += 3;
(void) mutex_exit(&zcl.zcl_callbacks_lock);
dmu_tx_commit(tx);
umem_free(od, sizeof (ztest_od_t));
}
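/*
 * Minimal commit-callback usage sketch, not part of the original change; the
 * function and callback names are hypothetical and the block is left
 * disabled.  It shows the basic pattern exercised above: register the
 * callback on the tx, and it fires with 0 once the txg syncs, or with
 * ECANCELED if the tx is aborted.
 */
#if 0
static void
ztest_noop_cb(void *arg, int error)
{
	/* error is 0 after the txg syncs, ECANCELED on dmu_tx_abort() */
}

static void
ztest_commit_cb_example(objset_t *os, uint64_t object)
{
	dmu_tx_t *tx = dmu_tx_create(os);

	dmu_tx_hold_write(tx, object, 0, sizeof (uint64_t));
	dmu_tx_callback_register(tx, ztest_noop_cb, NULL);
	if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
		dmu_tx_abort(tx);	/* ztest_noop_cb fires with ECANCELED */
		return;
	}
	dmu_tx_commit(tx);		/* ztest_noop_cb fires after sync */
}
#endif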
/*
* Visit each object in the dataset. Verify that its properties
* are consistent with what was stored in the block tag when it was created,
* and that its unused bonus buffer space has not been overwritten.
*/
/* ARGSUSED */
void
ztest_verify_dnode_bt(ztest_ds_t *zd, uint64_t id)
{
objset_t *os = zd->zd_os;
uint64_t obj;
int err = 0;
for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
ztest_block_tag_t *bt = NULL;
dmu_object_info_t doi;
dmu_buf_t *db;
ztest_object_lock(zd, obj, RL_READER);
if (dmu_bonus_hold(os, obj, FTAG, &db) != 0) {
ztest_object_unlock(zd, obj);
continue;
}
dmu_object_info_from_db(db, &doi);
if (doi.doi_bonus_size >= sizeof (*bt))
bt = ztest_bt_bonus(db);
if (bt && bt->bt_magic == BT_MAGIC) {
ztest_bt_verify(bt, os, obj, doi.doi_dnodesize,
bt->bt_offset, bt->bt_gen, bt->bt_txg,
bt->bt_crtxg);
ztest_verify_unused_bonus(db, bt, obj, os, bt->bt_gen);
}
dmu_buf_rele(db, FTAG);
ztest_object_unlock(zd, obj);
}
}
/* ARGSUSED */
void
ztest_dsl_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
zfs_prop_t proplist[] = {
ZFS_PROP_CHECKSUM,
ZFS_PROP_COMPRESSION,
ZFS_PROP_COPIES,
ZFS_PROP_DEDUP
};
int p;
(void) rw_rdlock(&ztest_name_lock);
for (p = 0; p < sizeof (proplist) / sizeof (proplist[0]); p++)
(void) ztest_dsl_prop_set_uint64(zd->zd_name, proplist[p],
ztest_random_dsl_prop(proplist[p]), (int)ztest_random(2));
VERIFY0(ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_RECORDSIZE,
ztest_random_blocksize(), (int)ztest_random(2)));
(void) rw_unlock(&ztest_name_lock);
}
+/* ARGSUSED */
+void
+ztest_remap_blocks(ztest_ds_t *zd, uint64_t id)
+{
+ (void) rw_rdlock(&ztest_name_lock);
+
+ int error = dmu_objset_remap_indirects(zd->zd_name);
+ if (error == ENOSPC)
+ error = 0;
+ ASSERT0(error);
+
+ (void) rw_unlock(&ztest_name_lock);
+}
+
/* ARGSUSED */
void
ztest_spa_prop_get_set(ztest_ds_t *zd, uint64_t id)
{
nvlist_t *props = NULL;
(void) rw_rdlock(&ztest_name_lock);
(void) ztest_spa_prop_set_uint64(ZPOOL_PROP_DEDUPDITTO,
ZIO_DEDUPDITTO_MIN + ztest_random(ZIO_DEDUPDITTO_MIN));
VERIFY0(spa_prop_get(ztest_spa, &props));
if (ztest_opts.zo_verbose >= 6)
dump_nvlist(props, 4);
nvlist_free(props);
(void) rw_unlock(&ztest_name_lock);
}
static int
user_release_one(const char *snapname, const char *holdname)
{
nvlist_t *snaps, *holds;
int error;
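	/*
	 * dsl_dataset_user_release() expects an nvlist of the form
	 * { snapshot name -> { hold tag, ... } }; build one containing a
	 * single snapshot with a single hold.
	 */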
snaps = fnvlist_alloc();
holds = fnvlist_alloc();
fnvlist_add_boolean(holds, holdname);
fnvlist_add_nvlist(snaps, snapname, holds);
fnvlist_free(holds);
error = dsl_dataset_user_release(snaps, NULL);
fnvlist_free(snaps);
return (error);
}
/*
* Test snapshot hold/release and deferred destroy.
*/
void
ztest_dmu_snapshot_hold(ztest_ds_t *zd, uint64_t id)
{
int error;
objset_t *os = zd->zd_os;
objset_t *origin;
char snapname[100];
char fullname[100];
char clonename[100];
char tag[100];
char osname[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *holds;
(void) rw_rdlock(&ztest_name_lock);
dmu_objset_name(os, osname);
(void) snprintf(snapname, sizeof (snapname), "sh1_%llu",
(u_longlong_t)id);
(void) snprintf(fullname, sizeof (fullname), "%s@%s", osname, snapname);
(void) snprintf(clonename, sizeof (clonename),
"%s/ch1_%llu", osname, (u_longlong_t)id);
(void) snprintf(tag, sizeof (tag), "tag_%llu", (u_longlong_t)id);
/*
* Clean up from any previous run.
*/
error = dsl_destroy_head(clonename);
if (error != ENOENT)
ASSERT0(error);
error = user_release_one(fullname, tag);
if (error != ESRCH && error != ENOENT)
ASSERT0(error);
error = dsl_destroy_snapshot(fullname, B_FALSE);
if (error != ENOENT)
ASSERT0(error);
/*
* Create snapshot, clone it, mark snap for deferred destroy,
* destroy clone, verify snap was also destroyed.
*/
error = dmu_objset_snapshot_one(osname, snapname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
}
error = dmu_objset_clone(clonename, fullname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_clone");
goto out;
}
fatal(0, "dmu_objset_clone(%s) = %d", clonename, error);
}
error = dsl_destroy_snapshot(fullname, B_TRUE);
if (error) {
fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
fullname, error);
}
error = dsl_destroy_head(clonename);
if (error)
fatal(0, "dsl_destroy_head(%s) = %d", clonename, error);
error = dmu_objset_hold(fullname, FTAG, &origin);
if (error != ENOENT)
fatal(0, "dmu_objset_hold(%s) = %d", fullname, error);
/*
* Create snapshot, add temporary hold, verify that we can't
* destroy a held snapshot, mark for deferred destroy,
* release hold, verify snapshot was destroyed.
*/
error = dmu_objset_snapshot_one(osname, snapname);
if (error) {
if (error == ENOSPC) {
ztest_record_enospc("dmu_objset_snapshot");
goto out;
}
fatal(0, "dmu_objset_snapshot(%s) = %d", fullname, error);
}
holds = fnvlist_alloc();
fnvlist_add_string(holds, fullname, tag);
error = dsl_dataset_user_hold(holds, 0, NULL);
fnvlist_free(holds);
if (error == ENOSPC) {
ztest_record_enospc("dsl_dataset_user_hold");
goto out;
} else if (error) {
fatal(0, "dsl_dataset_user_hold(%s, %s) = %u",
fullname, tag, error);
}
error = dsl_destroy_snapshot(fullname, B_FALSE);
if (error != EBUSY) {
fatal(0, "dsl_destroy_snapshot(%s, B_FALSE) = %d",
fullname, error);
}
error = dsl_destroy_snapshot(fullname, B_TRUE);
if (error) {
fatal(0, "dsl_destroy_snapshot(%s, B_TRUE) = %d",
fullname, error);
}
error = user_release_one(fullname, tag);
if (error)
fatal(0, "user_release_one(%s, %s) = %d", fullname, tag, error);
VERIFY3U(dmu_objset_hold(fullname, FTAG, &origin), ==, ENOENT);
out:
(void) rw_unlock(&ztest_name_lock);
}
/*
* Inject random faults into the on-disk data.
*/
/* ARGSUSED */
void
ztest_fault_inject(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
int fd;
uint64_t offset;
uint64_t leaves;
uint64_t bad = 0x1990c0ffeedecadeull;
uint64_t top, leaf;
char *path0;
char *pathrand;
size_t fsize;
int bshift = SPA_MAXBLOCKSHIFT + 2;
int iters = 1000;
int maxfaults;
int mirror_save;
vdev_t *vd0 = NULL;
uint64_t guid0 = 0;
boolean_t islog = B_FALSE;
path0 = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
pathrand = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
mutex_enter(&ztest_vdev_lock);
maxfaults = MAXFAULTS(zs);
leaves = MAX(zs->zs_mirrors, 1) * ztest_opts.zo_raidz;
mirror_save = zs->zs_mirrors;
mutex_exit(&ztest_vdev_lock);
ASSERT(leaves >= 1);
/*
* Grab the name lock as reader. There are some operations
* which don't like to have their vdevs changed while
* they are in progress (e.g. spa_change_guid). Those
* operations will have grabbed the name lock as writer.
*/
(void) rw_rdlock(&ztest_name_lock);
/*
* We need SCL_STATE here because we're going to look at vd0->vdev_tsd.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (ztest_random(2) == 0) {
/*
* Inject errors on a normal data device or slog device.
*/
top = ztest_random_vdev_top(spa, B_TRUE);
leaf = ztest_random(leaves) + zs->zs_splits;
/*
* Generate paths to the first leaf in this top-level vdev,
* and to the random leaf we selected. We'll induce transient
* write failures and random online/offline activity on leaf 0,
* and we'll write random garbage to the randomly chosen leaf.
*/
(void) snprintf(path0, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + zs->zs_splits);
(void) snprintf(pathrand, MAXPATHLEN, ztest_dev_template,
ztest_opts.zo_dir, ztest_opts.zo_pool,
top * leaves + leaf);
vd0 = vdev_lookup_by_path(spa->spa_root_vdev, path0);
if (vd0 != NULL && vd0->vdev_top->vdev_islog)
islog = B_TRUE;
/*
* If the top-level vdev needs to be resilvered
* then we only allow faults on the device that is
* resilvering.
*/
if (vd0 != NULL && maxfaults != 1 &&
(!vdev_resilver_needed(vd0->vdev_top, NULL, NULL) ||
vd0->vdev_resilver_txg != 0)) {
/*
* Make vd0 explicitly claim to be unreadable,
* or unwriteable, or reach behind its back
* and close the underlying fd. We can do this if
* maxfaults == 0 because we'll fail and reexecute,
* and we can do it if maxfaults >= 2 because we'll
* have enough redundancy. If maxfaults == 1, the
* combination of this with injection of random data
* corruption below exceeds the pool's fault tolerance.
*/
vdev_file_t *vf = vd0->vdev_tsd;
+ zfs_dbgmsg("injecting fault to vdev %llu; maxfaults=%d",
+ (long long)vd0->vdev_id, (int)maxfaults);
+
if (vf != NULL && ztest_random(3) == 0) {
(void) close(vf->vf_vnode->v_fd);
vf->vf_vnode->v_fd = -1;
} else if (ztest_random(2) == 0) {
vd0->vdev_cant_read = B_TRUE;
} else {
vd0->vdev_cant_write = B_TRUE;
}
guid0 = vd0->vdev_guid;
}
} else {
/*
* Inject errors on an l2cache device.
*/
spa_aux_vdev_t *sav = &spa->spa_l2cache;
if (sav->sav_count == 0) {
spa_config_exit(spa, SCL_STATE, FTAG);
(void) rw_unlock(&ztest_name_lock);
goto out;
}
vd0 = sav->sav_vdevs[ztest_random(sav->sav_count)];
guid0 = vd0->vdev_guid;
(void) strcpy(path0, vd0->vdev_path);
(void) strcpy(pathrand, vd0->vdev_path);
leaf = 0;
leaves = 1;
maxfaults = INT_MAX; /* no limit on cache devices */
}
spa_config_exit(spa, SCL_STATE, FTAG);
(void) rw_unlock(&ztest_name_lock);
/*
* If we can tolerate two or more faults, or we're dealing
* with a slog, randomly online/offline vd0.
*/
if ((maxfaults >= 2 || islog) && guid0 != 0) {
if (ztest_random(10) < 6) {
int flags = (ztest_random(2) == 0 ?
ZFS_OFFLINE_TEMPORARY : 0);
/*
* We have to grab the zs_name_lock as writer to
* prevent a race between offlining a slog and
* destroying a dataset. Offlining the slog will
* grab a reference on the dataset which may cause
* dsl_destroy_head() to fail with EBUSY thus
* leaving the dataset in an inconsistent state.
*/
if (islog)
(void) rw_wrlock(&ztest_name_lock);
VERIFY(vdev_offline(spa, guid0, flags) != EBUSY);
if (islog)
(void) rw_unlock(&ztest_name_lock);
} else {
/*
* Ideally we would like to be able to randomly
* call vdev_[on|off]line without holding locks
* to force unpredictable failures but the side
* effects of vdev_[on|off]line prevent us from
* doing so. We grab the ztest_vdev_lock here to
* prevent a race between injection testing and
* aux_vdev removal.
*/
mutex_enter(&ztest_vdev_lock);
(void) vdev_online(spa, guid0, 0, NULL);
mutex_exit(&ztest_vdev_lock);
}
}
if (maxfaults == 0)
goto out;
/*
* We have at least single-fault tolerance, so inject data corruption.
*/
fd = open(pathrand, O_RDWR);
if (fd == -1) /* we hit a gap in the device namespace */
goto out;
fsize = lseek(fd, 0, SEEK_END);
while (--iters != 0) {
/*
* The offset must be chosen carefully to ensure that
* we do not inject a given logical block with errors
* on two different leaf devices, because ZFS can not
* tolerate that (if maxfaults==1).
*
* We divide each leaf into chunks of size
* (# leaves * SPA_MAXBLOCKSIZE * 4). Within each chunk
* there is a series of ranges to which we can inject errors.
* Each range can accept errors on only a single leaf vdev.
* The error injection ranges are separated by ranges
* which we will not inject errors on any device (DMZs).
* Each DMZ must be large enough such that a single block
* can not straddle it, so that a single block can not be
* a target in two different injection ranges (on different
* leaf vdevs).
*
* For example, with 3 leaves, each chunk looks like:
* 0 to 32M: injection range for leaf 0
* 32M to 64M: DMZ - no injection allowed
* 64M to 96M: injection range for leaf 1
* 96M to 128M: DMZ - no injection allowed
* 128M to 160M: injection range for leaf 2
* 160M to 192M: DMZ - no injection allowed
*/
offset = ztest_random(fsize / (leaves << bshift)) *
(leaves << bshift) + (leaf << bshift) +
(ztest_random(1ULL << (bshift - 1)) & -8ULL);
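		/*
		 * Each leaf owns a (1 << bshift) slot within the chunk, but
		 * the random offset above is limited to 1 << (bshift - 1),
		 * i.e. the lower half of the slot; the untouched upper half
		 * is the DMZ described above.  With SPA_MAXBLOCKSHIFT of 24,
		 * bshift is 26, so each slot is 64M with a 32M injection
		 * range, matching the example.
		 */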
/*
* Only allow damage to the labels at one end of the vdev.
*
* If all labels are damaged, the device will be totally
* inaccessible, which will result in loss of data,
* because we also damage (parts of) the other side of
* the mirror/raidz.
*
* Additionally, we will always have both an even and an
* odd label, so that we can handle crashes in the
* middle of vdev_config_sync().
*/
if ((leaf & 1) == 0 && offset < VDEV_LABEL_START_SIZE)
continue;
/*
* The two end labels are stored at the "end" of the disk, but
* the end of the disk (vdev_psize) is aligned to
* sizeof (vdev_label_t).
*/
uint64_t psize = P2ALIGN(fsize, sizeof (vdev_label_t));
if ((leaf & 1) == 1 &&
offset + sizeof (bad) > psize - VDEV_LABEL_END_SIZE)
continue;
mutex_enter(&ztest_vdev_lock);
if (mirror_save != zs->zs_mirrors) {
mutex_exit(&ztest_vdev_lock);
(void) close(fd);
goto out;
}
if (pwrite(fd, &bad, sizeof (bad), offset) != sizeof (bad))
fatal(1, "can't inject bad word at 0x%llx in %s",
offset, pathrand);
mutex_exit(&ztest_vdev_lock);
if (ztest_opts.zo_verbose >= 7)
(void) printf("injected bad word into %s,"
" offset 0x%llx\n", pathrand, (u_longlong_t)offset);
}
(void) close(fd);
out:
umem_free(path0, MAXPATHLEN);
umem_free(pathrand, MAXPATHLEN);
}
/*
* Verify that DDT repair works as expected.
*/
void
ztest_ddt_repair(ztest_ds_t *zd, uint64_t id)
{
ztest_shared_t *zs = ztest_shared;
spa_t *spa = ztest_spa;
objset_t *os = zd->zd_os;
ztest_od_t *od;
uint64_t object, blocksize, txg, pattern, psize;
enum zio_checksum checksum = spa_dedup_checksum(spa);
dmu_buf_t *db;
dmu_tx_t *tx;
abd_t *abd;
blkptr_t blk;
int copies = 2 * ZIO_DEDUPDITTO_MIN;
int i;
blocksize = ztest_random_blocksize();
blocksize = MIN(blocksize, 2048); /* because we write so many */
od = umem_alloc(sizeof (ztest_od_t), UMEM_NOFAIL);
ztest_od_init(od, id, FTAG, 0, DMU_OT_UINT64_OTHER, blocksize, 0, 0);
if (ztest_object_init(zd, od, sizeof (ztest_od_t), B_FALSE) != 0) {
umem_free(od, sizeof (ztest_od_t));
return;
}
/*
* Take the name lock as writer to prevent anyone else from changing
* the pool and dataset properties we need to maintain during this test.
*/
(void) rw_wrlock(&ztest_name_lock);
if (ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_DEDUP, checksum,
B_FALSE) != 0 ||
ztest_dsl_prop_set_uint64(zd->zd_name, ZFS_PROP_COPIES, 1,
B_FALSE) != 0) {
(void) rw_unlock(&ztest_name_lock);
umem_free(od, sizeof (ztest_od_t));
return;
}
dmu_objset_stats_t dds;
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
object = od[0].od_object;
blocksize = od[0].od_blocksize;
pattern = zs->zs_guid ^ dds.dds_guid;
ASSERT(object != 0);
tx = dmu_tx_create(os);
dmu_tx_hold_write(tx, object, 0, copies * blocksize);
txg = ztest_tx_assign(tx, TXG_WAIT, FTAG);
if (txg == 0) {
(void) rw_unlock(&ztest_name_lock);
umem_free(od, sizeof (ztest_od_t));
return;
}
/*
* Write all the copies of our block.
*/
for (i = 0; i < copies; i++) {
uint64_t offset = i * blocksize;
int error = dmu_buf_hold(os, object, offset, FTAG, &db,
DMU_READ_NO_PREFETCH);
if (error != 0) {
fatal(B_FALSE, "dmu_buf_hold(%p, %llu, %llu) = %u",
os, (long long)object, (long long) offset, error);
}
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == blocksize);
ASSERT(ztest_pattern_match(db->db_data, db->db_size, pattern) ||
ztest_pattern_match(db->db_data, db->db_size, 0ULL));
dmu_buf_will_fill(db, tx);
ztest_pattern_set(db->db_data, db->db_size, pattern);
dmu_buf_rele(db, FTAG);
}
dmu_tx_commit(tx);
txg_wait_synced(spa_get_dsl(spa), txg);
/*
* Find out what block we got.
*/
VERIFY0(dmu_buf_hold(os, object, 0, FTAG, &db,
DMU_READ_NO_PREFETCH));
blk = *((dmu_buf_impl_t *)db)->db_blkptr;
dmu_buf_rele(db, FTAG);
/*
* Damage the block. Dedup-ditto will save us when we read it later.
*/
psize = BP_GET_PSIZE(&blk);
abd = abd_alloc_linear(psize, B_TRUE);
ztest_pattern_set(abd_to_buf(abd), psize, ~pattern);
(void) zio_wait(zio_rewrite(NULL, spa, 0, &blk,
abd, psize, NULL, NULL, ZIO_PRIORITY_SYNC_WRITE,
ZIO_FLAG_CANFAIL | ZIO_FLAG_INDUCE_DAMAGE, NULL));
abd_free(abd);
(void) rw_unlock(&ztest_name_lock);
umem_free(od, sizeof (ztest_od_t));
}
/*
* Scrub the pool.
*/
/* ARGSUSED */
void
ztest_scrub(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
(void) spa_scan(spa, POOL_SCAN_SCRUB);
(void) poll(NULL, 0, 100); /* wait a moment, then force a restart */
(void) spa_scan(spa, POOL_SCAN_SCRUB);
}
/*
* Change the guid for the pool.
*/
/* ARGSUSED */
void
ztest_reguid(ztest_ds_t *zd, uint64_t id)
{
spa_t *spa = ztest_spa;
uint64_t orig, load;
int error;
if (ztest_opts.zo_mmp_test)
return;
orig = spa_guid(spa);
load = spa_load_guid(spa);
(void) rw_wrlock(&ztest_name_lock);
error = spa_change_guid(spa);
(void) rw_unlock(&ztest_name_lock);
if (error != 0)
return;
if (ztest_opts.zo_verbose >= 4) {
(void) printf("Changed guid old %llu -> %llu\n",
(u_longlong_t)orig, (u_longlong_t)spa_guid(spa));
}
VERIFY3U(orig, !=, spa_guid(spa));
VERIFY3U(load, ==, spa_load_guid(spa));
}
/*
* Rename the pool to a different name and then rename it back.
*/
/* ARGSUSED */
void
ztest_spa_rename(ztest_ds_t *zd, uint64_t id)
{
char *oldname, *newname;
spa_t *spa;
if (ztest_opts.zo_mmp_test)
return;
(void) rw_wrlock(&ztest_name_lock);
oldname = ztest_opts.zo_pool;
newname = umem_alloc(strlen(oldname) + 5, UMEM_NOFAIL);
(void) strcpy(newname, oldname);
(void) strcat(newname, "_tmp");
/*
* Do the rename
*/
VERIFY3U(0, ==, spa_rename(oldname, newname));
/*
* Try to open it under the old name, which shouldn't exist
*/
VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
/*
* Open it under the new name and make sure it's still the same spa_t.
*/
VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
ASSERT(spa == ztest_spa);
spa_close(spa, FTAG);
/*
* Rename it back to the original
*/
VERIFY3U(0, ==, spa_rename(newname, oldname));
/*
* Make sure it can still be opened
*/
VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
ASSERT(spa == ztest_spa);
spa_close(spa, FTAG);
umem_free(newname, strlen(newname) + 1);
(void) rw_unlock(&ztest_name_lock);
}
void
ztest_fletcher(ztest_ds_t *zd, uint64_t id)
{
hrtime_t end = gethrtime() + NANOSEC;
while (gethrtime() <= end) {
int run_count = 100;
void *buf;
struct abd *abd_data, *abd_meta;
uint32_t size;
int *ptr;
int i;
zio_cksum_t zc_ref;
zio_cksum_t zc_ref_byteswap;
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
abd_data = abd_alloc(size, B_FALSE);
abd_meta = abd_alloc(size, B_TRUE);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
abd_copy_from_buf_off(abd_data, buf, 0, size);
abd_copy_from_buf_off(abd_meta, buf, 0, size);
VERIFY0(fletcher_4_impl_set("scalar"));
fletcher_4_native(buf, size, NULL, &zc_ref);
fletcher_4_byteswap(buf, size, NULL, &zc_ref_byteswap);
VERIFY0(fletcher_4_impl_set("cycle"));
while (run_count-- > 0) {
zio_cksum_t zc;
zio_cksum_t zc_byteswap;
fletcher_4_byteswap(buf, size, NULL, &zc_byteswap);
fletcher_4_native(buf, size, NULL, &zc);
VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - data */
abd_fletcher_4_byteswap(abd_data, size, NULL,
&zc_byteswap);
abd_fletcher_4_native(abd_data, size, NULL, &zc);
VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
/* Test ABD - metadata */
abd_fletcher_4_byteswap(abd_meta, size, NULL,
&zc_byteswap);
abd_fletcher_4_native(abd_meta, size, NULL, &zc);
VERIFY0(bcmp(&zc, &zc_ref, sizeof (zc)));
VERIFY0(bcmp(&zc_byteswap, &zc_ref_byteswap,
sizeof (zc_byteswap)));
}
umem_free(buf, size);
abd_free(abd_data);
abd_free(abd_meta);
}
}
void
ztest_fletcher_incr(ztest_ds_t *zd, uint64_t id)
{
void *buf;
size_t size;
int *ptr;
int i;
zio_cksum_t zc_ref;
zio_cksum_t zc_ref_bswap;
hrtime_t end = gethrtime() + NANOSEC;
while (gethrtime() <= end) {
int run_count = 100;
size = ztest_random_blocksize();
buf = umem_alloc(size, UMEM_NOFAIL);
for (i = 0, ptr = buf; i < size / sizeof (*ptr); i++, ptr++)
*ptr = ztest_random(UINT_MAX);
VERIFY0(fletcher_4_impl_set("scalar"));
fletcher_4_native(buf, size, NULL, &zc_ref);
fletcher_4_byteswap(buf, size, NULL, &zc_ref_bswap);
VERIFY0(fletcher_4_impl_set("cycle"));
while (run_count-- > 0) {
zio_cksum_t zc;
zio_cksum_t zc_bswap;
size_t pos = 0;
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
while (pos < size) {
size_t inc = 64 * ztest_random(size / 67);
/* sometimes add a few bytes to exercise the non-SIMD path */
if (ztest_random(100) < 10)
inc += P2ALIGN(ztest_random(64),
sizeof (uint32_t));
if (inc > (size - pos))
inc = size - pos;
fletcher_4_incremental_native(buf + pos, inc,
&zc);
fletcher_4_incremental_byteswap(buf + pos, inc,
&zc_bswap);
pos += inc;
}
VERIFY3U(pos, ==, size);
VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
/*
* verify that incremental checksumming of the whole buffer
* is equivalent to the non-incremental version
*/
ZIO_SET_CHECKSUM(&zc, 0, 0, 0, 0);
ZIO_SET_CHECKSUM(&zc_bswap, 0, 0, 0, 0);
fletcher_4_incremental_native(buf, size, &zc);
fletcher_4_incremental_byteswap(buf, size, &zc_bswap);
VERIFY(ZIO_CHECKSUM_EQUAL(zc, zc_ref));
VERIFY(ZIO_CHECKSUM_EQUAL(zc_bswap, zc_ref_bswap));
}
umem_free(buf, size);
}
}
static int
ztest_check_path(char *path)
{
struct stat s;
/* return true on success */
return (!stat(path, &s));
}
static void
ztest_get_zdb_bin(char *bin, int len)
{
char *zdb_path;
/*
* Try ZDB_PATH and the in-tree zdb path. If neither works, just
* let popen() search through PATH.
*/
if ((zdb_path = getenv("ZDB_PATH"))) {
strlcpy(bin, zdb_path, len); /* In env */
if (!ztest_check_path(bin)) {
ztest_dump_core = 0;
fatal(1, "invalid ZDB_PATH '%s'", bin);
}
return;
}
VERIFY(realpath(getexecname(), bin) != NULL);
if (strstr(bin, "/ztest/")) {
strstr(bin, "/ztest/")[0] = '\0'; /* In-tree */
strcat(bin, "/zdb/zdb");
if (ztest_check_path(bin))
return;
}
strcpy(bin, "zdb");
}
/*
* Verify pool integrity by running zdb.
*/
static void
ztest_run_zdb(char *pool)
{
int status;
char *bin;
char *zdb;
char *zbuf;
const int len = MAXPATHLEN + MAXNAMELEN + 20;
FILE *fp;
bin = umem_alloc(len, UMEM_NOFAIL);
zdb = umem_alloc(len, UMEM_NOFAIL);
zbuf = umem_alloc(1024, UMEM_NOFAIL);
ztest_get_zdb_bin(bin, len);
(void) sprintf(zdb,
"%s -bcc%s%s -G -d -U %s %s",
bin,
ztest_opts.zo_verbose >= 3 ? "s" : "",
ztest_opts.zo_verbose >= 4 ? "v" : "",
spa_config_path,
pool);
if (ztest_opts.zo_verbose >= 5)
(void) printf("Executing %s\n", strstr(zdb, "zdb "));
fp = popen(zdb, "r");
while (fgets(zbuf, 1024, fp) != NULL)
if (ztest_opts.zo_verbose >= 3)
(void) printf("%s", zbuf);
status = pclose(fp);
if (status == 0)
goto out;
ztest_dump_core = 0;
if (WIFEXITED(status))
fatal(0, "'%s' exit code %d", zdb, WEXITSTATUS(status));
else
fatal(0, "'%s' died with signal %d", zdb, WTERMSIG(status));
out:
umem_free(bin, len);
umem_free(zdb, len);
umem_free(zbuf, 1024);
}
static void
ztest_walk_pool_directory(char *header)
{
spa_t *spa = NULL;
if (ztest_opts.zo_verbose >= 6)
(void) printf("%s\n", header);
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
if (ztest_opts.zo_verbose >= 6)
(void) printf("\t%s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
}
static void
ztest_spa_import_export(char *oldname, char *newname)
{
nvlist_t *config, *newconfig;
uint64_t pool_guid;
spa_t *spa;
int error;
if (ztest_opts.zo_verbose >= 4) {
(void) printf("import/export: old = %s, new = %s\n",
oldname, newname);
}
/*
* Clean up from previous runs.
*/
(void) spa_destroy(newname);
/*
* Get the pool's configuration and guid.
*/
VERIFY3U(0, ==, spa_open(oldname, &spa, FTAG));
/*
* Kick off a scrub to tickle scrub/export races.
*/
if (ztest_random(2) == 0)
(void) spa_scan(spa, POOL_SCAN_SCRUB);
pool_guid = spa_guid(spa);
spa_close(spa, FTAG);
ztest_walk_pool_directory("pools before export");
/*
* Export it.
*/
VERIFY3U(0, ==, spa_export(oldname, &config, B_FALSE, B_FALSE));
ztest_walk_pool_directory("pools after export");
/*
* Try to import it.
*/
newconfig = spa_tryimport(config);
ASSERT(newconfig != NULL);
nvlist_free(newconfig);
/*
* Import it under the new name.
*/
error = spa_import(newname, config, NULL, 0);
if (error != 0) {
dump_nvlist(config, 0);
fatal(B_FALSE, "couldn't import pool %s as %s: error %u",
oldname, newname, error);
}
ztest_walk_pool_directory("pools after import");
/*
* Try to import it again -- should fail with EEXIST.
*/
VERIFY3U(EEXIST, ==, spa_import(newname, config, NULL, 0));
/*
* Try to import it under a different name -- should fail with EEXIST.
*/
VERIFY3U(EEXIST, ==, spa_import(oldname, config, NULL, 0));
/*
* Verify that the pool is no longer visible under the old name.
*/
VERIFY3U(ENOENT, ==, spa_open(oldname, &spa, FTAG));
/*
* Verify that we can open and close the pool using the new name.
*/
VERIFY3U(0, ==, spa_open(newname, &spa, FTAG));
ASSERT(pool_guid == spa_guid(spa));
spa_close(spa, FTAG);
nvlist_free(config);
}
static void
ztest_resume(spa_t *spa)
{
if (spa_suspended(spa) && ztest_opts.zo_verbose >= 6)
(void) printf("resuming from suspended state\n");
spa_vdev_state_enter(spa, SCL_NONE);
vdev_clear(spa, NULL);
(void) spa_vdev_state_exit(spa, NULL, 0);
(void) zio_resume(spa);
}
static void
ztest_resume_thread(void *arg)
{
spa_t *spa = arg;
while (!ztest_exiting) {
if (spa_suspended(spa))
ztest_resume(spa);
(void) poll(NULL, 0, 100);
/*
* Periodically change the zfs_compressed_arc_enabled setting.
*/
if (ztest_random(10) == 0)
zfs_compressed_arc_enabled = ztest_random(2);
/*
* Periodically change the zfs_abd_scatter_enabled setting.
*/
if (ztest_random(10) == 0)
zfs_abd_scatter_enabled = ztest_random(2);
}
thread_exit();
}
static void
ztest_deadman_thread(void *arg)
{
ztest_shared_t *zs = arg;
spa_t *spa = ztest_spa;
hrtime_t delta, overdue, total = 0;
for (;;) {
delta = zs->zs_thread_stop - zs->zs_thread_start +
MSEC2NSEC(zfs_deadman_synctime_ms);
(void) poll(NULL, 0, (int)NSEC2MSEC(delta));
/*
* If the pool is suspended then fail immediately. Otherwise,
* check to see if the pool is making any progress. If
* vdev_deadman() discovers that there hasn't been any recent
* I/Os then it will end up aborting the tests.
*/
if (spa_suspended(spa) || spa->spa_root_vdev == NULL) {
fatal(0, "aborting test after %llu seconds because "
"pool has transitioned to a suspended state.",
zfs_deadman_synctime_ms / 1000);
}
vdev_deadman(spa->spa_root_vdev, FTAG);
/*
* If the process doesn't complete within a grace period of
* zfs_deadman_synctime_ms over the expected finish time,
* then it may be hung, so it is terminated.
*/
overdue = zs->zs_proc_stop + MSEC2NSEC(zfs_deadman_synctime_ms);
total += zfs_deadman_synctime_ms / 1000;
if (gethrtime() > overdue) {
fatal(0, "aborting test after %llu seconds because "
"the process is overdue for termination.", total);
}
(void) printf("ztest has been running for %lld seconds\n",
total);
}
}
static void
ztest_execute(int test, ztest_info_t *zi, uint64_t id)
{
ztest_ds_t *zd = &ztest_ds[id % ztest_opts.zo_datasets];
ztest_shared_callstate_t *zc = ZTEST_GET_SHARED_CALLSTATE(test);
hrtime_t functime = gethrtime();
int i;
for (i = 0; i < zi->zi_iters; i++)
zi->zi_func(zd, id);
functime = gethrtime() - functime;
atomic_add_64(&zc->zc_count, 1);
atomic_add_64(&zc->zc_time, functime);
if (ztest_opts.zo_verbose >= 4)
(void) printf("%6.2f sec in %s\n",
(double)functime / NANOSEC, zi->zi_funcname);
}
static void
ztest_thread(void *arg)
{
int rand;
uint64_t id = (uintptr_t)arg;
ztest_shared_t *zs = ztest_shared;
uint64_t call_next;
hrtime_t now;
ztest_info_t *zi;
ztest_shared_callstate_t *zc;
while ((now = gethrtime()) < zs->zs_thread_stop) {
/*
* See if it's time to force a crash.
*/
if (now > zs->zs_thread_kill)
ztest_kill(zs);
/*
* If we're getting ENOSPC with some regularity, stop.
*/
if (zs->zs_enospc_count > 10)
break;
/*
* Pick a random function to execute.
*/
rand = ztest_random(ZTEST_FUNCS);
zi = &ztest_info[rand];
zc = ZTEST_GET_SHARED_CALLSTATE(rand);
call_next = zc->zc_next;
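		/*
		 * Only the thread that wins the compare-and-swap on zc_next
		 * advances the schedule and runs the function; a thread that
		 * loses the race simply picks another function on the next
		 * pass through the loop.
		 */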
if (now >= call_next &&
atomic_cas_64(&zc->zc_next, call_next, call_next +
ztest_random(2 * zi->zi_interval[0] + 1)) == call_next) {
ztest_execute(rand, zi, id);
}
}
thread_exit();
}
static void
ztest_dataset_name(char *dsname, char *pool, int d)
{
(void) snprintf(dsname, ZFS_MAX_DATASET_NAME_LEN, "%s/ds_%d", pool, d);
}
static void
ztest_dataset_destroy(int d)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
int t;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
if (ztest_opts.zo_verbose >= 3)
(void) printf("Destroying %s to free up space\n", name);
/*
* Cleanup any non-standard clones and snapshots. In general,
* ztest thread t operates on dataset (t % zopt_datasets),
* so there may be more than one thing to clean up.
*/
for (t = d; t < ztest_opts.zo_threads;
t += ztest_opts.zo_datasets)
ztest_dsl_dataset_cleanup(name, t);
(void) dmu_objset_find(name, ztest_objset_destroy_cb, NULL,
DS_FIND_SNAPSHOTS | DS_FIND_CHILDREN);
}
static void
ztest_dataset_dirobj_verify(ztest_ds_t *zd)
{
uint64_t usedobjs, dirobjs, scratch;
/*
* ZTEST_DIROBJ is the object directory for the entire dataset.
* Therefore, the number of objects in use should equal the
* number of ZTEST_DIROBJ entries, +1 for ZTEST_DIROBJ itself.
* If not, we have an object leak.
*
* Note that we can only check this in ztest_dataset_open(),
* when the open-context and syncing-context values agree.
* That's because zap_count() returns the open-context value,
* while dmu_objset_space() returns the rootbp fill count.
*/
VERIFY3U(0, ==, zap_count(zd->zd_os, ZTEST_DIROBJ, &dirobjs));
dmu_objset_space(zd->zd_os, &scratch, &scratch, &usedobjs, &scratch);
ASSERT3U(dirobjs + 1, ==, usedobjs);
}
static int
ztest_dataset_open(int d)
{
ztest_ds_t *zd = &ztest_ds[d];
uint64_t committed_seq = ZTEST_GET_SHARED_DS(d)->zd_seq;
objset_t *os;
zilog_t *zilog;
char name[ZFS_MAX_DATASET_NAME_LEN];
int error;
ztest_dataset_name(name, ztest_opts.zo_pool, d);
(void) rw_rdlock(&ztest_name_lock);
error = ztest_dataset_create(name);
if (error == ENOSPC) {
(void) rw_unlock(&ztest_name_lock);
ztest_record_enospc(FTAG);
return (error);
}
ASSERT(error == 0 || error == EEXIST);
VERIFY0(ztest_dmu_objset_own(name, DMU_OST_OTHER, B_FALSE,
B_TRUE, zd, &os));
(void) rw_unlock(&ztest_name_lock);
ztest_zd_init(zd, ZTEST_GET_SHARED_DS(d), os);
zilog = zd->zd_zilog;
if (zilog->zl_header->zh_claim_lr_seq != 0 &&
zilog->zl_header->zh_claim_lr_seq < committed_seq)
fatal(0, "missing log records: claimed %llu < committed %llu",
zilog->zl_header->zh_claim_lr_seq, committed_seq);
ztest_dataset_dirobj_verify(zd);
zil_replay(os, zd, ztest_replay_vector);
ztest_dataset_dirobj_verify(zd);
if (ztest_opts.zo_verbose >= 6)
(void) printf("%s replay %llu blocks, %llu records, seq %llu\n",
zd->zd_name,
(u_longlong_t)zilog->zl_parse_blk_count,
(u_longlong_t)zilog->zl_parse_lr_count,
(u_longlong_t)zilog->zl_replaying_seq);
zilog = zil_open(os, ztest_get_data);
if (zilog->zl_replaying_seq != 0 &&
zilog->zl_replaying_seq < committed_seq)
fatal(0, "missing log records: replayed %llu < committed %llu",
zilog->zl_replaying_seq, committed_seq);
return (0);
}
static void
ztest_dataset_close(int d)
{
ztest_ds_t *zd = &ztest_ds[d];
zil_close(zd->zd_zilog);
txg_wait_synced(spa_get_dsl(zd->zd_os->os_spa), 0);
dmu_objset_disown(zd->zd_os, B_TRUE, zd);
ztest_zd_fini(zd);
}
/*
* Kick off threads to run tests on all datasets in parallel.
*/
static void
ztest_run(ztest_shared_t *zs)
{
spa_t *spa;
objset_t *os;
kthread_t *resume_thread;
kthread_t **run_threads;
uint64_t object;
int error;
int t, d;
ztest_exiting = B_FALSE;
/*
* Initialize parent/child shared state.
*/
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
zs->zs_thread_start = gethrtime();
zs->zs_thread_stop =
zs->zs_thread_start + ztest_opts.zo_passtime * NANOSEC;
zs->zs_thread_stop = MIN(zs->zs_thread_stop, zs->zs_proc_stop);
zs->zs_thread_kill = zs->zs_thread_stop;
if (ztest_random(100) < ztest_opts.zo_killrate) {
zs->zs_thread_kill -=
ztest_random(ztest_opts.zo_passtime * NANOSEC);
}
mutex_init(&zcl.zcl_callbacks_lock, NULL, MUTEX_DEFAULT, NULL);
list_create(&zcl.zcl_callbacks, sizeof (ztest_cb_data_t),
offsetof(ztest_cb_data_t, zcd_node));
/*
* Open our pool.
*/
kernel_init(FREAD | FWRITE);
VERIFY0(spa_open(ztest_opts.zo_pool, &spa, FTAG));
spa->spa_debug = B_TRUE;
metaslab_preload_limit = ztest_random(20) + 1;
ztest_spa = spa;
dmu_objset_stats_t dds;
VERIFY0(ztest_dmu_objset_own(ztest_opts.zo_pool,
DMU_OST_ANY, B_TRUE, B_TRUE, FTAG, &os));
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
dmu_objset_fast_stat(os, &dds);
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
zs->zs_guid = dds.dds_guid;
dmu_objset_disown(os, B_TRUE, FTAG);
spa->spa_dedup_ditto = 2 * ZIO_DEDUPDITTO_MIN;
/*
* Create a thread to periodically resume suspended I/O.
*/
resume_thread = thread_create(NULL, 0, ztest_resume_thread,
spa, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri);
/*
* Create a deadman thread and set it to panic if we hang.
*/
(void) thread_create(NULL, 0, ztest_deadman_thread,
zs, 0, NULL, TS_RUN | TS_JOINABLE, defclsyspri);
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
/*
* Verify that we can safely inquire about any object,
* whether it's allocated or not. To make it interesting,
* we probe a 5-wide window around each power of two.
* This hits all edge cases, including zero and the max.
*/
for (t = 0; t < 64; t++) {
for (d = -5; d <= 5; d++) {
error = dmu_object_info(spa->spa_meta_objset,
(1ULL << t) + d, NULL);
ASSERT(error == 0 || error == ENOENT ||
error == EINVAL);
}
}
/*
* If we got any ENOSPC errors on the previous run, destroy something.
*/
if (zs->zs_enospc_count != 0) {
int d = ztest_random(ztest_opts.zo_datasets);
ztest_dataset_destroy(d);
}
zs->zs_enospc_count = 0;
run_threads = umem_zalloc(ztest_opts.zo_threads * sizeof (kthread_t *),
UMEM_NOFAIL);
if (ztest_opts.zo_verbose >= 4)
(void) printf("starting main threads...\n");
/*
* Kick off all the tests that run in parallel.
*/
for (t = 0; t < ztest_opts.zo_threads; t++) {
if (t < ztest_opts.zo_datasets && ztest_dataset_open(t) != 0) {
umem_free(run_threads, ztest_opts.zo_threads *
sizeof (kthread_t *));
return;
}
run_threads[t] = thread_create(NULL, 0, ztest_thread,
(void *)(uintptr_t)t, 0, NULL, TS_RUN | TS_JOINABLE,
defclsyspri);
}
/*
* Wait for all of the tests to complete. We go in reverse order
* so we don't close datasets while threads are still using them.
*/
for (t = ztest_opts.zo_threads - 1; t >= 0; t--) {
VERIFY0(thread_join(run_threads[t]));
if (t < ztest_opts.zo_datasets)
ztest_dataset_close(t);
}
txg_wait_synced(spa_get_dsl(spa), 0);
zs->zs_alloc = metaslab_class_get_alloc(spa_normal_class(spa));
zs->zs_space = metaslab_class_get_space(spa_normal_class(spa));
umem_free(run_threads, ztest_opts.zo_threads * sizeof (kthread_t *));
/* Kill the resume thread */
ztest_exiting = B_TRUE;
VERIFY0(thread_join(resume_thread));
ztest_resume(spa);
/*
* Right before closing the pool, kick off a bunch of async I/O;
* spa_close() should wait for it to complete.
*/
for (object = 1; object < 50; object++) {
dmu_prefetch(spa->spa_meta_objset, object, 0, 0, 1ULL << 20,
ZIO_PRIORITY_SYNC_READ);
}
/* Verify that at least one commit cb was called in a timely fashion */
if (zc_cb_counter >= ZTEST_COMMIT_CB_MIN_REG)
VERIFY0(zc_min_txg_delay);
spa_close(spa, FTAG);
/*
* Verify that we can loop over all pools.
*/
mutex_enter(&spa_namespace_lock);
for (spa = spa_next(NULL); spa != NULL; spa = spa_next(spa))
if (ztest_opts.zo_verbose > 3)
(void) printf("spa_next: found %s\n", spa_name(spa));
mutex_exit(&spa_namespace_lock);
/*
* Verify that we can export the pool and reimport it under a
* different name.
*/
if ((ztest_random(2) == 0) && !ztest_opts.zo_mmp_test) {
char name[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(name, sizeof (name), "%s_import",
ztest_opts.zo_pool);
ztest_spa_import_export(ztest_opts.zo_pool, name);
ztest_spa_import_export(name, ztest_opts.zo_pool);
}
kernel_fini();
list_destroy(&zcl.zcl_callbacks);
mutex_destroy(&zcl.zcl_callbacks_lock);
(void) rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
}
static void
ztest_freeze(void)
{
ztest_ds_t *zd = &ztest_ds[0];
spa_t *spa;
int numloops = 0;
if (ztest_opts.zo_verbose >= 3)
(void) printf("testing spa_freeze()...\n");
kernel_init(FREAD | FWRITE);
VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
VERIFY3U(0, ==, ztest_dataset_open(0));
spa->spa_debug = B_TRUE;
ztest_spa = spa;
/*
* Force the first log block to be transactionally allocated.
* We have to do this before we freeze the pool -- otherwise
* the log chain won't be anchored.
*/
while (BP_IS_HOLE(&zd->zd_zilog->zl_header->zh_log)) {
ztest_dmu_object_alloc_free(zd, 0);
zil_commit(zd->zd_zilog, 0);
}
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* Freeze the pool. This stops spa_sync() from doing anything,
* so that the only way to record changes from now on is the ZIL.
*/
spa_freeze(spa);
/*
* Because it is hard to predict how much space a write will actually
* require beforehand, we leave ourselves some fudge space to write over
* capacity.
*/
uint64_t capacity = metaslab_class_get_space(spa_normal_class(spa)) / 2;
/*
* Run tests that generate log records but don't alter the pool config
* or depend on DSL sync tasks (snapshots, objset create/destroy, etc).
* We do a txg_wait_synced() after each iteration to force the txg
* to increase well beyond the last synced value in the uberblock.
* The ZIL should be OK with that.
*
* Run a random number of times less than zo_maxloops and ensure we do
* not run out of space on the pool.
*/
while (ztest_random(10) != 0 &&
numloops++ < ztest_opts.zo_maxloops &&
metaslab_class_get_alloc(spa_normal_class(spa)) < capacity) {
ztest_od_t od;
ztest_od_init(&od, 0, FTAG, 0, DMU_OT_UINT64_OTHER, 0, 0, 0);
VERIFY0(ztest_object_init(zd, &od, sizeof (od), B_FALSE));
ztest_io(zd, od.od_object,
ztest_random(ZTEST_RANGE_LOCKS) << SPA_MAXBLOCKSHIFT);
txg_wait_synced(spa_get_dsl(spa), 0);
}
/*
* Commit all of the changes we just generated.
*/
zil_commit(zd->zd_zilog, 0);
txg_wait_synced(spa_get_dsl(spa), 0);
/*
* Close our dataset and close the pool.
*/
ztest_dataset_close(0);
spa_close(spa, FTAG);
kernel_fini();
/*
* Open and close the pool and dataset to induce log replay.
*/
kernel_init(FREAD | FWRITE);
VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
ASSERT(spa_freeze_txg(spa) == UINT64_MAX);
VERIFY3U(0, ==, ztest_dataset_open(0));
spa->spa_debug = B_TRUE;
ztest_spa = spa;
txg_wait_synced(spa_get_dsl(spa), 0);
ztest_dataset_close(0);
ztest_reguid(NULL, 0);
spa_close(spa, FTAG);
kernel_fini();
}
void
print_time(hrtime_t t, char *timebuf)
{
hrtime_t s = t / NANOSEC;
hrtime_t m = s / 60;
hrtime_t h = m / 60;
hrtime_t d = h / 24;
s -= m * 60;
m -= h * 60;
h -= d * 24;
timebuf[0] = '\0';
if (d)
(void) sprintf(timebuf,
"%llud%02lluh%02llum%02llus", d, h, m, s);
else if (h)
(void) sprintf(timebuf, "%lluh%02llum%02llus", h, m, s);
else if (m)
(void) sprintf(timebuf, "%llum%02llus", m, s);
else
(void) sprintf(timebuf, "%llus", s);
}
static nvlist_t *
make_random_props(void)
{
nvlist_t *props;
VERIFY0(nvlist_alloc(&props, NV_UNIQUE_NAME, 0));
if (ztest_random(2) == 0)
return (props);
VERIFY0(nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_AUTOREPLACE), 1));
return (props);
}
/*
* Import a storage pool with the given name.
*/
static void
ztest_import(ztest_shared_t *zs)
{
libzfs_handle_t *hdl;
importargs_t args = { 0 };
spa_t *spa;
nvlist_t *cfg = NULL;
int nsearch = 1;
char *searchdirs[nsearch];
char *name = ztest_opts.zo_pool;
int flags = ZFS_IMPORT_MISSING_LOG;
int error;
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
kernel_init(FREAD | FWRITE);
hdl = libzfs_init();
searchdirs[0] = ztest_opts.zo_dir;
args.paths = nsearch;
args.path = searchdirs;
args.can_be_active = B_FALSE;
error = zpool_tryimport(hdl, name, &cfg, &args);
if (error)
(void) fatal(0, "No pools found\n");
VERIFY0(spa_import(name, cfg, NULL, flags));
VERIFY0(spa_open(name, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
spa_close(spa, FTAG);
libzfs_fini(hdl);
kernel_fini();
if (!ztest_opts.zo_mmp_test) {
ztest_run_zdb(ztest_opts.zo_pool);
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
}
(void) rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
}
/*
* Create a storage pool with the given name and initial vdev size.
* Then test spa_freeze() functionality.
*/
static void
ztest_init(ztest_shared_t *zs)
{
spa_t *spa;
nvlist_t *nvroot, *props;
int i;
mutex_init(&ztest_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
VERIFY(rwlock_init(&ztest_name_lock, USYNC_THREAD, NULL) == 0);
kernel_init(FREAD | FWRITE);
/*
* Create the storage pool.
*/
(void) spa_destroy(ztest_opts.zo_pool);
ztest_shared->zs_vdev_next_leaf = 0;
zs->zs_splits = 0;
zs->zs_mirrors = ztest_opts.zo_mirrors;
nvroot = make_vdev_root(NULL, NULL, NULL, ztest_opts.zo_vdev_size, 0,
0, ztest_opts.zo_raidz, zs->zs_mirrors, 1);
props = make_random_props();
/*
* We don't expect the pool to suspend unless maxfaults == 0,
* in which case ztest_fault_inject() temporarily takes away
* the only valid replica.
*/
VERIFY0(nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_FAILUREMODE),
MAXFAULTS(zs) ? ZIO_FAILURE_MODE_PANIC : ZIO_FAILURE_MODE_WAIT));
for (i = 0; i < SPA_FEATURES; i++) {
char *buf;
VERIFY3S(-1, !=, asprintf(&buf, "feature@%s",
spa_feature_table[i].fi_uname));
VERIFY3U(0, ==, nvlist_add_uint64(props, buf, 0));
free(buf);
}
VERIFY0(spa_create(ztest_opts.zo_pool, nvroot, props, NULL, NULL));
nvlist_free(nvroot);
nvlist_free(props);
VERIFY3U(0, ==, spa_open(ztest_opts.zo_pool, &spa, FTAG));
zs->zs_metaslab_sz =
1ULL << spa->spa_root_vdev->vdev_child[0]->vdev_ms_shift;
spa_close(spa, FTAG);
kernel_fini();
if (!ztest_opts.zo_mmp_test) {
ztest_run_zdb(ztest_opts.zo_pool);
ztest_freeze();
ztest_run_zdb(ztest_opts.zo_pool);
}
(void) rwlock_destroy(&ztest_name_lock);
mutex_destroy(&ztest_vdev_lock);
}
static void
setup_data_fd(void)
{
static char ztest_name_data[] = "/tmp/ztest.data.XXXXXX";
ztest_fd_data = mkstemp(ztest_name_data);
ASSERT3S(ztest_fd_data, >=, 0);
(void) unlink(ztest_name_data);
}
static int
shared_data_size(ztest_shared_hdr_t *hdr)
{
int size;
size = hdr->zh_hdr_size;
size += hdr->zh_opts_size;
size += hdr->zh_size;
size += hdr->zh_stats_size * hdr->zh_stats_count;
size += hdr->zh_ds_size * hdr->zh_ds_count;
return (size);
}
static void
setup_hdr(void)
{
int size;
ztest_shared_hdr_t *hdr;
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT(hdr != MAP_FAILED);
VERIFY3U(0, ==, ftruncate(ztest_fd_data, sizeof (ztest_shared_hdr_t)));
hdr->zh_hdr_size = sizeof (ztest_shared_hdr_t);
hdr->zh_opts_size = sizeof (ztest_shared_opts_t);
hdr->zh_size = sizeof (ztest_shared_t);
hdr->zh_stats_size = sizeof (ztest_shared_callstate_t);
hdr->zh_stats_count = ZTEST_FUNCS;
hdr->zh_ds_size = sizeof (ztest_shared_ds_t);
hdr->zh_ds_count = ztest_opts.zo_datasets;
size = shared_data_size(hdr);
VERIFY3U(0, ==, ftruncate(ztest_fd_data, size));
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
}
static void
setup_data(void)
{
int size, offset;
ztest_shared_hdr_t *hdr;
uint8_t *buf;
hdr = (void *)mmap(0, P2ROUNDUP(sizeof (*hdr), getpagesize()),
PROT_READ, MAP_SHARED, ztest_fd_data, 0);
ASSERT(hdr != MAP_FAILED);
size = shared_data_size(hdr);
(void) munmap((caddr_t)hdr, P2ROUNDUP(sizeof (*hdr), getpagesize()));
hdr = ztest_shared_hdr = (void *)mmap(0, P2ROUNDUP(size, getpagesize()),
PROT_READ | PROT_WRITE, MAP_SHARED, ztest_fd_data, 0);
ASSERT(hdr != MAP_FAILED);
buf = (uint8_t *)hdr;
offset = hdr->zh_hdr_size;
ztest_shared_opts = (void *)&buf[offset];
offset += hdr->zh_opts_size;
ztest_shared = (void *)&buf[offset];
offset += hdr->zh_size;
ztest_shared_callstate = (void *)&buf[offset];
offset += hdr->zh_stats_size * hdr->zh_stats_count;
ztest_shared_ds = (void *)&buf[offset];
}
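/*
 * Layout of the mmap()ed shared region, as implied by setup_hdr() and the
 * offset arithmetic in setup_data() above:
 *
 *   [ ztest_shared_hdr_t ]            zh_hdr_size
 *   [ ztest_shared_opts_t ]           zh_opts_size
 *   [ ztest_shared_t ]                zh_size
 *   [ callstate x zh_stats_count ]    zh_stats_size each
 *   [ ds x zh_ds_count ]              zh_ds_size each
 */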
static boolean_t
exec_child(char *cmd, char *libpath, boolean_t ignorekill, int *statusp)
{
pid_t pid;
int status;
char *cmdbuf = NULL;
pid = fork();
if (cmd == NULL) {
cmdbuf = umem_alloc(MAXPATHLEN, UMEM_NOFAIL);
(void) strlcpy(cmdbuf, getexecname(), MAXPATHLEN);
cmd = cmdbuf;
}
if (pid == -1)
fatal(1, "fork failed");
if (pid == 0) { /* child */
char *emptyargv[2] = { cmd, NULL };
char fd_data_str[12];
struct rlimit rl = { 1024, 1024 };
(void) setrlimit(RLIMIT_NOFILE, &rl);
(void) close(ztest_fd_rand);
VERIFY(11 >= snprintf(fd_data_str, 12, "%d", ztest_fd_data));
VERIFY(0 == setenv("ZTEST_FD_DATA", fd_data_str, 1));
(void) enable_extended_FILE_stdio(-1, -1);
if (libpath != NULL)
VERIFY(0 == setenv("LD_LIBRARY_PATH", libpath, 1));
(void) execv(cmd, emptyargv);
ztest_dump_core = B_FALSE;
fatal(B_TRUE, "exec failed: %s", cmd);
}
if (cmdbuf != NULL) {
umem_free(cmdbuf, MAXPATHLEN);
cmd = NULL;
}
while (waitpid(pid, &status, 0) != pid)
continue;
if (statusp != NULL)
*statusp = status;
if (WIFEXITED(status)) {
if (WEXITSTATUS(status) != 0) {
(void) fprintf(stderr, "child exited with code %d\n",
WEXITSTATUS(status));
exit(2);
}
return (B_FALSE);
} else if (WIFSIGNALED(status)) {
if (!ignorekill || WTERMSIG(status) != SIGKILL) {
(void) fprintf(stderr, "child died with signal %d\n",
WTERMSIG(status));
exit(3);
}
return (B_TRUE);
} else {
(void) fprintf(stderr, "something strange happened to child\n");
exit(4);
/* NOTREACHED */
}
}
static void
ztest_run_init(void)
{
int i;
ztest_shared_t *zs = ztest_shared;
/*
* Blow away any existing copy of zpool.cache
*/
(void) remove(spa_config_path);
if (ztest_opts.zo_init == 0) {
if (ztest_opts.zo_verbose >= 1)
(void) printf("Importing pool %s\n",
ztest_opts.zo_pool);
ztest_import(zs);
return;
}
/*
* Create and initialize our storage pool.
*/
for (i = 1; i <= ztest_opts.zo_init; i++) {
bzero(zs, sizeof (ztest_shared_t));
if (ztest_opts.zo_verbose >= 3 &&
ztest_opts.zo_init != 1) {
(void) printf("ztest_init(), pass %d\n", i);
}
ztest_init(zs);
}
}
int
main(int argc, char **argv)
{
int kills = 0;
int iters = 0;
int older = 0;
int newer = 0;
ztest_shared_t *zs;
ztest_info_t *zi;
ztest_shared_callstate_t *zc;
char timebuf[100];
char numbuf[NN_NUMBUF_SZ];
spa_t *spa;
char *cmd;
boolean_t hasalt;
int f;
char *fd_data_str = getenv("ZTEST_FD_DATA");
struct sigaction action;
(void) setvbuf(stdout, NULL, _IOLBF, 0);
dprintf_setup(&argc, argv);
zfs_deadman_synctime_ms = 300000;
action.sa_handler = sig_handler;
sigemptyset(&action.sa_mask);
action.sa_flags = 0;
if (sigaction(SIGSEGV, &action, NULL) < 0) {
(void) fprintf(stderr, "ztest: cannot catch SIGSEGV: %s.\n",
strerror(errno));
exit(EXIT_FAILURE);
}
if (sigaction(SIGABRT, &action, NULL) < 0) {
(void) fprintf(stderr, "ztest: cannot catch SIGABRT: %s.\n",
strerror(errno));
exit(EXIT_FAILURE);
}
/*
* Force random_get_bytes() to use /dev/urandom in order to prevent
* ztest from needlessly depleting the system entropy pool.
*/
random_path = "/dev/urandom";
ztest_fd_rand = open(random_path, O_RDONLY);
ASSERT3S(ztest_fd_rand, >=, 0);
if (!fd_data_str) {
process_options(argc, argv);
setup_data_fd();
setup_hdr();
setup_data();
bcopy(&ztest_opts, ztest_shared_opts,
sizeof (*ztest_shared_opts));
} else {
ztest_fd_data = atoi(fd_data_str);
setup_data();
bcopy(ztest_shared_opts, &ztest_opts, sizeof (ztest_opts));
}
ASSERT3U(ztest_opts.zo_datasets, ==, ztest_shared_hdr->zh_ds_count);
/* Override location of zpool.cache */
VERIFY(asprintf((char **)&spa_config_path, "%s/zpool.cache",
ztest_opts.zo_dir) != -1);
ztest_ds = umem_alloc(ztest_opts.zo_datasets * sizeof (ztest_ds_t),
UMEM_NOFAIL);
zs = ztest_shared;
if (fd_data_str) {
metaslab_gang_bang = ztest_opts.zo_metaslab_gang_bang;
metaslab_df_alloc_threshold =
zs->zs_metaslab_df_alloc_threshold;
if (zs->zs_do_init)
ztest_run_init();
else
ztest_run(zs);
exit(0);
}
hasalt = (strlen(ztest_opts.zo_alt_ztest) != 0);
if (ztest_opts.zo_verbose >= 1) {
(void) printf("%llu vdevs, %d datasets, %d threads,"
" %llu seconds...\n",
(u_longlong_t)ztest_opts.zo_vdevs,
ztest_opts.zo_datasets,
ztest_opts.zo_threads,
(u_longlong_t)ztest_opts.zo_time);
}
cmd = umem_alloc(MAXNAMELEN, UMEM_NOFAIL);
(void) strlcpy(cmd, getexecname(), MAXNAMELEN);
zs->zs_do_init = B_TRUE;
if (strlen(ztest_opts.zo_alt_ztest) != 0) {
if (ztest_opts.zo_verbose >= 1) {
(void) printf("Executing older ztest for "
"initialization: %s\n", ztest_opts.zo_alt_ztest);
}
VERIFY(!exec_child(ztest_opts.zo_alt_ztest,
ztest_opts.zo_alt_libpath, B_FALSE, NULL));
} else {
VERIFY(!exec_child(NULL, NULL, B_FALSE, NULL));
}
zs->zs_do_init = B_FALSE;
zs->zs_proc_start = gethrtime();
zs->zs_proc_stop = zs->zs_proc_start + ztest_opts.zo_time * NANOSEC;
for (f = 0; f < ZTEST_FUNCS; f++) {
zi = &ztest_info[f];
zc = ZTEST_GET_SHARED_CALLSTATE(f);
if (zs->zs_proc_start + zi->zi_interval[0] > zs->zs_proc_stop)
zc->zc_next = UINT64_MAX;
else
zc->zc_next = zs->zs_proc_start +
ztest_random(2 * zi->zi_interval[0] + 1);
}
/*
* Run the tests in a loop. These tests include fault injection
* to verify that self-healing data works, and forced crashes
* to verify that we never lose on-disk consistency.
*/
while (gethrtime() < zs->zs_proc_stop) {
int status;
boolean_t killed;
/*
* Initialize the workload counters for each function.
*/
for (f = 0; f < ZTEST_FUNCS; f++) {
zc = ZTEST_GET_SHARED_CALLSTATE(f);
zc->zc_count = 0;
zc->zc_time = 0;
}
/* Set the allocation switch size */
zs->zs_metaslab_df_alloc_threshold =
ztest_random(zs->zs_metaslab_sz / 4) + 1;
if (!hasalt || ztest_random(2) == 0) {
if (hasalt && ztest_opts.zo_verbose >= 1) {
(void) printf("Executing newer ztest: %s\n",
cmd);
}
newer++;
killed = exec_child(cmd, NULL, B_TRUE, &status);
} else {
if (hasalt && ztest_opts.zo_verbose >= 1) {
(void) printf("Executing older ztest: %s\n",
ztest_opts.zo_alt_ztest);
}
older++;
killed = exec_child(ztest_opts.zo_alt_ztest,
ztest_opts.zo_alt_libpath, B_TRUE, &status);
}
if (killed)
kills++;
iters++;
if (ztest_opts.zo_verbose >= 1) {
hrtime_t now = gethrtime();
now = MIN(now, zs->zs_proc_stop);
print_time(zs->zs_proc_stop - now, timebuf);
nicenum(zs->zs_space, numbuf, sizeof (numbuf));
(void) printf("Pass %3d, %8s, %3llu ENOSPC, "
"%4.1f%% of %5s used, %3.0f%% done, %8s to go\n",
iters,
WIFEXITED(status) ? "Complete" : "SIGKILL",
(u_longlong_t)zs->zs_enospc_count,
100.0 * zs->zs_alloc / zs->zs_space,
numbuf,
100.0 * (now - zs->zs_proc_start) /
(ztest_opts.zo_time * NANOSEC), timebuf);
}
if (ztest_opts.zo_verbose >= 2) {
(void) printf("\nWorkload summary:\n\n");
(void) printf("%7s %9s %s\n",
"Calls", "Time", "Function");
(void) printf("%7s %9s %s\n",
"-----", "----", "--------");
for (f = 0; f < ZTEST_FUNCS; f++) {
zi = &ztest_info[f];
zc = ZTEST_GET_SHARED_CALLSTATE(f);
print_time(zc->zc_time, timebuf);
(void) printf("%7llu %9s %s\n",
(u_longlong_t)zc->zc_count, timebuf,
zi->zi_funcname);
}
(void) printf("\n");
}
/*
* It's possible that we killed a child during a rename test,
* in which case we'll have a 'ztest_tmp' pool lying around
* instead of 'ztest'. Do a blind rename in case this happened.
*/
kernel_init(FREAD);
if (spa_open(ztest_opts.zo_pool, &spa, FTAG) == 0) {
spa_close(spa, FTAG);
} else {
char tmpname[ZFS_MAX_DATASET_NAME_LEN];
kernel_fini();
kernel_init(FREAD | FWRITE);
(void) snprintf(tmpname, sizeof (tmpname), "%s_tmp",
ztest_opts.zo_pool);
(void) spa_rename(tmpname, ztest_opts.zo_pool);
}
kernel_fini();
if (!ztest_opts.zo_mmp_test)
ztest_run_zdb(ztest_opts.zo_pool);
}
if (ztest_opts.zo_verbose >= 1) {
if (hasalt) {
(void) printf("%d runs of older ztest: %s\n", older,
ztest_opts.zo_alt_ztest);
(void) printf("%d runs of newer ztest: %s\n", newer,
cmd);
}
(void) printf("%d killed, %d completed, %.0f%% kill rate\n",
kills, iters - kills, (100.0 * kills) / MAX(1, iters));
}
umem_free(cmd, MAXNAMELEN);
return (0);
}
diff --git a/configure.ac b/configure.ac
index 4f180fe53db2..1715b9d0effe 100644
--- a/configure.ac
+++ b/configure.ac
@@ -1,336 +1,337 @@
/*
* This file is part of the ZFS Linux port.
*
* Copyright (c) 2009 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory
* Written by:
* Brian Behlendorf <behlendorf1@llnl.gov>,
* Herb Wartens <wartens2@llnl.gov>,
* Jim Garlick <garlick@llnl.gov>
* LLNL-CODE-403049
*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License, Version 1.0 only
* (the "License"). You may not use this file except in compliance
* with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
AC_INIT(m4_esyscmd(grep Name META | cut -d ':' -f 2 | tr -d ' \n'),
m4_esyscmd(grep Version META | cut -d ':' -f 2 | tr -d ' \n'))
AC_LANG(C)
ZFS_AC_META
AC_CONFIG_AUX_DIR([config])
AC_CONFIG_MACRO_DIR([config])
AC_CANONICAL_SYSTEM
AM_MAINTAINER_MODE
m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])])
AM_INIT_AUTOMAKE([subdir-objects])
AC_CONFIG_HEADERS([zfs_config.h], [
(mv zfs_config.h zfs_config.h.tmp &&
awk -f ${ac_srcdir}/config/config.awk zfs_config.h.tmp >zfs_config.h &&
rm zfs_config.h.tmp) || exit 1])
AC_PROG_INSTALL
AC_PROG_CC
AC_PROG_LIBTOOL
AM_PROG_AS
AM_PROG_CC_C_O
AX_CODE_COVERAGE
ZFS_AC_LICENSE
ZFS_AC_PACKAGE
ZFS_AC_CONFIG
ZFS_AC_DEBUG
ZFS_AC_DEBUGINFO
AC_CONFIG_FILES([
Makefile
udev/Makefile
udev/rules.d/Makefile
etc/Makefile
etc/init.d/Makefile
etc/zfs/Makefile
etc/systemd/Makefile
etc/systemd/system/Makefile
etc/systemd/system-generators/Makefile
etc/sudoers.d/Makefile
etc/modules-load.d/Makefile
man/Makefile
man/man1/Makefile
man/man5/Makefile
man/man8/Makefile
lib/Makefile
lib/libspl/Makefile
lib/libspl/asm-generic/Makefile
lib/libspl/asm-i386/Makefile
lib/libspl/asm-x86_64/Makefile
lib/libspl/include/Makefile
lib/libspl/include/ia32/Makefile
lib/libspl/include/ia32/sys/Makefile
lib/libspl/include/rpc/Makefile
lib/libspl/include/sys/Makefile
lib/libspl/include/sys/dktp/Makefile
lib/libspl/include/util/Makefile
lib/libavl/Makefile
lib/libefi/Makefile
lib/libicp/Makefile
lib/libnvpair/Makefile
lib/libtpool/Makefile
lib/libunicode/Makefile
lib/libuutil/Makefile
lib/libzpool/Makefile
lib/libzfs/libzfs.pc
lib/libzfs/libzfs_core.pc
lib/libzfs/Makefile
lib/libzfs_core/Makefile
lib/libshare/Makefile
cmd/Makefile
cmd/zdb/Makefile
cmd/zhack/Makefile
cmd/zfs/Makefile
cmd/zinject/Makefile
cmd/zpool/Makefile
cmd/zstreamdump/Makefile
cmd/ztest/Makefile
cmd/mount_zfs/Makefile
cmd/fsck_zfs/Makefile
cmd/zvol_id/Makefile
cmd/vdev_id/Makefile
cmd/arcstat/Makefile
cmd/dbufstat/Makefile
cmd/arc_summary/Makefile
cmd/zed/Makefile
cmd/raidz_test/Makefile
cmd/zgenhostid/Makefile
contrib/Makefile
contrib/bash_completion.d/Makefile
contrib/dracut/Makefile
contrib/dracut/02zfsexpandknowledge/Makefile
contrib/dracut/90zfs/Makefile
contrib/initramfs/Makefile
contrib/initramfs/hooks/Makefile
contrib/initramfs/scripts/Makefile
contrib/initramfs/scripts/local-top/Makefile
module/Makefile
module/avl/Makefile
module/nvpair/Makefile
module/unicode/Makefile
module/zcommon/Makefile
module/zfs/Makefile
module/lua/Makefile
module/icp/Makefile
include/Makefile
include/linux/Makefile
include/sys/Makefile
include/sys/fs/Makefile
include/sys/fm/Makefile
include/sys/fm/fs/Makefile
include/sys/crypto/Makefile
include/sys/sysevent/Makefile
include/sys/lua/Makefile
scripts/Makefile
tests/Makefile
tests/test-runner/Makefile
tests/test-runner/bin/Makefile
tests/test-runner/include/Makefile
tests/test-runner/man/Makefile
tests/runfiles/Makefile
tests/zfs-tests/Makefile
tests/zfs-tests/callbacks/Makefile
tests/zfs-tests/cmd/Makefile
tests/zfs-tests/cmd/chg_usr_exec/Makefile
tests/zfs-tests/cmd/user_ns_exec/Makefile
tests/zfs-tests/cmd/devname2devid/Makefile
tests/zfs-tests/cmd/dir_rd_update/Makefile
tests/zfs-tests/cmd/file_check/Makefile
tests/zfs-tests/cmd/file_trunc/Makefile
tests/zfs-tests/cmd/file_write/Makefile
tests/zfs-tests/cmd/largest_file/Makefile
tests/zfs-tests/cmd/mkbusy/Makefile
tests/zfs-tests/cmd/mkfile/Makefile
tests/zfs-tests/cmd/mkfiles/Makefile
tests/zfs-tests/cmd/mktree/Makefile
tests/zfs-tests/cmd/mmap_exec/Makefile
tests/zfs-tests/cmd/mmap_libaio/Makefile
tests/zfs-tests/cmd/mmapwrite/Makefile
tests/zfs-tests/cmd/nvlist_to_lua/Makefile
tests/zfs-tests/cmd/randfree_file/Makefile
tests/zfs-tests/cmd/readmmap/Makefile
tests/zfs-tests/cmd/rename_dir/Makefile
tests/zfs-tests/cmd/rm_lnkcnt_zero_file/Makefile
tests/zfs-tests/cmd/threadsappend/Makefile
tests/zfs-tests/cmd/xattrtest/Makefile
tests/zfs-tests/include/Makefile
tests/zfs-tests/include/default.cfg
tests/zfs-tests/tests/Makefile
tests/zfs-tests/tests/functional/Makefile
tests/zfs-tests/tests/functional/acl/Makefile
tests/zfs-tests/tests/functional/acl/posix/Makefile
tests/zfs-tests/tests/functional/arc/Makefile
tests/zfs-tests/tests/functional/atime/Makefile
tests/zfs-tests/tests/functional/bootfs/Makefile
tests/zfs-tests/tests/functional/cache/Makefile
tests/zfs-tests/tests/functional/cachefile/Makefile
tests/zfs-tests/tests/functional/casenorm/Makefile
tests/zfs-tests/tests/functional/checksum/Makefile
tests/zfs-tests/tests/functional/channel_program/Makefile
tests/zfs-tests/tests/functional/channel_program/lua_core/Makefile
tests/zfs-tests/tests/functional/channel_program/synctask_core/Makefile
tests/zfs-tests/tests/functional/chattr/Makefile
tests/zfs-tests/tests/functional/clean_mirror/Makefile
tests/zfs-tests/tests/functional/cli_root/Makefile
tests/zfs-tests/tests/functional/cli_root/zdb/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_bookmark/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_change-key/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_clone/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_copies/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_create/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_destroy/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_diff/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_get/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_inherit/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_load-key/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_mount/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_program/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_promote/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_property/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_receive/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_rename/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_reservation/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_rollback/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_send/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_set/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_share/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_unload-key/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_unmount/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_unshare/Makefile
tests/zfs-tests/tests/functional/cli_root/zfs_upgrade/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_add/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_attach/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_clear/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_create/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_destroy/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_detach/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_events/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_expand/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_export/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_get/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_history/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_import/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_labelclear/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_offline/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_online/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_remove/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_reopen/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_replace/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_scrub/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_set/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_split/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_status/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_sync/Makefile
tests/zfs-tests/tests/functional/cli_root/zpool_upgrade/Makefile
tests/zfs-tests/tests/functional/cli_user/Makefile
tests/zfs-tests/tests/functional/cli_user/misc/Makefile
tests/zfs-tests/tests/functional/cli_user/zfs_list/Makefile
tests/zfs-tests/tests/functional/cli_user/zpool_iostat/Makefile
tests/zfs-tests/tests/functional/cli_user/zpool_list/Makefile
tests/zfs-tests/tests/functional/compression/Makefile
tests/zfs-tests/tests/functional/ctime/Makefile
tests/zfs-tests/tests/functional/deadman/Makefile
tests/zfs-tests/tests/functional/delegate/Makefile
tests/zfs-tests/tests/functional/devices/Makefile
tests/zfs-tests/tests/functional/events/Makefile
tests/zfs-tests/tests/functional/exec/Makefile
tests/zfs-tests/tests/functional/fault/Makefile
tests/zfs-tests/tests/functional/features/async_destroy/Makefile
tests/zfs-tests/tests/functional/features/large_dnode/Makefile
tests/zfs-tests/tests/functional/features/Makefile
tests/zfs-tests/tests/functional/grow_pool/Makefile
tests/zfs-tests/tests/functional/grow_replicas/Makefile
tests/zfs-tests/tests/functional/history/Makefile
tests/zfs-tests/tests/functional/hkdf/Makefile
tests/zfs-tests/tests/functional/inheritance/Makefile
tests/zfs-tests/tests/functional/inuse/Makefile
tests/zfs-tests/tests/functional/large_files/Makefile
tests/zfs-tests/tests/functional/largest_pool/Makefile
tests/zfs-tests/tests/functional/link_count/Makefile
tests/zfs-tests/tests/functional/libzfs/Makefile
tests/zfs-tests/tests/functional/migration/Makefile
tests/zfs-tests/tests/functional/mmap/Makefile
tests/zfs-tests/tests/functional/mmp/Makefile
tests/zfs-tests/tests/functional/mount/Makefile
tests/zfs-tests/tests/functional/mv_files/Makefile
tests/zfs-tests/tests/functional/nestedfs/Makefile
tests/zfs-tests/tests/functional/no_space/Makefile
tests/zfs-tests/tests/functional/nopwrite/Makefile
tests/zfs-tests/tests/functional/online_offline/Makefile
tests/zfs-tests/tests/functional/pool_names/Makefile
tests/zfs-tests/tests/functional/poolversion/Makefile
tests/zfs-tests/tests/functional/privilege/Makefile
tests/zfs-tests/tests/functional/projectquota/Makefile
tests/zfs-tests/tests/functional/quota/Makefile
tests/zfs-tests/tests/functional/raidz/Makefile
tests/zfs-tests/tests/functional/redundancy/Makefile
tests/zfs-tests/tests/functional/refquota/Makefile
tests/zfs-tests/tests/functional/refreserv/Makefile
+ tests/zfs-tests/tests/functional/removal/Makefile
tests/zfs-tests/tests/functional/rename_dirs/Makefile
tests/zfs-tests/tests/functional/replacement/Makefile
tests/zfs-tests/tests/functional/reservation/Makefile
tests/zfs-tests/tests/functional/rootpool/Makefile
tests/zfs-tests/tests/functional/rsend/Makefile
tests/zfs-tests/tests/functional/scrub_mirror/Makefile
tests/zfs-tests/tests/functional/slog/Makefile
tests/zfs-tests/tests/functional/snapshot/Makefile
tests/zfs-tests/tests/functional/snapused/Makefile
tests/zfs-tests/tests/functional/sparse/Makefile
tests/zfs-tests/tests/functional/threadsappend/Makefile
tests/zfs-tests/tests/functional/tmpfile/Makefile
tests/zfs-tests/tests/functional/truncate/Makefile
tests/zfs-tests/tests/functional/user_namespace/Makefile
tests/zfs-tests/tests/functional/userquota/Makefile
tests/zfs-tests/tests/functional/upgrade/Makefile
tests/zfs-tests/tests/functional/vdev_zaps/Makefile
tests/zfs-tests/tests/functional/write_dirs/Makefile
tests/zfs-tests/tests/functional/xattr/Makefile
tests/zfs-tests/tests/functional/zvol/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_cli/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_ENOSPC/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_misc/Makefile
tests/zfs-tests/tests/functional/zvol/zvol_swap/Makefile
tests/zfs-tests/tests/perf/Makefile
tests/zfs-tests/tests/perf/fio/Makefile
tests/zfs-tests/tests/perf/regression/Makefile
tests/zfs-tests/tests/perf/scripts/Makefile
tests/zfs-tests/tests/stress/Makefile
rpm/Makefile
rpm/redhat/Makefile
rpm/redhat/zfs.spec
rpm/redhat/zfs-kmod.spec
rpm/redhat/zfs-dkms.spec
rpm/generic/Makefile
rpm/generic/zfs.spec
rpm/generic/zfs-kmod.spec
rpm/generic/zfs-dkms.spec
zfs.release
])
AC_OUTPUT
diff --git a/include/libzfs.h b/include/libzfs.h
index 00f22cfb11bf..cbaaa13a2154 100644
--- a/include/libzfs.h
+++ b/include/libzfs.h
@@ -1,901 +1,906 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2016, Intel Corporation.
* Copyright 2016 Nexenta Systems, Inc.
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
#ifndef _LIBZFS_H
#define _LIBZFS_H
#include <assert.h>
#include <libnvpair.h>
#include <sys/mnttab.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/varargs.h>
#include <sys/fs/zfs.h>
#include <sys/avl.h>
#include <ucred.h>
#include <libzfs_core.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Miscellaneous ZFS constants
*/
#define ZFS_MAXPROPLEN MAXPATHLEN
#define ZPOOL_MAXPROPLEN MAXPATHLEN
/*
* Default device paths
*/
#define DISK_ROOT "/dev"
#define UDISK_ROOT "/dev/disk"
#define ZVOL_ROOT "/dev/zvol"
/*
* Default wait time for a device name to be created.
*/
#define DISK_LABEL_WAIT (30 * 1000) /* 30 seconds */
#define IMPORT_ORDER_PREFERRED_1 1
#define IMPORT_ORDER_PREFERRED_2 2
#define IMPORT_ORDER_SCAN_OFFSET 10
#define IMPORT_ORDER_DEFAULT 100
#define DEFAULT_IMPORT_PATH_SIZE 9
extern char *zpool_default_import_path[DEFAULT_IMPORT_PATH_SIZE];
/*
* libzfs errors
*/
typedef enum zfs_error {
EZFS_SUCCESS = 0, /* no error -- success */
EZFS_NOMEM = 2000, /* out of memory */
EZFS_BADPROP, /* invalid property value */
EZFS_PROPREADONLY, /* cannot set readonly property */
EZFS_PROPTYPE, /* property does not apply to dataset type */
EZFS_PROPNONINHERIT, /* property is not inheritable */
EZFS_PROPSPACE, /* bad quota or reservation */
EZFS_BADTYPE, /* dataset is not of appropriate type */
EZFS_BUSY, /* pool or dataset is busy */
EZFS_EXISTS, /* pool or dataset already exists */
EZFS_NOENT, /* no such pool or dataset */
EZFS_BADSTREAM, /* bad backup stream */
EZFS_DSREADONLY, /* dataset is readonly */
EZFS_VOLTOOBIG, /* volume is too large for 32-bit system */
EZFS_INVALIDNAME, /* invalid dataset name */
EZFS_BADRESTORE, /* unable to restore to destination */
EZFS_BADBACKUP, /* backup failed */
EZFS_BADTARGET, /* bad attach/detach/replace target */
EZFS_NODEVICE, /* no such device in pool */
EZFS_BADDEV, /* invalid device to add */
EZFS_NOREPLICAS, /* no valid replicas */
EZFS_RESILVERING, /* currently resilvering */
EZFS_BADVERSION, /* unsupported version */
EZFS_POOLUNAVAIL, /* pool is currently unavailable */
EZFS_DEVOVERFLOW, /* too many devices in one vdev */
EZFS_BADPATH, /* must be an absolute path */
EZFS_CROSSTARGET, /* rename or clone across pool or dataset */
EZFS_ZONED, /* used improperly in local zone */
EZFS_MOUNTFAILED, /* failed to mount dataset */
EZFS_UMOUNTFAILED, /* failed to unmount dataset */
EZFS_UNSHARENFSFAILED, /* unshare(1M) failed */
EZFS_SHARENFSFAILED, /* share(1M) failed */
EZFS_PERM, /* permission denied */
EZFS_NOSPC, /* out of space */
EZFS_FAULT, /* bad address */
EZFS_IO, /* I/O error */
EZFS_INTR, /* signal received */
EZFS_ISSPARE, /* device is a hot spare */
EZFS_INVALCONFIG, /* invalid vdev configuration */
EZFS_RECURSIVE, /* recursive dependency */
EZFS_NOHISTORY, /* no history object */
EZFS_POOLPROPS, /* couldn't retrieve pool props */
EZFS_POOL_NOTSUP, /* ops not supported for this type of pool */
EZFS_POOL_INVALARG, /* invalid argument for this pool operation */
EZFS_NAMETOOLONG, /* dataset name is too long */
EZFS_OPENFAILED, /* open of device failed */
EZFS_NOCAP, /* couldn't get capacity */
EZFS_LABELFAILED, /* write of label failed */
EZFS_BADWHO, /* invalid permission who */
EZFS_BADPERM, /* invalid permission */
EZFS_BADPERMSET, /* invalid permission set name */
EZFS_NODELEGATION, /* delegated administration is disabled */
EZFS_UNSHARESMBFAILED, /* failed to unshare over smb */
EZFS_SHARESMBFAILED, /* failed to share over smb */
EZFS_BADCACHE, /* bad cache file */
EZFS_ISL2CACHE, /* device is for the level 2 ARC */
EZFS_VDEVNOTSUP, /* unsupported vdev type */
EZFS_NOTSUP, /* ops not supported on this dataset */
EZFS_ACTIVE_SPARE, /* pool has active shared spare devices */
EZFS_UNPLAYED_LOGS, /* log device has unplayed logs */
EZFS_REFTAG_RELE, /* snapshot release: tag not found */
EZFS_REFTAG_HOLD, /* snapshot hold: tag already exists */
EZFS_TAGTOOLONG, /* snapshot hold/rele: tag too long */
EZFS_PIPEFAILED, /* pipe create failed */
EZFS_THREADCREATEFAILED, /* thread create failed */
EZFS_POSTSPLIT_ONLINE, /* onlining a disk after splitting it */
EZFS_SCRUBBING, /* currently scrubbing */
EZFS_NO_SCRUB, /* no active scrub */
EZFS_DIFF, /* general failure of zfs diff */
EZFS_DIFFDATA, /* bad zfs diff data */
EZFS_POOLREADONLY, /* pool is in read-only mode */
EZFS_SCRUB_PAUSED, /* scrub currently paused */
EZFS_ACTIVE_POOL, /* pool is imported on a different system */
EZFS_CRYPTOFAILED, /* failed to setup encryption */
+ EZFS_NO_PENDING, /* cannot cancel, no operation is pending */
EZFS_UNKNOWN
} zfs_error_t;
/*
 * The following data structures are all part of the zfs_allow_t data
 * structure, which is used for printing 'allow' permissions.  It is a
 * linked list of zfs_allow_t's, each of which contains AVL trees for
 * user/group/sets/..., and each entry in those trees has AVL trees for
 * its permissions, grouped by whether they are local, descendent, or
 * local+descendent permissions.  The AVL trees are used primarily for
 * sorting, but also so that we can quickly find a given user and/or
 * permission.
 */
typedef struct zfs_perm_node {
avl_node_t z_node;
char z_pname[MAXPATHLEN];
} zfs_perm_node_t;
typedef struct zfs_allow_node {
avl_node_t z_node;
char z_key[MAXPATHLEN]; /* name, such as joe */
avl_tree_t z_localdescend; /* local+descendent perms */
avl_tree_t z_local; /* local permissions */
avl_tree_t z_descend; /* descendent permissions */
} zfs_allow_node_t;
typedef struct zfs_allow {
struct zfs_allow *z_next;
char z_setpoint[MAXPATHLEN];
avl_tree_t z_sets;
avl_tree_t z_crperms;
avl_tree_t z_user;
avl_tree_t z_group;
avl_tree_t z_everyone;
} zfs_allow_t;
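/*
 * A minimal sketch of walking the structures above when printing 'allow'
 * output.  print_user_perms() is a hypothetical helper, not a libzfs API;
 * avl_first()/AVL_NEXT() come from sys/avl.h, and <stdio.h> is assumed.
 */
static void
print_user_perms(zfs_allow_t *allow_list)
{
	for (zfs_allow_t *za = allow_list; za != NULL; za = za->z_next) {
		for (zfs_allow_node_t *an = avl_first(&za->z_user);
		    an != NULL; an = AVL_NEXT(&za->z_user, an)) {
			for (zfs_perm_node_t *pn = avl_first(&an->z_local);
			    pn != NULL; pn = AVL_NEXT(&an->z_local, pn)) {
				(void) printf("%s\t%s\t%s (local)\n",
				    za->z_setpoint, an->z_key, pn->z_pname);
			}
		}
	}
}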
/*
* Basic handle types
*/
typedef struct zfs_handle zfs_handle_t;
typedef struct zpool_handle zpool_handle_t;
typedef struct libzfs_handle libzfs_handle_t;
/*
* Library initialization
*/
extern libzfs_handle_t *libzfs_init(void);
extern void libzfs_fini(libzfs_handle_t *);
extern libzfs_handle_t *zpool_get_handle(zpool_handle_t *);
extern libzfs_handle_t *zfs_get_handle(zfs_handle_t *);
extern void libzfs_print_on_error(libzfs_handle_t *, boolean_t);
extern void zfs_save_arguments(int argc, char **, char *, int);
extern int zpool_log_history(libzfs_handle_t *, const char *);
extern int libzfs_errno(libzfs_handle_t *);
extern const char *libzfs_error_init(int);
extern const char *libzfs_error_action(libzfs_handle_t *);
extern const char *libzfs_error_description(libzfs_handle_t *);
extern int zfs_standard_error(libzfs_handle_t *, int, const char *);
extern void libzfs_mnttab_init(libzfs_handle_t *);
extern void libzfs_mnttab_fini(libzfs_handle_t *);
extern void libzfs_mnttab_cache(libzfs_handle_t *, boolean_t);
extern int libzfs_mnttab_find(libzfs_handle_t *, const char *,
struct mnttab *);
extern void libzfs_mnttab_add(libzfs_handle_t *, const char *,
const char *, const char *);
extern void libzfs_mnttab_remove(libzfs_handle_t *, const char *);
/*
* Basic handle functions
*/
extern zpool_handle_t *zpool_open(libzfs_handle_t *, const char *);
extern zpool_handle_t *zpool_open_canfail(libzfs_handle_t *, const char *);
extern void zpool_close(zpool_handle_t *);
extern const char *zpool_get_name(zpool_handle_t *);
extern int zpool_get_state(zpool_handle_t *);
extern const char *zpool_state_to_name(vdev_state_t, vdev_aux_t);
extern const char *zpool_pool_state_to_name(pool_state_t);
extern void zpool_free_handles(libzfs_handle_t *);
/*
* Iterate over all active pools in the system.
*/
typedef int (*zpool_iter_f)(zpool_handle_t *, void *);
extern int zpool_iter(libzfs_handle_t *, zpool_iter_f, void *);
extern boolean_t zpool_skip_pool(const char *);
/*
* Functions to create and destroy pools
*/
extern int zpool_create(libzfs_handle_t *, const char *, nvlist_t *,
nvlist_t *, nvlist_t *);
extern int zpool_destroy(zpool_handle_t *, const char *);
extern int zpool_add(zpool_handle_t *, nvlist_t *);
typedef struct splitflags {
/* do not split, but return the config that would be split off */
int dryrun : 1;
/* after splitting, import the pool */
int import : 1;
int name_flags;
} splitflags_t;
/*
* Functions to manipulate pool and vdev state
*/
extern int zpool_scan(zpool_handle_t *, pool_scan_func_t, pool_scrub_cmd_t);
extern int zpool_clear(zpool_handle_t *, const char *, nvlist_t *);
extern int zpool_reguid(zpool_handle_t *);
extern int zpool_reopen_one(zpool_handle_t *, void *);
extern int zpool_sync_one(zpool_handle_t *, void *);
extern int zpool_vdev_online(zpool_handle_t *, const char *, int,
vdev_state_t *);
extern int zpool_vdev_offline(zpool_handle_t *, const char *, boolean_t);
extern int zpool_vdev_attach(zpool_handle_t *, const char *,
const char *, nvlist_t *, int);
extern int zpool_vdev_detach(zpool_handle_t *, const char *);
extern int zpool_vdev_remove(zpool_handle_t *, const char *);
+extern int zpool_vdev_remove_cancel(zpool_handle_t *);
+extern int zpool_vdev_indirect_size(zpool_handle_t *, const char *, uint64_t *);
extern int zpool_vdev_split(zpool_handle_t *, char *, nvlist_t **, nvlist_t *,
splitflags_t);
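/*
 * A minimal sketch of a dry-run 'zpool split' using the splitflags_t
 * defined above.  try_split_dryrun() is a hypothetical helper; properties
 * for the new pool are omitted (NULL) for brevity.
 */
static int
try_split_dryrun(zpool_handle_t *zhp, char *newname)
{
	nvlist_t *newconfig = NULL;
	splitflags_t flags = { 0 };

	flags.dryrun = 1;	/* only compute the would-be config */
	flags.import = 0;	/* do not import the split-off pool */

	int err = zpool_vdev_split(zhp, newname, &newconfig, NULL, flags);
	if (err == 0 && newconfig != NULL)
		nvlist_free(newconfig);
	return (err);
}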
extern int zpool_vdev_fault(zpool_handle_t *, uint64_t, vdev_aux_t);
extern int zpool_vdev_degrade(zpool_handle_t *, uint64_t, vdev_aux_t);
extern int zpool_vdev_clear(zpool_handle_t *, uint64_t);
extern nvlist_t *zpool_find_vdev(zpool_handle_t *, const char *, boolean_t *,
boolean_t *, boolean_t *);
extern nvlist_t *zpool_find_vdev_by_physpath(zpool_handle_t *, const char *,
boolean_t *, boolean_t *, boolean_t *);
extern int zpool_label_disk_wait(char *, int);
extern int zpool_label_disk(libzfs_handle_t *, zpool_handle_t *, char *);
extern uint64_t zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path);
int zfs_dev_is_dm(char *dev_name);
int zfs_dev_is_whole_disk(char *dev_name);
char *zfs_get_underlying_path(char *dev_name);
char *zfs_get_enclosure_sysfs_path(char *dev_name);
/*
* Functions to manage pool properties
*/
extern int zpool_set_prop(zpool_handle_t *, const char *, const char *);
extern int zpool_get_prop(zpool_handle_t *, zpool_prop_t, char *,
size_t proplen, zprop_source_t *, boolean_t literal);
extern uint64_t zpool_get_prop_int(zpool_handle_t *, zpool_prop_t,
zprop_source_t *);
extern const char *zpool_prop_to_name(zpool_prop_t);
extern const char *zpool_prop_values(zpool_prop_t);
/*
* Pool health statistics.
*/
typedef enum {
/*
* The following correspond to faults as defined in the (fault.fs.zfs.*)
* event namespace. Each is associated with a corresponding message ID.
*/
ZPOOL_STATUS_CORRUPT_CACHE, /* corrupt /kernel/drv/zpool.cache */
ZPOOL_STATUS_MISSING_DEV_R, /* missing device with replicas */
ZPOOL_STATUS_MISSING_DEV_NR, /* missing device with no replicas */
ZPOOL_STATUS_CORRUPT_LABEL_R, /* bad device label with replicas */
ZPOOL_STATUS_CORRUPT_LABEL_NR, /* bad device label with no replicas */
ZPOOL_STATUS_BAD_GUID_SUM, /* sum of device guids didn't match */
ZPOOL_STATUS_CORRUPT_POOL, /* pool metadata is corrupted */
ZPOOL_STATUS_CORRUPT_DATA, /* data errors in user (meta)data */
ZPOOL_STATUS_FAILING_DEV, /* device experiencing errors */
ZPOOL_STATUS_VERSION_NEWER, /* newer on-disk version */
ZPOOL_STATUS_HOSTID_MISMATCH, /* last accessed by another system */
ZPOOL_STATUS_HOSTID_ACTIVE, /* currently active on another system */
ZPOOL_STATUS_HOSTID_REQUIRED, /* multihost=on and hostid=0 */
ZPOOL_STATUS_IO_FAILURE_WAIT, /* failed I/O, failmode 'wait' */
ZPOOL_STATUS_IO_FAILURE_CONTINUE, /* failed I/O, failmode 'continue' */
ZPOOL_STATUS_IO_FAILURE_MMP, /* failed MMP, failmode not 'panic' */
ZPOOL_STATUS_BAD_LOG, /* cannot read log chain(s) */
ZPOOL_STATUS_ERRATA, /* informational errata available */
/*
* If the pool has unsupported features but can still be opened in
* read-only mode, its status is ZPOOL_STATUS_UNSUP_FEAT_WRITE. If the
* pool has unsupported features but cannot be opened at all, its
* status is ZPOOL_STATUS_UNSUP_FEAT_READ.
*/
ZPOOL_STATUS_UNSUP_FEAT_READ, /* unsupported features for read */
ZPOOL_STATUS_UNSUP_FEAT_WRITE, /* unsupported features for write */
/*
* These faults have no corresponding message ID. At the time we are
* checking the status, the original reason for the FMA fault (I/O or
* checksum errors) has been lost.
*/
ZPOOL_STATUS_FAULTED_DEV_R, /* faulted device with replicas */
ZPOOL_STATUS_FAULTED_DEV_NR, /* faulted device with no replicas */
/*
 * The following are not faults per se, but are still errors that may
 * require administrative attention.  There is no corresponding
 * message ID.
 */
ZPOOL_STATUS_VERSION_OLDER, /* older legacy on-disk version */
ZPOOL_STATUS_FEAT_DISABLED, /* supported features are disabled */
ZPOOL_STATUS_RESILVERING, /* device being resilvered */
ZPOOL_STATUS_OFFLINE_DEV, /* device offline */
ZPOOL_STATUS_REMOVED_DEV, /* removed device */
/*
* Finally, the following indicates a healthy pool.
*/
ZPOOL_STATUS_OK
} zpool_status_t;
extern zpool_status_t zpool_get_status(zpool_handle_t *, char **,
zpool_errata_t *);
extern zpool_status_t zpool_import_status(nvlist_t *, char **,
zpool_errata_t *);
extern void zpool_dump_ddt(const ddt_stat_t *dds, const ddt_histogram_t *ddh);
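/*
 * A minimal sketch of consuming zpool_get_status(); warn_if_unhealthy()
 * is a hypothetical helper and <stdio.h> is assumed.  When non-NULL, the
 * returned msgid names the associated FMA message article.
 */
static void
warn_if_unhealthy(zpool_handle_t *zhp)
{
	char *msgid = NULL;
	zpool_errata_t errata;
	zpool_status_t status = zpool_get_status(zhp, &msgid, &errata);

	if (status != ZPOOL_STATUS_OK) {
		(void) fprintf(stderr, "pool %s needs attention (status %d%s%s)\n",
		    zpool_get_name(zhp), (int)status,
		    msgid != NULL ? ", msgid " : "",
		    msgid != NULL ? msgid : "");
	}
}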
/*
* Statistics and configuration functions.
*/
extern nvlist_t *zpool_get_config(zpool_handle_t *, nvlist_t **);
extern nvlist_t *zpool_get_features(zpool_handle_t *);
extern int zpool_refresh_stats(zpool_handle_t *, boolean_t *);
extern int zpool_get_errlog(zpool_handle_t *, nvlist_t **);
/*
* Import and export functions
*/
extern int zpool_export(zpool_handle_t *, boolean_t, const char *);
extern int zpool_export_force(zpool_handle_t *, const char *);
extern int zpool_import(libzfs_handle_t *, nvlist_t *, const char *,
char *altroot);
extern int zpool_import_props(libzfs_handle_t *, nvlist_t *, const char *,
nvlist_t *, int);
extern void zpool_print_unsup_feat(nvlist_t *config);
/*
* Search for pools to import
*/
typedef struct importargs {
char **path; /* a list of paths to search */
int paths; /* number of paths to search */
char *poolname; /* name of a pool to find */
uint64_t guid; /* guid of a pool to find */
char *cachefile; /* cachefile to use for import */
int can_be_active : 1; /* can the pool be active? */
int unique : 1; /* does 'poolname' already exist? */
int exists : 1; /* set on return if pool already exists */
int scan : 1; /* prefer scanning to libblkid cache */
} importargs_t;
extern nvlist_t *zpool_search_import(libzfs_handle_t *, importargs_t *);
extern int zpool_tryimport(libzfs_handle_t *hdl, char *target,
nvlist_t **configp, importargs_t *args);
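/*
 * A minimal sketch of searching for an importable pool by name;
 * find_pool_config() is a hypothetical helper, not a libzfs API.
 */
static nvlist_t *
find_pool_config(libzfs_handle_t *hdl, char *poolname)
{
	importargs_t args = { 0 };

	args.poolname = poolname;	/* restrict the search to one pool */
	args.scan = 1;			/* prefer scanning over the blkid cache */

	/* Returns an nvlist describing matching pools (may be NULL). */
	return (zpool_search_import(hdl, &args));
}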
/* legacy pool search routines */
extern nvlist_t *zpool_find_import(libzfs_handle_t *, int, char **);
extern nvlist_t *zpool_find_import_cached(libzfs_handle_t *, const char *,
char *, uint64_t);
/*
* Miscellaneous pool functions
*/
struct zfs_cmd;
extern const char *zfs_history_event_names[];
typedef enum {
VDEV_NAME_PATH = 1 << 0,
VDEV_NAME_GUID = 1 << 1,
VDEV_NAME_FOLLOW_LINKS = 1 << 2,
VDEV_NAME_TYPE_ID = 1 << 3,
} vdev_name_t;
extern char *zpool_vdev_name(libzfs_handle_t *, zpool_handle_t *, nvlist_t *,
int name_flags);
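/*
 * A minimal sketch of resolving a vdev's display name from its config
 * nvlist; vdev_display_name() is a hypothetical wrapper.  The returned
 * string is allocated and is expected to be released with free().
 */
static char *
vdev_display_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv)
{
	return (zpool_vdev_name(hdl, zhp, nv,
	    VDEV_NAME_PATH | VDEV_NAME_FOLLOW_LINKS));
}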
extern int zpool_upgrade(zpool_handle_t *, uint64_t);
extern int zpool_get_history(zpool_handle_t *, nvlist_t **);
extern int zpool_history_unpack(char *, uint64_t, uint64_t *,
nvlist_t ***, uint_t *);
extern int zpool_events_next(libzfs_handle_t *, nvlist_t **, int *, unsigned,
int);
extern int zpool_events_clear(libzfs_handle_t *, int *);
extern int zpool_events_seek(libzfs_handle_t *, uint64_t, int);
extern void zpool_obj_to_path(zpool_handle_t *, uint64_t, uint64_t, char *,
size_t len);
extern int zfs_ioctl(libzfs_handle_t *, int, struct zfs_cmd *);
extern int zpool_get_physpath(zpool_handle_t *, char *, size_t);
extern void zpool_explain_recover(libzfs_handle_t *, const char *, int,
nvlist_t *);
/*
* Basic handle manipulations. These functions do not create or destroy the
* underlying datasets, only the references to them.
*/
extern zfs_handle_t *zfs_open(libzfs_handle_t *, const char *, int);
extern zfs_handle_t *zfs_handle_dup(zfs_handle_t *);
extern void zfs_close(zfs_handle_t *);
extern zfs_type_t zfs_get_type(const zfs_handle_t *);
extern const char *zfs_get_name(const zfs_handle_t *);
extern zpool_handle_t *zfs_get_pool_handle(const zfs_handle_t *);
extern const char *zfs_get_pool_name(const zfs_handle_t *);
/*
* Property management functions. Some functions are shared with the kernel,
* and are found in sys/fs/zfs.h.
*/
/*
* zfs dataset property management
*/
extern const char *zfs_prop_default_string(zfs_prop_t);
extern uint64_t zfs_prop_default_numeric(zfs_prop_t);
extern const char *zfs_prop_column_name(zfs_prop_t);
extern boolean_t zfs_prop_align_right(zfs_prop_t);
extern nvlist_t *zfs_valid_proplist(libzfs_handle_t *, zfs_type_t, nvlist_t *,
uint64_t, zfs_handle_t *, zpool_handle_t *, boolean_t, const char *);
extern const char *zfs_prop_to_name(zfs_prop_t);
extern int zfs_prop_set(zfs_handle_t *, const char *, const char *);
extern int zfs_prop_set_list(zfs_handle_t *, nvlist_t *);
extern int zfs_prop_get(zfs_handle_t *, zfs_prop_t, char *, size_t,
zprop_source_t *, char *, size_t, boolean_t);
extern int zfs_prop_get_recvd(zfs_handle_t *, const char *, char *, size_t,
boolean_t);
extern int zfs_prop_get_numeric(zfs_handle_t *, zfs_prop_t, uint64_t *,
zprop_source_t *, char *, size_t);
extern int zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue);
extern int zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
extern int zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue);
extern int zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal);
extern int zfs_prop_get_feature(zfs_handle_t *zhp, const char *propname,
char *buf, size_t len);
extern uint64_t getprop_uint64(zfs_handle_t *, zfs_prop_t, char **);
extern uint64_t zfs_prop_get_int(zfs_handle_t *, zfs_prop_t);
extern int zfs_prop_inherit(zfs_handle_t *, const char *, boolean_t);
extern const char *zfs_prop_values(zfs_prop_t);
extern int zfs_prop_is_string(zfs_prop_t prop);
extern nvlist_t *zfs_get_all_props(zfs_handle_t *);
extern nvlist_t *zfs_get_user_props(zfs_handle_t *);
extern nvlist_t *zfs_get_recvd_props(zfs_handle_t *);
extern nvlist_t *zfs_get_clones_nvl(zfs_handle_t *);
/*
* zfs encryption management
*/
extern int zfs_crypto_get_encryption_root(zfs_handle_t *, boolean_t *, char *);
extern int zfs_crypto_create(libzfs_handle_t *, char *, nvlist_t *, nvlist_t *,
uint8_t **, uint_t *);
extern int zfs_crypto_clone_check(libzfs_handle_t *, zfs_handle_t *, char *,
nvlist_t *);
extern int zfs_crypto_attempt_load_keys(libzfs_handle_t *, char *);
extern int zfs_crypto_load_key(zfs_handle_t *, boolean_t, char *);
extern int zfs_crypto_unload_key(zfs_handle_t *);
extern int zfs_crypto_rewrap(zfs_handle_t *, nvlist_t *, boolean_t);
typedef struct zprop_list {
int pl_prop;
char *pl_user_prop;
struct zprop_list *pl_next;
boolean_t pl_all;
size_t pl_width;
size_t pl_recvd_width;
boolean_t pl_fixed;
} zprop_list_t;
extern int zfs_expand_proplist(zfs_handle_t *, zprop_list_t **, boolean_t,
boolean_t);
extern void zfs_prune_proplist(zfs_handle_t *, uint8_t *);
#define ZFS_MOUNTPOINT_NONE "none"
#define ZFS_MOUNTPOINT_LEGACY "legacy"
#define ZFS_FEATURE_DISABLED "disabled"
#define ZFS_FEATURE_ENABLED "enabled"
#define ZFS_FEATURE_ACTIVE "active"
#define ZFS_UNSUPPORTED_INACTIVE "inactive"
#define ZFS_UNSUPPORTED_READONLY "readonly"
/*
* zpool property management
*/
extern int zpool_expand_proplist(zpool_handle_t *, zprop_list_t **);
extern int zpool_prop_get_feature(zpool_handle_t *, const char *, char *,
size_t);
extern const char *zpool_prop_default_string(zpool_prop_t);
extern uint64_t zpool_prop_default_numeric(zpool_prop_t);
extern const char *zpool_prop_column_name(zpool_prop_t);
extern boolean_t zpool_prop_align_right(zpool_prop_t);
/*
* Functions shared by zfs and zpool property management.
*/
extern int zprop_iter(zprop_func func, void *cb, boolean_t show_all,
boolean_t ordered, zfs_type_t type);
extern int zprop_get_list(libzfs_handle_t *, char *, zprop_list_t **,
zfs_type_t);
extern void zprop_free_list(zprop_list_t *);
#define ZFS_GET_NCOLS 5
typedef enum {
GET_COL_NONE,
GET_COL_NAME,
GET_COL_PROPERTY,
GET_COL_VALUE,
GET_COL_RECVD,
GET_COL_SOURCE
} zfs_get_column_t;
/*
* Functions for printing zfs or zpool properties
*/
typedef struct zprop_get_cbdata {
int cb_sources;
zfs_get_column_t cb_columns[ZFS_GET_NCOLS];
int cb_colwidths[ZFS_GET_NCOLS + 1];
boolean_t cb_scripted;
boolean_t cb_literal;
boolean_t cb_first;
zprop_list_t *cb_proplist;
zfs_type_t cb_type;
} zprop_get_cbdata_t;
void zprop_print_one_property(const char *, zprop_get_cbdata_t *,
const char *, const char *, zprop_source_t, const char *,
const char *);
/*
* Iterator functions.
*/
typedef int (*zfs_iter_f)(zfs_handle_t *, void *);
extern int zfs_iter_root(libzfs_handle_t *, zfs_iter_f, void *);
extern int zfs_iter_children(zfs_handle_t *, zfs_iter_f, void *);
extern int zfs_iter_dependents(zfs_handle_t *, boolean_t, zfs_iter_f, void *);
extern int zfs_iter_filesystems(zfs_handle_t *, zfs_iter_f, void *);
extern int zfs_iter_snapshots(zfs_handle_t *, boolean_t, zfs_iter_f, void *);
extern int zfs_iter_snapshots_sorted(zfs_handle_t *, zfs_iter_f, void *);
extern int zfs_iter_snapspec(zfs_handle_t *, const char *, zfs_iter_f, void *);
extern int zfs_iter_bookmarks(zfs_handle_t *, zfs_iter_f, void *);
typedef struct get_all_cb {
zfs_handle_t **cb_handles;
size_t cb_alloc;
size_t cb_used;
boolean_t cb_verbose;
int (*cb_getone)(zfs_handle_t *, void *);
} get_all_cb_t;
void libzfs_add_handle(get_all_cb_t *, zfs_handle_t *);
int libzfs_dataset_cmp(const void *, const void *);
/*
* Functions to create and destroy datasets.
*/
extern int zfs_create(libzfs_handle_t *, const char *, zfs_type_t,
nvlist_t *);
extern int zfs_create_ancestors(libzfs_handle_t *, const char *);
extern int zfs_destroy(zfs_handle_t *, boolean_t);
extern int zfs_destroy_snaps(zfs_handle_t *, char *, boolean_t);
extern int zfs_destroy_snaps_nvl(libzfs_handle_t *, nvlist_t *, boolean_t);
extern int zfs_clone(zfs_handle_t *, const char *, nvlist_t *);
extern int zfs_snapshot(libzfs_handle_t *, const char *, boolean_t, nvlist_t *);
extern int zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps,
nvlist_t *props);
extern int zfs_rollback(zfs_handle_t *, zfs_handle_t *, boolean_t);
extern int zfs_rename(zfs_handle_t *, const char *, boolean_t, boolean_t);
typedef struct sendflags {
/* print informational messages (ie, -v was specified) */
boolean_t verbose;
/* recursive send (ie, -R) */
boolean_t replicate;
/* for incrementals, do all intermediate snapshots */
boolean_t doall;
/* if dataset is a clone, do incremental from its origin */
boolean_t fromorigin;
/* do deduplication */
boolean_t dedup;
/* send properties (ie, -p) */
boolean_t props;
/* do not send (no-op, ie. -n) */
boolean_t dryrun;
/* parsable verbose output (ie. -P) */
boolean_t parsable;
/* show progress (ie. -v) */
boolean_t progress;
/* large blocks (>128K) are permitted */
boolean_t largeblock;
/* WRITE_EMBEDDED records of type DATA are permitted */
boolean_t embed_data;
/* compressed WRITE records are permitted */
boolean_t compress;
/* raw encrypted records are permitted */
boolean_t raw;
/* only send received properties (ie. -b) */
boolean_t backup;
} sendflags_t;
typedef boolean_t (snapfilter_cb_t)(zfs_handle_t *, void *);
extern int zfs_send(zfs_handle_t *, const char *, const char *,
sendflags_t *, int, snapfilter_cb_t, void *, nvlist_t **);
extern int zfs_send_one(zfs_handle_t *, const char *, int, sendflags_t flags);
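/*
 * A minimal sketch of sending a single snapshot with compressed and
 * embedded WRITE records enabled; send_snapshot_fd() is a hypothetical
 * helper, and zhp is assumed to be a handle to the snapshot being sent.
 */
static int
send_snapshot_fd(zfs_handle_t *zhp, const char *fromsnap, int outfd)
{
	sendflags_t flags = { 0 };

	flags.compress = B_TRUE;	/* allow compressed WRITE records */
	flags.embed_data = B_TRUE;	/* allow WRITE_EMBEDDED records */

	return (zfs_send_one(zhp, fromsnap, outfd, flags));
}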
extern int zfs_send_resume(libzfs_handle_t *, sendflags_t *, int outfd,
const char *);
extern nvlist_t *zfs_send_resume_token_to_nvlist(libzfs_handle_t *hdl,
const char *token);
extern int zfs_promote(zfs_handle_t *);
extern int zfs_hold(zfs_handle_t *, const char *, const char *,
boolean_t, int);
extern int zfs_hold_nvl(zfs_handle_t *, int, nvlist_t *);
extern int zfs_release(zfs_handle_t *, const char *, const char *, boolean_t);
extern int zfs_get_holds(zfs_handle_t *, nvlist_t **);
extern uint64_t zvol_volsize_to_reservation(uint64_t, nvlist_t *);
typedef int (*zfs_userspace_cb_t)(void *arg, const char *domain,
uid_t rid, uint64_t space);
extern int zfs_userspace(zfs_handle_t *, zfs_userquota_prop_t,
zfs_userspace_cb_t, void *);
extern int zfs_get_fsacl(zfs_handle_t *, nvlist_t **);
extern int zfs_set_fsacl(zfs_handle_t *, boolean_t, nvlist_t *);
typedef struct recvflags {
/* print informational messages (ie, -v was specified) */
boolean_t verbose;
/* the destination is a prefix, not the exact fs (ie, -d) */
boolean_t isprefix;
/*
* Only the tail of the sent snapshot path is appended to the
* destination to determine the received snapshot name (ie, -e).
*/
boolean_t istail;
/* do not actually do the recv, just check if it would work (ie, -n) */
boolean_t dryrun;
/* rollback/destroy filesystems as necessary (eg, -F) */
boolean_t force;
/* set "canmount=off" on all modified filesystems */
boolean_t canmountoff;
/*
* Mark the file systems as "resumable" and do not destroy them if the
* receive is interrupted
*/
boolean_t resumable;
/* byteswap flag is used internally; callers need not specify */
boolean_t byteswap;
/* do not mount file systems as they are extracted (private) */
boolean_t nomount;
} recvflags_t;
extern int zfs_receive(libzfs_handle_t *, const char *, nvlist_t *,
recvflags_t *, int, avl_tree_t *);
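/*
 * A minimal sketch of a resumable receive into 'tofs' from an open file
 * descriptor; receive_stream_fd() is a hypothetical helper.  The props
 * nvlist and snapshot avl_tree_t are assumed to be optional and are
 * passed as NULL here.
 */
static int
receive_stream_fd(libzfs_handle_t *hdl, const char *tofs, int infd,
    boolean_t dryrun)
{
	recvflags_t flags = { 0 };

	flags.resumable = B_TRUE;	/* keep partial state if interrupted */
	flags.dryrun = dryrun;		/* -n: verify only, receive nothing */

	return (zfs_receive(hdl, tofs, NULL, &flags, infd, NULL));
}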
typedef enum diff_flags {
ZFS_DIFF_PARSEABLE = 0x1,
ZFS_DIFF_TIMESTAMP = 0x2,
ZFS_DIFF_CLASSIFY = 0x4
} diff_flags_t;
extern int zfs_show_diffs(zfs_handle_t *, int, const char *, const char *,
int);
/*
* Miscellaneous functions.
*/
extern const char *zfs_type_to_name(zfs_type_t);
extern void zfs_refresh_properties(zfs_handle_t *);
extern int zfs_name_valid(const char *, zfs_type_t);
extern zfs_handle_t *zfs_path_to_zhandle(libzfs_handle_t *, char *, zfs_type_t);
extern int zfs_parent_name(zfs_handle_t *, char *, size_t);
extern boolean_t zfs_dataset_exists(libzfs_handle_t *, const char *,
zfs_type_t);
extern int zfs_spa_version(zfs_handle_t *, int *);
extern boolean_t zfs_bookmark_exists(const char *path);
extern int zfs_append_partition(char *path, size_t max_len);
extern int zfs_resolve_shortname(const char *name, char *path, size_t pathlen);
extern int zfs_strcmp_pathname(char *name, char *cmp_name, int wholedisk);
extern int zfs_path_order(char *path, int *order);
/*
* Mount support functions.
*/
extern boolean_t is_mounted(libzfs_handle_t *, const char *special, char **);
extern boolean_t zfs_is_mounted(zfs_handle_t *, char **);
extern int zfs_mount(zfs_handle_t *, const char *, int);
extern int zfs_unmount(zfs_handle_t *, const char *, int);
extern int zfs_unmountall(zfs_handle_t *, int);
/*
* Share support functions.
*/
extern boolean_t zfs_is_shared(zfs_handle_t *);
extern int zfs_share(zfs_handle_t *);
extern int zfs_unshare(zfs_handle_t *);
/*
* Protocol-specific share support functions.
*/
extern boolean_t zfs_is_shared_nfs(zfs_handle_t *, char **);
extern boolean_t zfs_is_shared_smb(zfs_handle_t *, char **);
extern int zfs_share_nfs(zfs_handle_t *);
extern int zfs_share_smb(zfs_handle_t *);
extern int zfs_shareall(zfs_handle_t *);
extern int zfs_unshare_nfs(zfs_handle_t *, const char *);
extern int zfs_unshare_smb(zfs_handle_t *, const char *);
extern int zfs_unshareall_nfs(zfs_handle_t *);
extern int zfs_unshareall_smb(zfs_handle_t *);
extern int zfs_unshareall_bypath(zfs_handle_t *, const char *);
extern int zfs_unshareall_bytype(zfs_handle_t *, const char *, const char *);
extern int zfs_unshareall(zfs_handle_t *);
extern int zfs_deleg_share_nfs(libzfs_handle_t *, char *, char *, char *,
void *, void *, int, zfs_share_op_t);
/*
* Formats for iostat numbers. Examples: "12K", "30ms", "4B", "2321234", "-".
*
* ZFS_NICENUM_1024: Print kilo, mega, tera, peta, exa..
* ZFS_NICENUM_BYTES: Print single bytes ("13B"), kilo, mega, tera...
* ZFS_NICENUM_TIME: Print nanosecs, microsecs, millisecs, seconds...
* ZFS_NICENUM_RAW: Print the raw number without any formatting
* ZFS_NICENUM_RAWTIME: Same as RAW, but print dashes ('-') for zero.
*/
enum zfs_nicenum_format {
ZFS_NICENUM_1024 = 0,
ZFS_NICENUM_BYTES = 1,
ZFS_NICENUM_TIME = 2,
ZFS_NICENUM_RAW = 3,
ZFS_NICENUM_RAWTIME = 4
};
/*
* Utility function to convert a number to a human-readable form.
*/
extern void zfs_nicenum(uint64_t, char *, size_t);
extern void zfs_nicenum_format(uint64_t num, char *buf, size_t buflen,
enum zfs_nicenum_format type);
extern void zfs_nicetime(uint64_t, char *, size_t);
extern void zfs_nicebytes(uint64_t, char *, size_t);
extern int zfs_nicestrtonum(libzfs_handle_t *, const char *, uint64_t *);
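/*
 * A minimal sketch of the formatting helpers above; fmt_examples() is a
 * hypothetical function and <stdio.h> is assumed.  The expected outputs
 * follow the examples given in the comment above.
 */
static void
fmt_examples(void)
{
	char buf[32];

	zfs_nicenum_format(12288, buf, sizeof (buf), ZFS_NICENUM_BYTES);
	(void) printf("%s\n", buf);	/* e.g. "12K" */

	zfs_nicenum_format(30000000, buf, sizeof (buf), ZFS_NICENUM_TIME);
	(void) printf("%s\n", buf);	/* e.g. "30ms" (input in nanoseconds) */
}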
/*
* Utility functions to run an external process.
*/
#define STDOUT_VERBOSE 0x01
#define STDERR_VERBOSE 0x02
#define NO_DEFAULT_PATH 0x04 /* Don't use $PATH to lookup the command */
int libzfs_run_process(const char *, char **, int flags);
int libzfs_run_process_get_stdout(const char *path, char *argv[], char *env[],
char **lines[], int *lines_cnt);
int libzfs_run_process_get_stdout_nopath(const char *path, char *argv[],
char *env[], char **lines[], int *lines_cnt);
void libzfs_free_str_array(char **strs, int count);
int libzfs_envvar_is_set(char *envvar);
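/*
 * A minimal sketch of capturing a helper command's stdout with the
 * routines above; run_and_print() is a hypothetical function, the argv
 * vector is NULL-terminated, and a NULL env is assumed to fall back to
 * the caller's environment.  <stdio.h> is assumed.
 */
static int
run_and_print(void)
{
	char *argv[] = { "uname", "-r", NULL };
	char **lines = NULL;
	int lines_cnt = 0;

	int err = libzfs_run_process_get_stdout("uname", argv, NULL,
	    &lines, &lines_cnt);
	if (err == 0) {
		for (int i = 0; i < lines_cnt; i++)
			(void) printf("%s\n", lines[i]);
		libzfs_free_str_array(lines, lines_cnt);
	}
	return (err);
}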
/*
* Given a device or file, determine if it is part of a pool.
*/
extern int zpool_in_use(libzfs_handle_t *, int, pool_state_t *, char **,
boolean_t *);
/*
* Label manipulation.
*/
extern int zpool_read_label(int, nvlist_t **, int *);
extern int zpool_clear_label(int);
/*
* Management interfaces for SMB ACL files
*/
int zfs_smb_acl_add(libzfs_handle_t *, char *, char *, char *);
int zfs_smb_acl_remove(libzfs_handle_t *, char *, char *, char *);
int zfs_smb_acl_purge(libzfs_handle_t *, char *, char *);
int zfs_smb_acl_rename(libzfs_handle_t *, char *, char *, char *, char *);
/*
* Enable and disable datasets within a pool by mounting/unmounting and
* sharing/unsharing them.
*/
extern int zpool_enable_datasets(zpool_handle_t *, const char *, int);
extern int zpool_disable_datasets(zpool_handle_t *, boolean_t);
/*
* Support for Linux libudev derived persistent device strings
*/
extern boolean_t is_mpath_whole_disk(const char *);
extern void update_vdev_config_dev_strs(nvlist_t *);
extern char *zfs_strip_partition(char *);
extern char *zfs_strip_partition_path(char *);
#ifdef HAVE_LIBUDEV
struct udev_device;
extern boolean_t udev_is_mpath(struct udev_device *dev);
extern int zfs_device_get_devid(struct udev_device *, char *, size_t);
extern int zfs_device_get_physical(struct udev_device *, char *, size_t);
#endif
+extern int zfs_remap_indirects(libzfs_handle_t *hdl, const char *);
+
#ifdef __cplusplus
}
#endif
#endif /* _LIBZFS_H */
diff --git a/include/libzfs_core.h b/include/libzfs_core.h
index 721d90f81ca6..5af0e1e7571b 100644
--- a/include/libzfs_core.h
+++ b/include/libzfs_core.h
@@ -1,116 +1,117 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright 2017 RackTop Systems.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
#ifndef _LIBZFS_CORE_H
#define _LIBZFS_CORE_H
#include <libnvpair.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#ifdef __cplusplus
extern "C" {
#endif
int libzfs_core_init(void);
void libzfs_core_fini(void);
/*
* NB: this type should be kept binary compatible with dmu_objset_type_t.
*/
enum lzc_dataset_type {
LZC_DATSET_TYPE_ZFS = 2,
LZC_DATSET_TYPE_ZVOL
};
+int lzc_remap(const char *fsname);
int lzc_snapshot(nvlist_t *, nvlist_t *, nvlist_t **);
int lzc_create(const char *, enum lzc_dataset_type, nvlist_t *, uint8_t *,
uint_t);
int lzc_clone(const char *, const char *, nvlist_t *);
int lzc_promote(const char *, char *, int);
int lzc_destroy_snaps(nvlist_t *, boolean_t, nvlist_t **);
int lzc_bookmark(nvlist_t *, nvlist_t **);
int lzc_get_bookmarks(const char *, nvlist_t *, nvlist_t **);
int lzc_destroy_bookmarks(nvlist_t *, nvlist_t **);
int lzc_load_key(const char *, boolean_t, uint8_t *, uint_t);
int lzc_unload_key(const char *);
int lzc_change_key(const char *, uint64_t, nvlist_t *, uint8_t *, uint_t);
int lzc_snaprange_space(const char *, const char *, uint64_t *);
int lzc_hold(nvlist_t *, int, nvlist_t **);
int lzc_release(nvlist_t *, nvlist_t **);
int lzc_get_holds(const char *, nvlist_t **);
enum lzc_send_flags {
LZC_SEND_FLAG_EMBED_DATA = 1 << 0,
LZC_SEND_FLAG_LARGE_BLOCK = 1 << 1,
LZC_SEND_FLAG_COMPRESS = 1 << 2,
LZC_SEND_FLAG_RAW = 1 << 3,
};
int lzc_send(const char *, const char *, int, enum lzc_send_flags);
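/*
 * A minimal sketch of lzc_send() with large-block and compressed records
 * enabled; send_full_snapshot() is a hypothetical helper.  Passing NULL
 * for the 'from' snapshot requests a full (non-incremental) stream.
 */
static int
send_full_snapshot(const char *snapname, int outfd)
{
	enum lzc_send_flags flags =
	    LZC_SEND_FLAG_LARGE_BLOCK | LZC_SEND_FLAG_COMPRESS;

	return (lzc_send(snapname, NULL, outfd, flags));
}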
int lzc_send_resume(const char *, const char *, int,
enum lzc_send_flags, uint64_t, uint64_t);
int lzc_send_space(const char *, const char *, enum lzc_send_flags, uint64_t *);
struct dmu_replay_record;
int lzc_receive(const char *, nvlist_t *, const char *, boolean_t, boolean_t,
int);
int lzc_receive_resumable(const char *, nvlist_t *, const char *, boolean_t,
boolean_t, int);
int lzc_receive_with_header(const char *, nvlist_t *, const char *, boolean_t,
boolean_t, boolean_t, int, const struct dmu_replay_record *);
int lzc_receive_one(const char *, nvlist_t *, const char *, boolean_t,
boolean_t, boolean_t, int, const struct dmu_replay_record *, int,
uint64_t *, uint64_t *, uint64_t *, nvlist_t **);
int lzc_receive_with_cmdprops(const char *, nvlist_t *, nvlist_t *,
const char *, boolean_t, boolean_t, boolean_t, int,
const struct dmu_replay_record *, int, uint64_t *, uint64_t *,
uint64_t *, nvlist_t **);
boolean_t lzc_exists(const char *);
int lzc_rollback(const char *, char *, int);
int lzc_rollback_to(const char *, const char *);
int lzc_channel_program(const char *, const char *, uint64_t,
uint64_t, nvlist_t *, nvlist_t **);
int lzc_channel_program_nosync(const char *, const char *, uint64_t,
uint64_t, nvlist_t *, nvlist_t **);
int lzc_sync(const char *, nvlist_t *, nvlist_t **);
int lzc_reopen(const char *, boolean_t);
#ifdef __cplusplus
}
#endif
#endif /* _LIBZFS_CORE_H */
diff --git a/include/sys/Makefile.am b/include/sys/Makefile.am
index 8e18a87904a8..f30c9427e90b 100644
--- a/include/sys/Makefile.am
+++ b/include/sys/Makefile.am
@@ -1,144 +1,148 @@
SUBDIRS = fm fs crypto lua sysevent
COMMON_H = \
$(top_srcdir)/include/sys/abd.h \
$(top_srcdir)/include/sys/arc.h \
$(top_srcdir)/include/sys/arc_impl.h \
$(top_srcdir)/include/sys/avl.h \
$(top_srcdir)/include/sys/avl_impl.h \
$(top_srcdir)/include/sys/blkptr.h \
$(top_srcdir)/include/sys/bplist.h \
$(top_srcdir)/include/sys/bpobj.h \
$(top_srcdir)/include/sys/bptree.h \
$(top_srcdir)/include/sys/bqueue.h \
$(top_srcdir)/include/sys/dbuf.h \
$(top_srcdir)/include/sys/ddt.h \
$(top_srcdir)/include/sys/dmu.h \
$(top_srcdir)/include/sys/dmu_impl.h \
$(top_srcdir)/include/sys/dmu_objset.h \
$(top_srcdir)/include/sys/dmu_send.h \
$(top_srcdir)/include/sys/dmu_traverse.h \
$(top_srcdir)/include/sys/dmu_tx.h \
$(top_srcdir)/include/sys/dmu_zfetch.h \
$(top_srcdir)/include/sys/dnode.h \
$(top_srcdir)/include/sys/dsl_bookmark.h \
$(top_srcdir)/include/sys/dsl_dataset.h \
$(top_srcdir)/include/sys/dsl_deadlist.h \
$(top_srcdir)/include/sys/dsl_deleg.h \
$(top_srcdir)/include/sys/dsl_destroy.h \
$(top_srcdir)/include/sys/dsl_dir.h \
$(top_srcdir)/include/sys/dsl_crypt.h \
$(top_srcdir)/include/sys/dsl_pool.h \
$(top_srcdir)/include/sys/dsl_prop.h \
$(top_srcdir)/include/sys/dsl_scan.h \
$(top_srcdir)/include/sys/dsl_synctask.h \
$(top_srcdir)/include/sys/dsl_userhold.h \
$(top_srcdir)/include/sys/edonr.h \
$(top_srcdir)/include/sys/efi_partition.h \
$(top_srcdir)/include/sys/frame.h \
$(top_srcdir)/include/sys/hkdf.h \
$(top_srcdir)/include/sys/metaslab.h \
$(top_srcdir)/include/sys/metaslab_impl.h \
$(top_srcdir)/include/sys/mmp.h \
$(top_srcdir)/include/sys/mntent.h \
$(top_srcdir)/include/sys/multilist.h \
$(top_srcdir)/include/sys/nvpair.h \
$(top_srcdir)/include/sys/nvpair_impl.h \
$(top_srcdir)/include/sys/pathname.h \
$(top_srcdir)/include/sys/policy.h \
$(top_srcdir)/include/sys/range_tree.h \
$(top_srcdir)/include/sys/refcount.h \
$(top_srcdir)/include/sys/rrwlock.h \
$(top_srcdir)/include/sys/sa.h \
$(top_srcdir)/include/sys/sa_impl.h \
$(top_srcdir)/include/sys/sdt.h \
$(top_srcdir)/include/sys/sha2.h \
$(top_srcdir)/include/sys/skein.h \
$(top_srcdir)/include/sys/spa_boot.h \
$(top_srcdir)/include/sys/space_map.h \
$(top_srcdir)/include/sys/space_reftree.h \
$(top_srcdir)/include/sys/spa.h \
$(top_srcdir)/include/sys/spa_impl.h \
$(top_srcdir)/include/sys/spa_checksum.h \
$(top_srcdir)/include/sys/sysevent.h \
$(top_srcdir)/include/sys/trace.h \
$(top_srcdir)/include/sys/trace_acl.h \
$(top_srcdir)/include/sys/trace_arc.h \
$(top_srcdir)/include/sys/trace_common.h \
$(top_srcdir)/include/sys/trace_dbgmsg.h \
$(top_srcdir)/include/sys/trace_dbuf.h \
$(top_srcdir)/include/sys/trace_dmu.h \
$(top_srcdir)/include/sys/trace_dnode.h \
$(top_srcdir)/include/sys/trace_multilist.h \
$(top_srcdir)/include/sys/trace_txg.h \
+ $(top_srcdir)/include/sys/trace_vdev.h \
$(top_srcdir)/include/sys/trace_zil.h \
$(top_srcdir)/include/sys/trace_zio.h \
$(top_srcdir)/include/sys/trace_zrlock.h \
$(top_srcdir)/include/sys/txg.h \
$(top_srcdir)/include/sys/txg_impl.h \
$(top_srcdir)/include/sys/u8_textprep_data.h \
$(top_srcdir)/include/sys/u8_textprep.h \
$(top_srcdir)/include/sys/uberblock.h \
$(top_srcdir)/include/sys/uberblock_impl.h \
$(top_srcdir)/include/sys/uio_impl.h \
$(top_srcdir)/include/sys/unique.h \
$(top_srcdir)/include/sys/uuid.h \
$(top_srcdir)/include/sys/vdev_disk.h \
$(top_srcdir)/include/sys/vdev_file.h \
$(top_srcdir)/include/sys/vdev.h \
$(top_srcdir)/include/sys/vdev_impl.h \
+ $(top_srcdir)/include/sys/vdev_indirect_births.h \
+ $(top_srcdir)/include/sys/vdev_indirect_mapping.h \
$(top_srcdir)/include/sys/vdev_raidz.h \
$(top_srcdir)/include/sys/vdev_raidz_impl.h \
+ $(top_srcdir)/include/sys/vdev_removal.h \
$(top_srcdir)/include/sys/xvattr.h \
$(top_srcdir)/include/sys/zap.h \
$(top_srcdir)/include/sys/zap_impl.h \
$(top_srcdir)/include/sys/zap_leaf.h \
$(top_srcdir)/include/sys/zcp.h \
$(top_srcdir)/include/sys/zcp_global.h \
$(top_srcdir)/include/sys/zcp_iter.h \
$(top_srcdir)/include/sys/zcp_prop.h \
$(top_srcdir)/include/sys/zfeature.h \
$(top_srcdir)/include/sys/zfs_acl.h \
$(top_srcdir)/include/sys/zfs_context.h \
$(top_srcdir)/include/sys/zfs_ctldir.h \
$(top_srcdir)/include/sys/zfs_debug.h \
$(top_srcdir)/include/sys/zfs_delay.h \
$(top_srcdir)/include/sys/zfs_dir.h \
$(top_srcdir)/include/sys/zfs_fuid.h \
$(top_srcdir)/include/sys/zfs_project.h \
$(top_srcdir)/include/sys/zfs_ratelimit.h \
$(top_srcdir)/include/sys/zfs_rlock.h \
$(top_srcdir)/include/sys/zfs_sa.h \
$(top_srcdir)/include/sys/zfs_stat.h \
$(top_srcdir)/include/sys/zfs_vfsops.h \
$(top_srcdir)/include/sys/zfs_vnops.h \
$(top_srcdir)/include/sys/zfs_znode.h \
$(top_srcdir)/include/sys/zil.h \
$(top_srcdir)/include/sys/zil_impl.h \
$(top_srcdir)/include/sys/zio_checksum.h \
$(top_srcdir)/include/sys/zio_compress.h \
$(top_srcdir)/include/sys/zio_crypt.h \
$(top_srcdir)/include/sys/zio.h \
$(top_srcdir)/include/sys/zio_impl.h \
$(top_srcdir)/include/sys/zio_priority.h \
$(top_srcdir)/include/sys/zrlock.h
KERNEL_H = \
$(top_srcdir)/include/sys/zfs_ioctl.h \
$(top_srcdir)/include/sys/zfs_onexit.h \
$(top_srcdir)/include/sys/zpl.h \
$(top_srcdir)/include/sys/zvol.h
USER_H =
EXTRA_DIST = $(COMMON_H) $(KERNEL_H) $(USER_H)
if CONFIG_USER
libzfsdir = $(includedir)/libzfs/sys
libzfs_HEADERS = $(COMMON_H) $(USER_H)
endif
if CONFIG_KERNEL
kerneldir = @prefix@/src/zfs-$(VERSION)/include/sys
kernel_HEADERS = $(COMMON_H) $(KERNEL_H)
endif
diff --git a/include/sys/bpobj.h b/include/sys/bpobj.h
index 2a365199ce44..d425e239f6a6 100644
--- a/include/sys/bpobj.h
+++ b/include/sys/bpobj.h
@@ -1,93 +1,95 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
*/
#ifndef _SYS_BPOBJ_H
#define _SYS_BPOBJ_H
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct bpobj_phys {
/*
* This is the bonus buffer for the dead lists. The object's
* contents are an array of bpo_num_blkptrs blkptr_t's, representing
* a total of bpo_bytes physical space.
*/
uint64_t bpo_num_blkptrs;
uint64_t bpo_bytes;
uint64_t bpo_comp;
uint64_t bpo_uncomp;
uint64_t bpo_subobjs;
uint64_t bpo_num_subobjs;
} bpobj_phys_t;
#define BPOBJ_SIZE_V0 (2 * sizeof (uint64_t))
#define BPOBJ_SIZE_V1 (4 * sizeof (uint64_t))
typedef struct bpobj {
kmutex_t bpo_lock;
objset_t *bpo_os;
uint64_t bpo_object;
int bpo_epb;
uint8_t bpo_havecomp;
uint8_t bpo_havesubobj;
bpobj_phys_t *bpo_phys;
dmu_buf_t *bpo_dbuf;
dmu_buf_t *bpo_cached_dbuf;
} bpobj_t;
typedef int bpobj_itor_t(void *arg, const blkptr_t *bp, dmu_tx_t *tx);
uint64_t bpobj_alloc(objset_t *mos, int blocksize, dmu_tx_t *tx);
uint64_t bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx);
void bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx);
void bpobj_decr_empty(objset_t *os, dmu_tx_t *tx);
int bpobj_open(bpobj_t *bpo, objset_t *mos, uint64_t object);
void bpobj_close(bpobj_t *bpo);
+boolean_t bpobj_is_open(const bpobj_t *bpo);
int bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx);
int bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *, dmu_tx_t *);
void bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx);
void bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, dmu_tx_t *tx);
int bpobj_space(bpobj_t *bpo,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
int bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
+boolean_t bpobj_is_empty(bpobj_t *bpo);
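/*
 * Illustrative sketch of the open/iterate/close pattern implied by the
 * prototypes above; the helper and callback names are hypothetical and the
 * caller is assumed to supply the MOS objset and (for the iterator) a tx.
 */
static int
count_bps_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
	uint64_t *countp = arg;

	(*countp)++;		/* inspect each block pointer */
	return (0);
}

static int
count_bpobj_entries(objset_t *mos, uint64_t obj, dmu_tx_t *tx,
    uint64_t *countp)
{
	bpobj_t bpo;
	int err;

	err = bpobj_open(&bpo, mos, obj);
	if (err != 0)
		return (err);
	*countp = 0;
	/* walk the entries without freeing them */
	err = bpobj_iterate_nofree(&bpo, count_bps_cb, countp, tx);
	bpobj_close(&bpo);
	return (err);
}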
#ifdef __cplusplus
}
#endif
#endif /* _SYS_BPOBJ_H */
diff --git a/include/sys/dbuf.h b/include/sys/dbuf.h
index 633dfd25aa17..32f036862cc1 100644
--- a/include/sys/dbuf.h
+++ b/include/sys/dbuf.h
@@ -1,406 +1,408 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DBUF_H
#define _SYS_DBUF_H
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>
#ifdef __cplusplus
extern "C" {
#endif
#define IN_DMU_SYNC 2
/*
* define flags for dbuf_read
*/
#define DB_RF_MUST_SUCCEED (1 << 0)
#define DB_RF_CANFAIL (1 << 1)
#define DB_RF_HAVESTRUCT (1 << 2)
#define DB_RF_NOPREFETCH (1 << 3)
#define DB_RF_NEVERWAIT (1 << 4)
#define DB_RF_CACHED (1 << 5)
#define DB_RF_NO_DECRYPT (1 << 6)
/*
* The simplified state transition diagram for dbufs looks like:
*
* +----> READ ----+
* | |
* | V
* (alloc)-->UNCACHED CACHED-->EVICTING-->(free)
* | ^ ^
* | | |
* +----> FILL ----+ |
* | |
* | |
* +--------> NOFILL -------+
*
* DB_SEARCH is an invalid state for a dbuf. It is used by dbuf_free_range
* to find all dbufs in a range of a dnode and must be less than any other
* dbuf_states_t (see comment on dn_dbufs in dnode.h).
*/
typedef enum dbuf_states {
DB_SEARCH = -1,
DB_UNCACHED,
DB_FILL,
DB_NOFILL,
DB_READ,
DB_CACHED,
DB_EVICTING
} dbuf_states_t;
struct dnode;
struct dmu_tx;
/*
* level = 0 means the user data
* level = 1 means the single indirect block
* etc.
*/
struct dmu_buf_impl;
typedef enum override_states {
DR_NOT_OVERRIDDEN,
DR_IN_DMU_SYNC,
DR_OVERRIDDEN
} override_states_t;
typedef struct dbuf_dirty_record {
/* link on our parents dirty list */
list_node_t dr_dirty_node;
/* transaction group this data will sync in */
uint64_t dr_txg;
/* zio of outstanding write IO */
zio_t *dr_zio;
/* pointer back to our dbuf */
struct dmu_buf_impl *dr_dbuf;
/* pointer to next dirty record */
struct dbuf_dirty_record *dr_next;
/* pointer to parent dirty record */
struct dbuf_dirty_record *dr_parent;
/* How much space was changed to dsl_pool_dirty_space() for this? */
unsigned int dr_accounted;
/* A copy of the bp that points to us */
blkptr_t dr_bp_copy;
union dirty_types {
struct dirty_indirect {
/* protect access to list */
kmutex_t dr_mtx;
/* Our list of dirty children */
list_t dr_children;
} di;
struct dirty_leaf {
/*
* dr_data is set when we dirty the buffer
* so that we can retain the pointer even if it
* gets COW'd in a subsequent transaction group.
*/
arc_buf_t *dr_data;
blkptr_t dr_overridden_by;
override_states_t dr_override_state;
uint8_t dr_copies;
boolean_t dr_nopwrite;
boolean_t dr_raw;
} dl;
} dt;
} dbuf_dirty_record_t;
typedef struct dmu_buf_impl {
/*
* The following members are immutable, with the exception of
* db.db_data, which is protected by db_mtx.
*/
/* the publicly visible structure */
dmu_buf_t db;
/* the objset we belong to */
struct objset *db_objset;
/*
* handle to safely access the dnode we belong to (NULL when evicted)
*/
struct dnode_handle *db_dnode_handle;
/*
* our parent buffer; if the dnode points to us directly,
* db_parent == db_dnode_handle->dnh_dnode->dn_dbuf
* only accessed by sync thread ???
* (NULL when evicted)
* May change from NULL to non-NULL under the protection of db_mtx
* (see dbuf_check_blkptr())
*/
struct dmu_buf_impl *db_parent;
/*
* link for hash table of all dmu_buf_impl_t's
*/
struct dmu_buf_impl *db_hash_next;
/* our block number */
uint64_t db_blkid;
/*
* Pointer to the blkptr_t which points to us. May be NULL if we
* don't have one yet. (NULL when evicted)
*/
blkptr_t *db_blkptr;
/*
* Our indirection level. Data buffers have db_level==0.
* Indirect buffers which point to data buffers have
* db_level==1. etc. Buffers which contain dnodes have
* db_level==0, since the dnodes are stored in a file.
*/
uint8_t db_level;
/* db_mtx protects the members below */
kmutex_t db_mtx;
/*
* Current state of the buffer
*/
dbuf_states_t db_state;
/*
* Refcount accessed by dmu_buf_{hold,rele}.
* If nonzero, the buffer can't be destroyed.
* Protected by db_mtx.
*/
refcount_t db_holds;
/* buffer holding our data */
arc_buf_t *db_buf;
kcondvar_t db_changed;
dbuf_dirty_record_t *db_data_pending;
/* pointer to most recent dirty record for this buffer */
dbuf_dirty_record_t *db_last_dirty;
/*
* Our link on the owning dnode's dn_dbufs list.
* Protected by its dn_dbufs_mtx.
*/
avl_node_t db_link;
/*
* Link in dbuf_cache.
*/
multilist_node_t db_cache_link;
/* Data which is unique to data (leaf) blocks: */
/* User callback information. */
dmu_buf_user_t *db_user;
/*
* Evict user data as soon as the dirty and reference
* counts are equal.
*/
uint8_t db_user_immediate_evict;
/*
* This block was freed while a read or write was
* active.
*/
uint8_t db_freed_in_flight;
/*
* dnode_evict_dbufs() or dnode_evict_bonus() tried to
* evict this dbuf, but couldn't due to outstanding
* references. Evict once the refcount drops to 0.
*/
uint8_t db_pending_evict;
uint8_t db_dirtycnt;
} dmu_buf_impl_t;
/* Note: the dbuf hash table is exposed only for the mdb module */
#define DBUF_MUTEXES 8192
#define DBUF_HASH_MUTEX(h, idx) (&(h)->hash_mutexes[(idx) & (DBUF_MUTEXES-1)])
typedef struct dbuf_hash_table {
uint64_t hash_table_mask;
dmu_buf_impl_t **hash_table;
kmutex_t hash_mutexes[DBUF_MUTEXES];
} dbuf_hash_table_t;
uint64_t dbuf_whichblock(const struct dnode *di, const int64_t level,
const uint64_t offset);
void dbuf_create_bonus(struct dnode *dn);
int dbuf_spill_set_blksz(dmu_buf_t *db, uint64_t blksz, dmu_tx_t *tx);
void dbuf_rm_spill(struct dnode *dn, dmu_tx_t *tx);
dmu_buf_impl_t *dbuf_hold(struct dnode *dn, uint64_t blkid, void *tag);
dmu_buf_impl_t *dbuf_hold_level(struct dnode *dn, int level, uint64_t blkid,
void *tag);
int dbuf_hold_impl(struct dnode *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
void *tag, dmu_buf_impl_t **dbp);
void dbuf_prefetch(struct dnode *dn, int64_t level, uint64_t blkid,
zio_priority_t prio, arc_flags_t aflags);
void dbuf_add_ref(dmu_buf_impl_t *db, void *tag);
boolean_t dbuf_try_add_ref(dmu_buf_t *db, objset_t *os, uint64_t obj,
uint64_t blkid, void *tag);
uint64_t dbuf_refcount(dmu_buf_impl_t *db);
void dbuf_rele(dmu_buf_impl_t *db, void *tag);
void dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag);
dmu_buf_impl_t *dbuf_find(struct objset *os, uint64_t object, uint8_t level,
uint64_t blkid);
int dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags);
void dmu_buf_will_not_fill(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_will_fill(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_fill_done(dmu_buf_t *db, dmu_tx_t *tx);
void dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx);
dbuf_dirty_record_t *dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
arc_buf_t *dbuf_loan_arcbuf(dmu_buf_impl_t *db);
void dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder, dmu_tx_t *tx);
void dbuf_destroy(dmu_buf_impl_t *db);
void dbuf_unoverride(dbuf_dirty_record_t *dr);
void dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx);
void dbuf_release_bp(dmu_buf_impl_t *db);
+boolean_t dbuf_can_remap(const dmu_buf_impl_t *buf);
+
void dbuf_free_range(struct dnode *dn, uint64_t start, uint64_t end,
struct dmu_tx *);
void dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx);
void dbuf_stats_init(dbuf_hash_table_t *hash);
void dbuf_stats_destroy(void);
#define DB_DNODE(_db) ((_db)->db_dnode_handle->dnh_dnode)
#define DB_DNODE_LOCK(_db) ((_db)->db_dnode_handle->dnh_zrlock)
#define DB_DNODE_ENTER(_db) (zrl_add(&DB_DNODE_LOCK(_db)))
#define DB_DNODE_EXIT(_db) (zrl_remove(&DB_DNODE_LOCK(_db)))
#define DB_DNODE_HELD(_db) (!zrl_is_zero(&DB_DNODE_LOCK(_db)))
void dbuf_init(void);
void dbuf_fini(void);
boolean_t dbuf_is_metadata(dmu_buf_impl_t *db);
#define DBUF_GET_BUFC_TYPE(_db) \
(dbuf_is_metadata(_db) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)
#define DBUF_IS_CACHEABLE(_db) \
((_db)->db_objset->os_primary_cache == ZFS_CACHE_ALL || \
(dbuf_is_metadata(_db) && \
((_db)->db_objset->os_primary_cache == ZFS_CACHE_METADATA)))
#define DBUF_IS_L2CACHEABLE(_db) \
((_db)->db_objset->os_secondary_cache == ZFS_CACHE_ALL || \
(dbuf_is_metadata(_db) && \
((_db)->db_objset->os_secondary_cache == ZFS_CACHE_METADATA)))
#define DNODE_LEVEL_IS_L2CACHEABLE(_dn, _level) \
((_dn)->dn_objset->os_secondary_cache == ZFS_CACHE_ALL || \
(((_level) > 0 || \
DMU_OT_IS_METADATA((_dn)->dn_handle->dnh_dnode->dn_type)) && \
((_dn)->dn_objset->os_secondary_cache == ZFS_CACHE_METADATA)))
#ifdef ZFS_DEBUG
/*
* There should be a ## between the string literal and fmt, to make it
* clear that we're joining two strings together, but gcc does not
* support that preprocessor token.
*/
#define dprintf_dbuf(dbuf, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char __db_buf[32]; \
uint64_t __db_obj = (dbuf)->db.db_object; \
if (__db_obj == DMU_META_DNODE_OBJECT) \
(void) strcpy(__db_buf, "mdn"); \
else \
(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
(u_longlong_t)__db_obj); \
dprintf_ds((dbuf)->db_objset->os_dsl_dataset, \
"obj=%s lvl=%u blkid=%lld " fmt, \
__db_buf, (dbuf)->db_level, \
(u_longlong_t)(dbuf)->db_blkid, __VA_ARGS__); \
} \
_NOTE(CONSTCOND) } while (0)
#define dprintf_dbuf_bp(db, bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, bp); \
dprintf_dbuf(db, fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
_NOTE(CONSTCOND) } while (0)
#define DBUF_VERIFY(db) dbuf_verify(db)
#else
#define dprintf_dbuf(db, fmt, ...)
#define dprintf_dbuf_bp(db, bp, fmt, ...)
#define DBUF_VERIFY(db)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DBUF_H */
diff --git a/include/sys/dmu.h b/include/sys/dmu.h
index 027d3d9fc02f..ccc07006d30a 100644
--- a/include/sys/dmu.h
+++ b/include/sys/dmu.h
@@ -1,1050 +1,1057 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright 2014 HybridCluster. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#ifndef _SYS_DMU_H
#define _SYS_DMU_H
/*
* This file describes the interface that the DMU provides for its
* consumers.
*
* The DMU also interacts with the SPA. That interface is described in
* dmu_spa.h.
*/
#include <sys/zfs_context.h>
#include <sys/inttypes.h>
#include <sys/cred.h>
#include <sys/fs/zfs.h>
#include <sys/zio_compress.h>
#include <sys/zio_priority.h>
#include <sys/uio.h>
#ifdef __cplusplus
extern "C" {
#endif
struct page;
struct vnode;
struct spa;
struct zilog;
struct zio;
struct blkptr;
struct zap_cursor;
struct dsl_dataset;
struct dsl_pool;
struct dnode;
struct drr_begin;
struct drr_end;
struct zbookmark_phys;
struct spa;
struct nvlist;
struct arc_buf;
struct zio_prop;
struct sa_handle;
struct dsl_crypto_params;
typedef struct objset objset_t;
typedef struct dmu_tx dmu_tx_t;
typedef struct dsl_dir dsl_dir_t;
typedef struct dnode dnode_t;
typedef enum dmu_object_byteswap {
DMU_BSWAP_UINT8,
DMU_BSWAP_UINT16,
DMU_BSWAP_UINT32,
DMU_BSWAP_UINT64,
DMU_BSWAP_ZAP,
DMU_BSWAP_DNODE,
DMU_BSWAP_OBJSET,
DMU_BSWAP_ZNODE,
DMU_BSWAP_OLDACL,
DMU_BSWAP_ACL,
/*
* Allocating a new byteswap type number makes the on-disk format
* incompatible with any other format that uses the same number.
*
* Data can usually be structured to work with one of the
* DMU_BSWAP_UINT* or DMU_BSWAP_ZAP types.
*/
DMU_BSWAP_NUMFUNCS
} dmu_object_byteswap_t;
#define DMU_OT_NEWTYPE 0x80
#define DMU_OT_METADATA 0x40
#define DMU_OT_ENCRYPTED 0x20
#define DMU_OT_BYTESWAP_MASK 0x1f
/*
* Defines a uint8_t object type. Object types specify if the data
* in the object is metadata (boolean) and how to byteswap the data
* (dmu_object_byteswap_t).
*/
#define DMU_OT(byteswap, metadata, encrypted) \
(DMU_OT_NEWTYPE | \
((metadata) ? DMU_OT_METADATA : 0) | \
((encrypted) ? DMU_OT_ENCRYPTED : 0) | \
((byteswap) & DMU_OT_BYTESWAP_MASK))
#define DMU_OT_IS_VALID(ot) (((ot) & DMU_OT_NEWTYPE) ? \
((ot) & DMU_OT_BYTESWAP_MASK) < DMU_BSWAP_NUMFUNCS : \
(ot) < DMU_OT_NUMTYPES)
/*
* MDB doesn't have dmu_ot; it defines these macros itself.
*/
#ifndef ZFS_MDB
#define DMU_OT_IS_METADATA_IMPL(ot) (dmu_ot[ot].ot_metadata)
#define DMU_OT_IS_ENCRYPTED_IMPL(ot) (dmu_ot[ot].ot_encrypt)
#define DMU_OT_BYTESWAP_IMPL(ot) (dmu_ot[ot].ot_byteswap)
#endif
#define DMU_OT_IS_METADATA(ot) (((ot) & DMU_OT_NEWTYPE) ? \
((ot) & DMU_OT_METADATA) : \
DMU_OT_IS_METADATA_IMPL(ot))
#define DMU_OT_IS_ENCRYPTED(ot) (((ot) & DMU_OT_NEWTYPE) ? \
((ot) & DMU_OT_ENCRYPTED) : \
DMU_OT_IS_ENCRYPTED_IMPL(ot))
/*
* These object types use bp_fill != 1 for their L0 bp's. Therefore they can't
* have their data embedded (i.e. use a BP_IS_EMBEDDED() bp), because bp_fill
* is repurposed for embedded BPs.
*/
#define DMU_OT_HAS_FILL(ot) \
((ot) == DMU_OT_DNODE || (ot) == DMU_OT_OBJSET)
#define DMU_OT_BYTESWAP(ot) (((ot) & DMU_OT_NEWTYPE) ? \
((ot) & DMU_OT_BYTESWAP_MASK) : \
DMU_OT_BYTESWAP_IMPL(ot))
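/*
 * Worked example for the macros above (illustrative only):
 * DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_FALSE) yields a type with
 * DMU_OT_NEWTYPE and DMU_OT_METADATA set and DMU_BSWAP_UINT64 in the
 * byteswap field, so DMU_OT_IS_VALID() and DMU_OT_IS_METADATA() are true
 * for it, DMU_OT_IS_ENCRYPTED() is false, and DMU_OT_BYTESWAP() returns
 * DMU_BSWAP_UINT64.
 */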
typedef enum dmu_object_type {
DMU_OT_NONE,
/* general: */
DMU_OT_OBJECT_DIRECTORY, /* ZAP */
DMU_OT_OBJECT_ARRAY, /* UINT64 */
DMU_OT_PACKED_NVLIST, /* UINT8 (XDR by nvlist_pack/unpack) */
DMU_OT_PACKED_NVLIST_SIZE, /* UINT64 */
DMU_OT_BPOBJ, /* UINT64 */
DMU_OT_BPOBJ_HDR, /* UINT64 */
/* spa: */
DMU_OT_SPACE_MAP_HEADER, /* UINT64 */
DMU_OT_SPACE_MAP, /* UINT64 */
/* zil: */
DMU_OT_INTENT_LOG, /* UINT64 */
/* dmu: */
DMU_OT_DNODE, /* DNODE */
DMU_OT_OBJSET, /* OBJSET */
/* dsl: */
DMU_OT_DSL_DIR, /* UINT64 */
DMU_OT_DSL_DIR_CHILD_MAP, /* ZAP */
DMU_OT_DSL_DS_SNAP_MAP, /* ZAP */
DMU_OT_DSL_PROPS, /* ZAP */
DMU_OT_DSL_DATASET, /* UINT64 */
/* zpl: */
DMU_OT_ZNODE, /* ZNODE */
DMU_OT_OLDACL, /* Old ACL */
DMU_OT_PLAIN_FILE_CONTENTS, /* UINT8 */
DMU_OT_DIRECTORY_CONTENTS, /* ZAP */
DMU_OT_MASTER_NODE, /* ZAP */
DMU_OT_UNLINKED_SET, /* ZAP */
/* zvol: */
DMU_OT_ZVOL, /* UINT8 */
DMU_OT_ZVOL_PROP, /* ZAP */
/* other; for testing only! */
DMU_OT_PLAIN_OTHER, /* UINT8 */
DMU_OT_UINT64_OTHER, /* UINT64 */
DMU_OT_ZAP_OTHER, /* ZAP */
/* new object types: */
DMU_OT_ERROR_LOG, /* ZAP */
DMU_OT_SPA_HISTORY, /* UINT8 */
DMU_OT_SPA_HISTORY_OFFSETS, /* spa_his_phys_t */
DMU_OT_POOL_PROPS, /* ZAP */
DMU_OT_DSL_PERMS, /* ZAP */
DMU_OT_ACL, /* ACL */
DMU_OT_SYSACL, /* SYSACL */
DMU_OT_FUID, /* FUID table (Packed NVLIST UINT8) */
DMU_OT_FUID_SIZE, /* FUID table size UINT64 */
DMU_OT_NEXT_CLONES, /* ZAP */
DMU_OT_SCAN_QUEUE, /* ZAP */
DMU_OT_USERGROUP_USED, /* ZAP */
DMU_OT_USERGROUP_QUOTA, /* ZAP */
DMU_OT_USERREFS, /* ZAP */
DMU_OT_DDT_ZAP, /* ZAP */
DMU_OT_DDT_STATS, /* ZAP */
DMU_OT_SA, /* System attr */
DMU_OT_SA_MASTER_NODE, /* ZAP */
DMU_OT_SA_ATTR_REGISTRATION, /* ZAP */
DMU_OT_SA_ATTR_LAYOUTS, /* ZAP */
DMU_OT_SCAN_XLATE, /* ZAP */
DMU_OT_DEDUP, /* fake dedup BP from ddt_bp_create() */
DMU_OT_DEADLIST, /* ZAP */
DMU_OT_DEADLIST_HDR, /* UINT64 */
DMU_OT_DSL_CLONES, /* ZAP */
DMU_OT_BPOBJ_SUBOBJ, /* UINT64 */
/*
* Do not allocate new object types here. Doing so makes the on-disk
* format incompatible with any other format that uses the same object
* type number.
*
* When creating an object which does not have one of the above types
* use the DMU_OTN_* type with the correct byteswap and metadata
* values.
*
* The DMU_OTN_* types do not have entries in the dmu_ot table,
* use the DMU_OT_IS_METADATA() and DMU_OT_BYTESWAP() macros instead
* of indexing into dmu_ot directly (this works for both DMU_OT_* types
* and DMU_OTN_* types).
*/
DMU_OT_NUMTYPES,
/*
* Names for valid types declared with DMU_OT().
*/
DMU_OTN_UINT8_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE, B_FALSE),
DMU_OTN_UINT8_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE, B_FALSE),
DMU_OTN_UINT16_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE, B_FALSE),
DMU_OTN_UINT16_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE, B_FALSE),
DMU_OTN_UINT32_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE, B_FALSE),
DMU_OTN_UINT32_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE, B_FALSE),
DMU_OTN_UINT64_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE, B_FALSE),
DMU_OTN_UINT64_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_FALSE),
DMU_OTN_ZAP_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE, B_FALSE),
DMU_OTN_ZAP_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE, B_FALSE),
DMU_OTN_UINT8_ENC_DATA = DMU_OT(DMU_BSWAP_UINT8, B_FALSE, B_TRUE),
DMU_OTN_UINT8_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT8, B_TRUE, B_TRUE),
DMU_OTN_UINT16_ENC_DATA = DMU_OT(DMU_BSWAP_UINT16, B_FALSE, B_TRUE),
DMU_OTN_UINT16_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT16, B_TRUE, B_TRUE),
DMU_OTN_UINT32_ENC_DATA = DMU_OT(DMU_BSWAP_UINT32, B_FALSE, B_TRUE),
DMU_OTN_UINT32_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT32, B_TRUE, B_TRUE),
DMU_OTN_UINT64_ENC_DATA = DMU_OT(DMU_BSWAP_UINT64, B_FALSE, B_TRUE),
DMU_OTN_UINT64_ENC_METADATA = DMU_OT(DMU_BSWAP_UINT64, B_TRUE, B_TRUE),
DMU_OTN_ZAP_ENC_DATA = DMU_OT(DMU_BSWAP_ZAP, B_FALSE, B_TRUE),
DMU_OTN_ZAP_ENC_METADATA = DMU_OT(DMU_BSWAP_ZAP, B_TRUE, B_TRUE),
} dmu_object_type_t;
/*
* These flags are intended to be used to specify the "txg_how"
* parameter when calling the dmu_tx_assign() function. See the comment
* above dmu_tx_assign() for more details on the meaning of these flags.
*/
#define TXG_NOWAIT (0ULL)
#define TXG_WAIT (1ULL<<0)
#define TXG_NOTHROTTLE (1ULL<<1)
void byteswap_uint64_array(void *buf, size_t size);
void byteswap_uint32_array(void *buf, size_t size);
void byteswap_uint16_array(void *buf, size_t size);
void byteswap_uint8_array(void *buf, size_t size);
void zap_byteswap(void *buf, size_t size);
void zfs_oldacl_byteswap(void *buf, size_t size);
void zfs_acl_byteswap(void *buf, size_t size);
void zfs_znode_byteswap(void *buf, size_t size);
#define DS_FIND_SNAPSHOTS (1<<0)
#define DS_FIND_CHILDREN (1<<1)
#define DS_FIND_SERIALIZE (1<<2)
/*
* The maximum number of bytes that can be accessed as part of one
* operation, including metadata.
*/
#define DMU_MAX_ACCESS (64 * 1024 * 1024) /* 64MB */
#define DMU_MAX_DELETEBLKCNT (20480) /* ~5MB of indirect blocks */
#define DMU_USERUSED_OBJECT (-1ULL)
#define DMU_GROUPUSED_OBJECT (-2ULL)
#define DMU_PROJECTUSED_OBJECT (-3ULL)
/*
* Zap prefix for object accounting in DMU_{USER,GROUP,PROJECT}USED_OBJECT.
*/
#define DMU_OBJACCT_PREFIX "obj-"
#define DMU_OBJACCT_PREFIX_LEN 4
/*
* artificial blkids for bonus buffer and spill blocks
*/
#define DMU_BONUS_BLKID (-1ULL)
#define DMU_SPILL_BLKID (-2ULL)
/*
* Public routines to create, destroy, open, and close objsets.
*/
typedef void dmu_objset_create_sync_func_t(objset_t *os, void *arg,
cred_t *cr, dmu_tx_t *tx);
int dmu_objset_hold(const char *name, void *tag, objset_t **osp);
int dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t key_required, void *tag, objset_t **osp);
void dmu_objset_rele(objset_t *os, void *tag);
void dmu_objset_disown(objset_t *os, boolean_t key_required, void *tag);
int dmu_objset_open_ds(struct dsl_dataset *ds, objset_t **osp);
void dmu_objset_evict_dbufs(objset_t *os);
int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
struct dsl_crypto_params *dcp, dmu_objset_create_sync_func_t func,
void *arg);
int dmu_objset_clone(const char *name, const char *origin);
int dsl_destroy_snapshots_nvl(struct nvlist *snaps, boolean_t defer,
struct nvlist *errlist);
int dmu_objset_snapshot_one(const char *fsname, const char *snapname);
int dmu_objset_snapshot_tmp(const char *, const char *, int);
int dmu_objset_find(char *name, int func(const char *, void *), void *arg,
int flags);
void dmu_objset_byteswap(void *buf, size_t size);
int dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive);
+int dmu_objset_remap_indirects(const char *fsname);
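/*
 * Minimal hold/rele sketch based on the prototypes above; the helper name
 * is hypothetical and dmu_objset_type() is declared later in this file.
 */
static int
objset_sketch_get_type(const char *name, dmu_objset_type_t *typep)
{
	objset_t *os;
	int err;

	err = dmu_objset_hold(name, FTAG, &os);
	if (err != 0)
		return (err);
	*typep = dmu_objset_type(os);
	dmu_objset_rele(os, FTAG);
	return (0);
}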
typedef struct dmu_buf {
uint64_t db_object; /* object that this buffer is part of */
uint64_t db_offset; /* byte offset in this object */
uint64_t db_size; /* size of buffer in bytes */
void *db_data; /* data in buffer */
} dmu_buf_t;
/*
* The names of zap entries in the DIRECTORY_OBJECT of the MOS.
*/
#define DMU_POOL_DIRECTORY_OBJECT 1
#define DMU_POOL_CONFIG "config"
#define DMU_POOL_FEATURES_FOR_WRITE "features_for_write"
#define DMU_POOL_FEATURES_FOR_READ "features_for_read"
#define DMU_POOL_FEATURE_DESCRIPTIONS "feature_descriptions"
#define DMU_POOL_FEATURE_ENABLED_TXG "feature_enabled_txg"
#define DMU_POOL_ROOT_DATASET "root_dataset"
#define DMU_POOL_SYNC_BPOBJ "sync_bplist"
#define DMU_POOL_ERRLOG_SCRUB "errlog_scrub"
#define DMU_POOL_ERRLOG_LAST "errlog_last"
#define DMU_POOL_SPARES "spares"
#define DMU_POOL_DEFLATE "deflate"
#define DMU_POOL_HISTORY "history"
#define DMU_POOL_PROPS "pool_props"
#define DMU_POOL_L2CACHE "l2cache"
#define DMU_POOL_TMP_USERREFS "tmp_userrefs"
#define DMU_POOL_DDT "DDT-%s-%s-%s"
#define DMU_POOL_DDT_STATS "DDT-statistics"
#define DMU_POOL_CREATION_VERSION "creation_version"
#define DMU_POOL_SCAN "scan"
#define DMU_POOL_FREE_BPOBJ "free_bpobj"
#define DMU_POOL_BPTREE_OBJ "bptree_obj"
#define DMU_POOL_EMPTY_BPOBJ "empty_bpobj"
#define DMU_POOL_CHECKSUM_SALT "org.illumos:checksum_salt"
#define DMU_POOL_VDEV_ZAP_MAP "com.delphix:vdev_zap_map"
+#define DMU_POOL_REMOVING "com.delphix:removing"
+#define DMU_POOL_OBSOLETE_BPOBJ "com.delphix:obsolete_bpobj"
+#define DMU_POOL_CONDENSING_INDIRECT "com.delphix:condensing_indirect"
/*
* Allocate an object from this objset. The range of object numbers
* available is (0, DN_MAX_OBJECT). Object 0 is the meta-dnode.
*
* The transaction must be assigned to a txg. The newly allocated
* object will be "held" in the transaction (ie. you can modify the
* newly allocated object in this transaction).
*
* dmu_object_alloc() chooses an object and returns it in *objectp.
*
* dmu_object_claim() allocates a specific object number. If that
* number is already allocated, it fails and returns EEXIST.
*
* Return 0 on success, or ENOSPC or EEXIST as specified above.
*/
uint64_t dmu_object_alloc(objset_t *os, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
uint64_t dmu_object_alloc_dnsize(objset_t *os, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len,
int dnodesize, dmu_tx_t *tx);
int dmu_object_claim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len, dmu_tx_t *tx);
int dmu_object_claim_dnsize(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonus_type, int bonus_len,
int dnodesize, dmu_tx_t *tx);
int dmu_object_reclaim(objset_t *os, uint64_t object, dmu_object_type_t ot,
int blocksize, dmu_object_type_t bonustype, int bonuslen, dmu_tx_t *txp);
int dmu_object_reclaim_dnsize(objset_t *os, uint64_t object,
dmu_object_type_t ot, int blocksize, dmu_object_type_t bonustype,
int bonuslen, int dnodesize, dmu_tx_t *txp);
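/*
 * Allocation sketch: one plausible way to drive dmu_object_alloc() using
 * the tx interfaces declared later in this file. The hold choice and the
 * helper name are illustrative, not prescriptive.
 */
static int
object_alloc_sketch(objset_t *os, uint64_t *objp)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	*objp = dmu_object_alloc(os, DMU_OTN_UINT64_DATA, 0,
	    DMU_OT_NONE, 0, tx);
	dmu_tx_commit(tx);
	return (0);
}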
/*
* Free an object from this objset.
*
* The object's data will be freed as well (ie. you don't need to call
* dmu_free(object, 0, -1, tx)).
*
* The object need not be held in the transaction.
*
* If there are any holds on this object's buffers (via dmu_buf_hold()),
* or tx holds on the object (via dmu_tx_hold_object()), you cannot
* free it; it fails and returns EBUSY.
*
* If the object is not allocated, it fails and returns ENOENT.
*
* Return 0 on success, or EBUSY or ENOENT as specified above.
*/
int dmu_object_free(objset_t *os, uint64_t object, dmu_tx_t *tx);
/*
* Find the next allocated or free object.
*
* The objectp parameter is in-out. It will be updated to be the next
* object which is allocated. Ignore objects which have not been
* modified since txg.
*
* XXX Can only be called on an objset with no dirty data.
*
* Returns 0 on success, or ENOENT if there are no more objects.
*/
int dmu_object_next(objset_t *os, uint64_t *objectp,
boolean_t hole, uint64_t txg);
/*
* Set the number of levels on a dnode. nlevels must be greater than the
* current number of levels or an EINVAL will be returned.
*/
int dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels,
dmu_tx_t *tx);
/*
* Set the data blocksize for an object.
*
* The object cannot have any blocks allocated beyond the first. If
* the first block is allocated already, the new size must be greater
* than the current block size. If these conditions are not met,
* ENOTSUP will be returned.
*
* Returns 0 on success, or EBUSY if there are any holds on the object
* contents, or ENOTSUP as described above.
*/
int dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size,
int ibs, dmu_tx_t *tx);
/*
* Manually set the maxblkid on a dnode. This will adjust nlevels accordingly
* to accommodate the change.
*/
int dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
dmu_tx_t *tx);
/*
* Set the checksum property on a dnode. The new checksum algorithm will
* apply to all newly written blocks; existing blocks will not be affected.
*/
void dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
dmu_tx_t *tx);
/*
* Set the compress property on a dnode. The new compression algorithm will
* apply to all newly written blocks; existing blocks will not be affected.
*/
void dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dmu_tx_t *tx);
int dmu_object_dirty_raw(objset_t *os, uint64_t object, dmu_tx_t *tx);
+int dmu_object_remap_indirects(objset_t *os, uint64_t object, uint64_t txg);
+
void dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
int compressed_size, int byteorder, dmu_tx_t *tx);
/*
* Decide how to write a block: checksum, compression, number of copies, etc.
*/
#define WP_NOFILL 0x1
#define WP_DMU_SYNC 0x2
#define WP_SPILL 0x4
void dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp,
struct zio_prop *zp);
/*
* The bonus data is accessed more or less like a regular buffer.
* You must dmu_bonus_hold() to get the buffer, which will give you a
* dmu_buf_t with db_offset==-1ULL, and db_size = the size of the bonus
- * data. As with any normal buffer, you must call dmu_buf_read() to
- * read db_data, dmu_buf_will_dirty() before modifying it, and the
+ * data. As with any normal buffer, you must call dmu_buf_will_dirty()
+ * before modifying it, and the
* object must be held in an assigned transaction before calling
* dmu_buf_will_dirty. You may use dmu_buf_set_user() on the bonus
* buffer as well. You must release what you hold with dmu_buf_rele().
*
* Returns ENOENT, EIO, or 0.
*/
int dmu_bonus_hold_impl(objset_t *os, uint64_t object, void *tag,
uint32_t flags, dmu_buf_t **dbp);
int dmu_bonus_hold(objset_t *os, uint64_t object, void *tag, dmu_buf_t **);
int dmu_bonus_max(void);
int dmu_set_bonus(dmu_buf_t *, int, dmu_tx_t *);
int dmu_set_bonustype(dmu_buf_t *, dmu_object_type_t, dmu_tx_t *);
dmu_object_type_t dmu_get_bonustype(dmu_buf_t *);
int dmu_rm_spill(objset_t *, uint64_t, dmu_tx_t *);
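/*
 * Bonus-buffer sketch following the comment above: hold, dirty within an
 * assigned tx, modify db_data, release. The flag layout is hypothetical;
 * dmu_buf_will_dirty() and dmu_buf_rele() are declared later in this file.
 */
static int
bonus_sketch_set_flag(objset_t *os, uint64_t object, uint64_t flag,
    dmu_tx_t *tx)
{
	dmu_buf_t *db;
	int err;

	err = dmu_bonus_hold(os, object, FTAG, &db);
	if (err != 0)
		return (err);
	dmu_buf_will_dirty(db, tx);
	*(uint64_t *)db->db_data |= flag;	/* assumes a uint64_t bonus */
	dmu_buf_rele(db, FTAG);
	return (0);
}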
/*
* Special spill buffer support used by "SA" framework
*/
int dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags,
void *tag, dmu_buf_t **dbp);
int dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp);
/*
* Obtain the DMU buffer from the specified object which contains the
* specified offset. dmu_buf_hold() puts a "hold" on the buffer, so
* that it will remain in memory. You must release the hold with
* dmu_buf_rele(). You must not access the dmu_buf_t after releasing
* what you hold. You must have a hold on any dmu_buf_t* you pass to the DMU.
*
* You must call dmu_buf_read, dmu_buf_will_dirty, or dmu_buf_will_fill
* on the returned buffer before reading or writing the buffer's
* db_data. The comments for those routines describe what particular
* operations are valid after calling them.
*
* The object number must be a valid, allocated object number.
*/
int dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
void *tag, dmu_buf_t **, int flags);
int dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
void *tag, dmu_buf_t **dbp, int flags);
/*
* Add a reference to a dmu buffer that has already been held via
* dmu_buf_hold() in the current context.
*/
void dmu_buf_add_ref(dmu_buf_t *db, void* tag);
/*
* Attempt to add a reference to a dmu buffer that is in an unknown state,
* using a pointer that may have been invalidated by eviction processing.
* The request will succeed if the passed in dbuf still represents the
* same os/object/blkid, is ineligible for eviction, and has at least
* one hold by a user other than the syncer.
*/
boolean_t dmu_buf_try_add_ref(dmu_buf_t *, objset_t *os, uint64_t object,
uint64_t blkid, void *tag);
void dmu_buf_rele(dmu_buf_t *db, void *tag);
uint64_t dmu_buf_refcount(dmu_buf_t *db);
/*
* dmu_buf_hold_array holds the DMU buffers which contain all bytes in a
* range of an object. A pointer to an array of dmu_buf_t*'s is
* returned (in *dbpp).
*
* dmu_buf_rele_array releases the hold on an array of dmu_buf_t*'s, and
* frees the array. The hold on the array of buffers MUST be released
* with dmu_buf_rele_array. You can NOT release the hold on each buffer
* individually with dmu_buf_rele.
*/
int dmu_buf_hold_array_by_bonus(dmu_buf_t *db, uint64_t offset,
uint64_t length, boolean_t read, void *tag,
int *numbufsp, dmu_buf_t ***dbpp);
void dmu_buf_rele_array(dmu_buf_t **, int numbufs, void *tag);
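/*
 * Array hold/release sketch for the pair above; assumes a block-aligned
 * offset and length, and that the caller holds an assigned tx.
 * dmu_buf_will_dirty() is declared later in this file.
 */
static int
buf_array_sketch_zero(dmu_buf_t *bonus, uint64_t off, uint64_t len,
    dmu_tx_t *tx)
{
	dmu_buf_t **dbp;
	int numbufs, i, err;

	err = dmu_buf_hold_array_by_bonus(bonus, off, len, B_FALSE, FTAG,
	    &numbufs, &dbp);
	if (err != 0)
		return (err);
	for (i = 0; i < numbufs; i++) {
		dmu_buf_will_dirty(dbp[i], tx);
		bzero(dbp[i]->db_data, dbp[i]->db_size);
	}
	dmu_buf_rele_array(dbp, numbufs, FTAG);
	return (0);
}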
typedef void dmu_buf_evict_func_t(void *user_ptr);
/*
* A DMU buffer user object may be associated with a dbuf for the
* duration of its lifetime. This allows the user of a dbuf (client)
* to attach private data to a dbuf (e.g. in-core only data such as a
* dnode_children_t, zap_t, or zap_leaf_t) and be optionally notified
* when that dbuf has been evicted. Clients typically respond to the
* eviction notification by freeing their private data, thus ensuring
* the same lifetime for both dbuf and private data.
*
* The mapping from a dmu_buf_user_t to any client private data is the
* client's responsibility. All current consumers of the API with private
* data embed a dmu_buf_user_t as the first member of the structure for
* their private data. This allows conversions between the two types
* with a simple cast. Since the DMU buf user API never needs access
* to the private data, other strategies can be employed if necessary
* or convenient for the client (e.g. using container_of() to do the
* conversion for private data that cannot have the dmu_buf_user_t as
* its first member).
*
* Eviction callbacks are executed without the dbuf mutex held or any
* other type of mechanism to guarantee that the dbuf is still available.
* For this reason, users must assume the dbuf has already been freed
* and not reference the dbuf from the callback context.
*
* Users requesting "immediate eviction" are notified as soon as the dbuf
* is only referenced by dirty records (dirties == holds). Otherwise the
* notification occurs after eviction processing for the dbuf begins.
*/
typedef struct dmu_buf_user {
/*
* Asynchronous user eviction callback state.
*/
taskq_ent_t dbu_tqent;
/*
* This instance's eviction function pointers.
*
* dbu_evict_func_sync is called synchronously and then
* dbu_evict_func_async is executed asynchronously on a taskq.
*/
dmu_buf_evict_func_t *dbu_evict_func_sync;
dmu_buf_evict_func_t *dbu_evict_func_async;
#ifdef ZFS_DEBUG
/*
* Pointer to user's dbuf pointer. NULL for clients that do
* not associate a dbuf with their user data.
*
* The dbuf pointer is cleared upon eviction so as to catch
* use-after-evict bugs in clients.
*/
dmu_buf_t **dbu_clear_on_evict_dbufp;
#endif
} dmu_buf_user_t;
/*
* Initialize the given dmu_buf_user_t instance with the eviction function
* evict_func, to be called when the user is evicted.
*
* NOTE: This function should only be called once on a given dmu_buf_user_t.
* To allow enforcement of this, dbu must already be zeroed on entry.
*/
/*ARGSUSED*/
static inline void
dmu_buf_init_user(dmu_buf_user_t *dbu, dmu_buf_evict_func_t *evict_func_sync,
dmu_buf_evict_func_t *evict_func_async, dmu_buf_t **clear_on_evict_dbufp)
{
ASSERT(dbu->dbu_evict_func_sync == NULL);
ASSERT(dbu->dbu_evict_func_async == NULL);
/* must have at least one evict func */
IMPLY(evict_func_sync == NULL, evict_func_async != NULL);
dbu->dbu_evict_func_sync = evict_func_sync;
dbu->dbu_evict_func_async = evict_func_async;
taskq_init_ent(&dbu->dbu_tqent);
#ifdef ZFS_DEBUG
dbu->dbu_clear_on_evict_dbufp = clear_on_evict_dbufp;
#endif
}
/*
* Attach user data to a dbuf and mark it for normal (when the dbuf's
* data is cleared or its reference count goes to zero) eviction processing.
*
* Returns NULL on success, or the existing user if another user currently
* owns the buffer.
*/
void *dmu_buf_set_user(dmu_buf_t *db, dmu_buf_user_t *user);
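/*
 * Sketch of the dbuf-user pattern described above: embed dmu_buf_user_t as
 * the first member, initialize it once, then attach it. The structure and
 * callback here are hypothetical; su must come from kmem_zalloc() so that
 * dmu_buf_init_user() sees a zeroed dmu_buf_user_t.
 */
typedef struct sketch_user {
	dmu_buf_user_t su_dbu;		/* must be first for the cast */
	uint64_t su_private;
} sketch_user_t;

static void
sketch_user_evict_sync(void *arg)
{
	sketch_user_t *su = arg;

	/* do not touch the dbuf here; assume it is already gone */
	kmem_free(su, sizeof (*su));
}

static inline void
sketch_user_attach(dmu_buf_t *db, sketch_user_t *su)
{
	dmu_buf_init_user(&su->su_dbu, sketch_user_evict_sync, NULL, NULL);
	VERIFY3P(dmu_buf_set_user(db, &su->su_dbu), ==, NULL);
}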
/*
* Attach user data to a dbuf and mark it for immediate (its dirty and
* reference counts are equal) eviction processing.
*
* Returns NULL on success, or the existing user if another user currently
* owns the buffer.
*/
void *dmu_buf_set_user_ie(dmu_buf_t *db, dmu_buf_user_t *user);
/*
* Replace the current user of a dbuf.
*
* If given the current user of a dbuf, replaces the dbuf's user with
* "new_user" and returns the user data pointer that was replaced.
* Otherwise returns the current, and unmodified, dbuf user pointer.
*/
void *dmu_buf_replace_user(dmu_buf_t *db,
dmu_buf_user_t *old_user, dmu_buf_user_t *new_user);
/*
* Remove the specified user data for a DMU buffer.
*
* Returns the user that was removed on success, or the current user if
* another user currently owns the buffer.
*/
void *dmu_buf_remove_user(dmu_buf_t *db, dmu_buf_user_t *user);
/*
* Returns the user data (dmu_buf_user_t *) associated with this dbuf.
*/
void *dmu_buf_get_user(dmu_buf_t *db);
objset_t *dmu_buf_get_objset(dmu_buf_t *db);
dnode_t *dmu_buf_dnode_enter(dmu_buf_t *db);
void dmu_buf_dnode_exit(dmu_buf_t *db);
/* Block until any in-progress dmu buf user evictions complete. */
void dmu_buf_user_evict_wait(void);
/*
* Returns the blkptr associated with this dbuf, or NULL if not set.
*/
struct blkptr *dmu_buf_get_blkptr(dmu_buf_t *db);
/*
* Indicate that you are going to modify the buffer's data (db_data).
*
* The transaction (tx) must be assigned to a txg (ie. you've called
* dmu_tx_assign()). The buffer's object must be held in the tx
* (ie. you've called dmu_tx_hold_object(tx, db->db_object)).
*/
void dmu_buf_will_dirty(dmu_buf_t *db, dmu_tx_t *tx);
void dmu_buf_will_change_crypt_params(dmu_buf_t *db, dmu_tx_t *tx);
/*
* You must create a transaction, then hold the objects which you will
* (or might) modify as part of this transaction. Then you must assign
* the transaction to a transaction group. Once the transaction has
* been assigned, you can modify buffers which belong to held objects as
* part of this transaction. You can't modify buffers before the
* transaction has been assigned; you can't modify buffers which don't
* belong to objects which this transaction holds; you can't hold
* objects once the transaction has been assigned. You may hold an
* object which you are going to free (with dmu_object_free()), but you
* don't have to.
*
* You can abort the transaction before it has been assigned.
*
* Note that you may hold buffers (with dmu_buf_hold) at any time,
* regardless of transaction state.
*/
#define DMU_NEW_OBJECT (-1ULL)
#define DMU_OBJECT_END (-1ULL)
dmu_tx_t *dmu_tx_create(objset_t *os);
void dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len);
void dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
int len);
void dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off,
uint64_t len);
void dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off,
uint64_t len);
+void dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name);
void dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add,
const char *name);
void dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn);
void dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object);
void dmu_tx_hold_sa(dmu_tx_t *tx, struct sa_handle *hdl, boolean_t may_grow);
void dmu_tx_hold_sa_create(dmu_tx_t *tx, int total_size);
void dmu_tx_abort(dmu_tx_t *tx);
int dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how);
void dmu_tx_wait(dmu_tx_t *tx);
void dmu_tx_commit(dmu_tx_t *tx);
void dmu_tx_mark_netfree(dmu_tx_t *tx);
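/*
 * Transaction lifecycle sketch following the comment above: create, hold,
 * assign, modify, commit (or abort if assignment fails). dmu_write() is
 * declared later in this file; the helper name is hypothetical.
 */
static int
tx_sketch_write(objset_t *os, uint64_t object, uint64_t off, int len,
    const void *buf)
{
	dmu_tx_t *tx = dmu_tx_create(os);
	int err;

	dmu_tx_hold_write(tx, object, off, len);
	err = dmu_tx_assign(tx, TXG_WAIT);
	if (err != 0) {
		dmu_tx_abort(tx);
		return (err);
	}
	dmu_write(os, object, off, len, buf, tx);
	dmu_tx_commit(tx);
	return (0);
}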
/*
* To register a commit callback, dmu_tx_callback_register() must be called.
*
* dcb_data is a pointer to caller private data that is passed on as a
* callback parameter. The caller is responsible for properly allocating and
* freeing it.
*
* When registering a callback, the transaction must be already created, but
* it cannot be committed or aborted. It can be assigned to a txg or not.
*
* The callback will be called after the transaction has been safely written
* to stable storage and will also be called if the dmu_tx is aborted.
* If there is any error which prevents the transaction from being committed to
* disk, the callback will be called with a value of error != 0.
*
* When multiple callbacks are registered to the transaction, the callbacks
* will be called in reverse order to let Lustre, currently the only user of
* commit callbacks, take the fast path of its commit callback handling.
*/
typedef void dmu_tx_callback_func_t(void *dcb_data, int error);
void dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *dcb_func,
void *dcb_data);
void dmu_tx_do_callbacks(list_t *cb_list, int error);
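/*
 * Commit-callback sketch for the interface above; the payload and the
 * registration site are hypothetical.
 */
static void
sketch_commit_cb(void *dcb_data, int error)
{
	/* error != 0 means the tx never made it to stable storage */
	kmem_free(dcb_data, sizeof (uint64_t));
}

static inline void
sketch_register_cb(dmu_tx_t *tx, uint64_t *state)
{
	/* tx must already be created and not yet committed or aborted */
	dmu_tx_callback_register(tx, sketch_commit_cb, state);
}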
/*
* Free up the data blocks for a defined range of a file. If size is
* -1, the range from offset to end-of-file is freed.
*/
int dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx);
int dmu_free_long_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size);
int dmu_free_long_range_raw(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size);
int dmu_free_long_object(objset_t *os, uint64_t object);
int dmu_free_long_object_raw(objset_t *os, uint64_t object);
/*
* Convenience functions.
*
* Canfail routines will return 0 on success, or an errno if there is a
* nonrecoverable I/O error.
*/
#define DMU_READ_PREFETCH 0 /* prefetch */
#define DMU_READ_NO_PREFETCH 1 /* don't prefetch */
#define DMU_READ_NO_DECRYPT 2 /* don't decrypt */
int dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
void *buf, uint32_t flags);
int dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
uint32_t flags);
void dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx);
void dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx);
void dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx);
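/*
 * Convenience-routine sketch: a prefetching read into a caller-supplied
 * buffer, returning 0 or an errno as described above.
 */
static inline int
read_sketch(objset_t *os, uint64_t object, uint64_t off, uint64_t size,
    void *buf)
{
	return (dmu_read(os, object, off, size, buf, DMU_READ_PREFETCH));
}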
#ifdef _KERNEL
#include <linux/blkdev_compat.h>
int dmu_read_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size);
int dmu_read_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size);
int dmu_read_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size);
int dmu_write_uio(objset_t *os, uint64_t object, struct uio *uio, uint64_t size,
dmu_tx_t *tx);
int dmu_write_uio_dbuf(dmu_buf_t *zdb, struct uio *uio, uint64_t size,
dmu_tx_t *tx);
int dmu_write_uio_dnode(dnode_t *dn, struct uio *uio, uint64_t size,
dmu_tx_t *tx);
#endif
struct arc_buf *dmu_request_arcbuf(dmu_buf_t *handle, int size);
void dmu_return_arcbuf(struct arc_buf *buf);
void dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset,
struct arc_buf *buf, dmu_tx_t *tx);
void dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset,
struct arc_buf *buf, dmu_tx_t *tx);
#define dmu_assign_arcbuf dmu_assign_arcbuf_by_dbuf
int dmu_convert_mdn_block_to_raw(objset_t *os, uint64_t firstobj,
boolean_t byteorder, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac, dmu_tx_t *tx);
void dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset,
dmu_buf_t *handle, dmu_tx_t *tx);
#ifdef HAVE_UIO_ZEROCOPY
int dmu_xuio_init(struct xuio *uio, int niov);
void dmu_xuio_fini(struct xuio *uio);
int dmu_xuio_add(struct xuio *uio, struct arc_buf *abuf, offset_t off,
size_t n);
int dmu_xuio_cnt(struct xuio *uio);
struct arc_buf *dmu_xuio_arcbuf(struct xuio *uio, int i);
void dmu_xuio_clear(struct xuio *uio, int i);
#endif /* HAVE_UIO_ZEROCOPY */
void xuio_stat_wbuf_copied(void);
void xuio_stat_wbuf_nocopy(void);
extern int zfs_prefetch_disable;
extern int zfs_max_recordsize;
/*
* Asynchronously try to read in the data.
*/
void dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
uint64_t len, enum zio_priority pri);
typedef struct dmu_object_info {
/* All sizes are in bytes unless otherwise indicated. */
uint32_t doi_data_block_size;
uint32_t doi_metadata_block_size;
dmu_object_type_t doi_type;
dmu_object_type_t doi_bonus_type;
uint64_t doi_bonus_size;
uint8_t doi_indirection; /* 2 = dnode->indirect->data */
uint8_t doi_checksum;
uint8_t doi_compress;
uint8_t doi_nblkptr;
uint8_t doi_pad[4];
uint64_t doi_dnodesize;
uint64_t doi_physical_blocks_512; /* data + metadata, 512b blks */
uint64_t doi_max_offset;
uint64_t doi_fill_count; /* number of non-empty blocks */
} dmu_object_info_t;
typedef void (*const arc_byteswap_func_t)(void *buf, size_t size);
typedef struct dmu_object_type_info {
dmu_object_byteswap_t ot_byteswap;
boolean_t ot_metadata;
boolean_t ot_encrypt;
char *ot_name;
} dmu_object_type_info_t;
typedef const struct dmu_object_byteswap_info {
arc_byteswap_func_t ob_func;
char *ob_name;
} dmu_object_byteswap_info_t;
extern const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES];
extern const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS];
/*
* Get information on a DMU object.
*
* Return 0 on success or ENOENT if object is not allocated.
*
* If doi is NULL, just indicates whether the object exists.
*/
int dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi);
void __dmu_object_info_from_dnode(struct dnode *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dnode in hand. */
void dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi);
/* Like dmu_object_info, but faster if you have a held dbuf in hand. */
void dmu_object_info_from_db(dmu_buf_t *db, dmu_object_info_t *doi);
/*
* Like dmu_object_info_from_db, but faster still when you only care about
* the size. This is specifically optimized for zfs_getattr().
*/
void dmu_object_size_from_db(dmu_buf_t *db, uint32_t *blksize,
u_longlong_t *nblk512);
void dmu_object_dnsize_from_db(dmu_buf_t *db, int *dnsize);
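/*
 * dmu_object_info() usage sketch; the derived quantity is only an example.
 */
static int
object_info_sketch(objset_t *os, uint64_t object, uint64_t *physbytesp)
{
	dmu_object_info_t doi;
	int err;

	err = dmu_object_info(os, object, &doi);
	if (err != 0)
		return (err);	/* ENOENT if the object is not allocated */
	*physbytesp = doi.doi_physical_blocks_512 << 9;
	return (0);
}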
typedef struct dmu_objset_stats {
uint64_t dds_num_clones; /* number of clones of this */
uint64_t dds_creation_txg;
uint64_t dds_guid;
dmu_objset_type_t dds_type;
uint8_t dds_is_snapshot;
uint8_t dds_inconsistent;
char dds_origin[ZFS_MAX_DATASET_NAME_LEN];
} dmu_objset_stats_t;
/*
* Get stats on a dataset.
*/
void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat);
/*
* Add entries to the nvlist for all the objset's properties. See
* zfs_prop_table[] and zfs(1m) for details on the properties.
*/
void dmu_objset_stats(objset_t *os, struct nvlist *nv);
/*
* Get the space usage statistics for statvfs().
*
* refdbytes is the amount of space "referenced" by this objset.
* availbytes is the amount of space available to this objset, taking
* into account quotas & reservations, assuming that no other objsets
* use the space first. These values correspond to the 'referenced' and
* 'available' properties, described in the zfs(1m) manpage.
*
* usedobjs and availobjs are the number of objects currently allocated,
* and available.
*/
void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp);
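/*
 * statvfs-style sketch using dmu_objset_space(); consumers typically scale
 * these byte counts into block units themselves.
 */
static inline uint64_t
objset_space_sketch_total(objset_t *os)
{
	uint64_t refd, avail, usedobjs, availobjs;

	dmu_objset_space(os, &refd, &avail, &usedobjs, &availobjs);
	return (refd + avail);		/* total bytes backing this objset */
}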
/*
* The fsid_guid is a 56-bit ID that can change to avoid collisions.
* (Contrast with the ds_guid which is a 64-bit ID that will never
* change, so there is a small probability that it will collide.)
*/
uint64_t dmu_objset_fsid_guid(objset_t *os);
/*
* Get the [cm]time for an objset's snapshot dir
*/
timestruc_t dmu_objset_snap_cmtime(objset_t *os);
int dmu_objset_is_snapshot(objset_t *os);
extern struct spa *dmu_objset_spa(objset_t *os);
extern struct zilog *dmu_objset_zil(objset_t *os);
extern struct dsl_pool *dmu_objset_pool(objset_t *os);
extern struct dsl_dataset *dmu_objset_ds(objset_t *os);
extern void dmu_objset_name(objset_t *os, char *buf);
extern dmu_objset_type_t dmu_objset_type(objset_t *os);
extern uint64_t dmu_objset_id(objset_t *os);
extern uint64_t dmu_objset_dnodesize(objset_t *os);
extern zfs_sync_type_t dmu_objset_syncprop(objset_t *os);
extern zfs_logbias_op_t dmu_objset_logbias(objset_t *os);
extern int dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
uint64_t *id, uint64_t *offp, boolean_t *case_conflict);
extern int dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *val);
extern int dmu_snapshot_realname(objset_t *os, char *name, char *real,
int maxlen, boolean_t *conflict);
extern int dmu_dir_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp);
typedef int objset_used_cb_t(dmu_object_type_t bonustype,
void *bonus, uint64_t *userp, uint64_t *groupp, uint64_t *projectp);
extern void dmu_objset_register_type(dmu_objset_type_t ost,
objset_used_cb_t *cb);
extern void dmu_objset_set_user(objset_t *os, void *user_ptr);
extern void *dmu_objset_get_user(objset_t *os);
/*
* Return the txg number for the given assigned transaction.
*/
uint64_t dmu_tx_get_txg(dmu_tx_t *tx);
/*
* Synchronous write.
* If a parent zio is provided this function initiates a write on the
* provided buffer as a child of the parent zio.
* In the absence of a parent zio, the write is completed synchronously.
* At write completion, blk is filled with the bp of the written block.
* Note that while the data covered by this function will be on stable
* storage when the write completes, this new data does not become a
* permanent part of the file until the associated transaction commits.
*/
/*
* {zfs,zvol,ztest}_get_done() args
*/
typedef struct zgd {
struct lwb *zgd_lwb;
struct blkptr *zgd_bp;
dmu_buf_t *zgd_db;
struct rl *zgd_rl;
void *zgd_private;
} zgd_t;
typedef void dmu_sync_cb_t(zgd_t *arg, int error);
int dmu_sync(struct zio *zio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd);
/*
* Find the next hole or data block in file starting at *off
* Return found offset in *off. Return ESRCH for end of file.
*/
int dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole,
uint64_t *off);
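/*
 * Hole-seeking sketch using dmu_offset_next(); the ESRCH-to-ENXIO mapping
 * mirrors what llseek-style callers commonly want, and is illustrative.
 */
static inline int
seek_hole_sketch(objset_t *os, uint64_t object, uint64_t *offp)
{
	int err = dmu_offset_next(os, object, B_TRUE /* hole */, offp);

	return (err == ESRCH ? ENXIO : err);
}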
/*
* Initial setup and final teardown.
*/
extern void dmu_init(void);
extern void dmu_fini(void);
typedef void (*dmu_traverse_cb_t)(objset_t *os, void *arg, struct blkptr *bp,
uint64_t object, uint64_t offset, int len);
void dmu_traverse_objset(objset_t *os, uint64_t txg_start,
dmu_traverse_cb_t cb, void *arg);
int dmu_diff(const char *tosnap_name, const char *fromsnap_name,
struct vnode *vp, offset_t *offp);
/* CRC64 table */
#define ZFS_CRC64_POLY 0xC96C5795D7870F42ULL /* ECMA-182, reflected form */
extern uint64_t zfs_crc64_table[256];
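/*
 * Sketch of the usual byte-at-a-time update against the reflected table
 * above (ZAP hashing uses this form); the helper name is hypothetical.
 */
static inline uint64_t
crc64_sketch_update(uint64_t crc, const uint8_t *buf, size_t len)
{
	while (len-- != 0)
		crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ *buf++) & 0xFF];
	return (crc);
}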
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DMU_H */
diff --git a/include/sys/dnode.h b/include/sys/dnode.h
index 2f70d54461cb..9c1df45e9d59 100644
--- a/include/sys/dnode.h
+++ b/include/sys/dnode.h
@@ -1,618 +1,619 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DNODE_H
#define _SYS_DNODE_H
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/refcount.h>
#include <sys/dmu_zfetch.h>
#include <sys/zrlock.h>
#include <sys/multilist.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* dnode_hold() flags.
*/
#define DNODE_MUST_BE_ALLOCATED 1
#define DNODE_MUST_BE_FREE 2
/*
* dnode_next_offset() flags.
*/
#define DNODE_FIND_HOLE 1
#define DNODE_FIND_BACKWARDS 2
#define DNODE_FIND_HAVELOCK 4
/*
* Fixed constants.
*/
#define DNODE_SHIFT 9 /* 512 bytes */
#define DN_MIN_INDBLKSHIFT 12 /* 4k */
/*
* If we ever increase this value beyond 20, we need to revisit all logic that
* does x << level * ebps to handle overflow. With a 1M indirect block size,
* 4 levels of indirect blocks would not be able to guarantee addressing an
* entire object, so 5 levels would be needed, but 5 * (20 - 7) = 65 bits
* exceeds the 64-bit (DN_MAX_OFFSET_SHIFT) limit on object offsets.
*/
#define DN_MAX_INDBLKSHIFT 17 /* 128k */
#define DNODE_BLOCK_SHIFT 14 /* 16k */
#define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */
#define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */
#define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */
/*
* dnode id flags
*
* Note: a file will never ever have its ids moved from bonus->spill
*/
#define DN_ID_CHKED_BONUS 0x1
#define DN_ID_CHKED_SPILL 0x2
#define DN_ID_OLD_EXIST 0x4
#define DN_ID_NEW_EXIST 0x8
/*
* Derived constants.
*/
#define DNODE_MIN_SIZE (1 << DNODE_SHIFT)
#define DNODE_MAX_SIZE (1 << DNODE_BLOCK_SHIFT)
#define DNODE_BLOCK_SIZE (1 << DNODE_BLOCK_SHIFT)
#define DNODE_MIN_SLOTS (DNODE_MIN_SIZE >> DNODE_SHIFT)
#define DNODE_MAX_SLOTS (DNODE_MAX_SIZE >> DNODE_SHIFT)
#define DN_BONUS_SIZE(dnsize) ((dnsize) - DNODE_CORE_SIZE - \
(1 << SPA_BLKPTRSHIFT))
#define DN_SLOTS_TO_BONUSLEN(slots) DN_BONUS_SIZE((slots) << DNODE_SHIFT)
#define DN_OLD_MAX_BONUSLEN (DN_BONUS_SIZE(DNODE_MIN_SIZE))
#define DN_MAX_NBLKPTR ((DNODE_MIN_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT)
#define DN_MAX_OBJECT (1ULL << DN_MAX_OBJECT_SHIFT)
#define DN_ZERO_BONUSLEN (DN_BONUS_SIZE(DNODE_MAX_SIZE) + 1)
#define DN_KILL_SPILLBLK (1)
#define DN_SLOT_UNINIT ((void *)NULL) /* Uninitialized */
#define DN_SLOT_FREE ((void *)1UL) /* Free slot */
#define DN_SLOT_ALLOCATED ((void *)2UL) /* Allocated slot */
#define DN_SLOT_INTERIOR ((void *)3UL) /* Interior allocated slot */
#define DN_SLOT_IS_PTR(dn) ((void *)dn > DN_SLOT_INTERIOR)
#define DN_SLOT_IS_VALID(dn) ((void *)dn != NULL)
#define DNODES_PER_BLOCK_SHIFT (DNODE_BLOCK_SHIFT - DNODE_SHIFT)
#define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT)
/*
* This is inaccurate if the indblkshift of the particular object is not the
* max. But it's only used by userland to calculate the zvol reservation.
*/
#define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT)
#define DNODES_PER_LEVEL (1ULL << DNODES_PER_LEVEL_SHIFT)
#define DN_MAX_LEVELS (DIV_ROUND_UP(DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT, \
DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT) + 1)
#define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus + \
(((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t))))
#define DN_MAX_BONUS_LEN(dnp) \
((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) ? \
(uint8_t *)DN_SPILL_BLKPTR(dnp) - (uint8_t *)DN_BONUS(dnp) : \
(uint8_t *)(dnp + (dnp->dn_extra_slots + 1)) - (uint8_t *)DN_BONUS(dnp))
#define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \
(dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT)
#define EPB(blkshift, typeshift) (1 << (blkshift - typeshift))
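Plugging the fixed constants above into these macros (and assuming the usual SPA_BLKPTRSHIFT of 7, i.e. a 128-byte blkptr_t, which is defined in spa.h rather than here) gives the following worked values:

/*
 * Worked example, assuming SPA_BLKPTRSHIFT == 7 (128-byte blkptr_t):
 *
 *	DN_BONUS_SIZE(512)      = 512 - 64 - 128  = 320  (DN_OLD_MAX_BONUSLEN)
 *	DN_SLOTS_TO_BONUSLEN(2) = 1024 - 64 - 128 = 832  bytes for a 1k dnode
 *	DN_MAX_NBLKPTR          = (512 - 64) >> 7 = 3    block pointers
 *	DNODES_PER_BLOCK        = 16384 >> 9      = 32   512-byte slots per block
 */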
struct dmu_buf_impl;
struct objset;
struct zio;
enum dnode_dirtycontext {
DN_UNDIRTIED,
DN_DIRTY_OPEN,
DN_DIRTY_SYNC
};
/* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */
#define DNODE_FLAG_USED_BYTES (1 << 0)
#define DNODE_FLAG_USERUSED_ACCOUNTED (1 << 1)
/* Does dnode have a SA spill blkptr in bonus? */
#define DNODE_FLAG_SPILL_BLKPTR (1 << 2)
/* User/Group/Project dnode accounting */
#define DNODE_FLAG_USEROBJUSED_ACCOUNTED (1 << 3)
/*
* This mask defines the set of flags which are "portable", meaning
* that they can be preserved when doing a raw encrypted zfs send.
* Flags included in this mask will be protected by AAD when the block
* of dnodes is encrypted.
*/
#define DNODE_CRYPT_PORTABLE_FLAGS_MASK (DNODE_FLAG_SPILL_BLKPTR)
/*
* VARIABLE-LENGTH (LARGE) DNODES
*
* The motivation for variable-length dnodes is to eliminate the overhead
* associated with using spill blocks. Spill blocks are used to store
* system attribute data (i.e. file metadata) that does not fit in the
* dnode's bonus buffer. By allowing a larger bonus buffer area the use of
* a spill block can be avoided. Spill blocks potentially incur an
* additional read I/O for every dnode in a dnode block. As a worst case
* example, reading 32 dnodes from a 16k dnode block and all of the spill
* blocks could issue 33 separate reads. Now suppose those dnodes have size
* 1024 and therefore don't need spill blocks. Then the worst case number
* of blocks read is reduced from 33 to two--one per dnode block.
*
* ZFS-on-Linux systems that make heavy use of extended attributes benefit
* from this feature. In particular, ZFS-on-Linux supports the xattr=sa
* dataset property which allows file extended attribute data to be stored
* in the dnode bonus buffer as an alternative to the traditional
* directory-based format. Workloads such as SELinux and the Lustre
* distributed filesystem often store enough xattr data to force spill
* blocks when xattr=sa is in effect. Large dnodes may therefore provide a
* performance benefit to such systems. Other use cases that benefit from
* this feature include files with large ACLs and symbolic links with long
* target names.
*
* The size of a dnode may be a multiple of 512 bytes up to the size of a
* dnode block (currently 16384 bytes). The dn_extra_slots field of the
* on-disk dnode_phys_t structure describes the size of the physical dnode
* on disk. The field represents how many "extra" dnode_phys_t slots a
* dnode consumes in its dnode block. This convention results in a value of
* 0 for 512 byte dnodes which preserves on-disk format compatibility with
* older software which doesn't support large dnodes.
*
* Similarly, the in-memory dnode_t structure has a dn_num_slots field
* to represent the total number of dnode_phys_t slots consumed on disk.
* Thus dn->dn_num_slots is 1 greater than the corresponding
* dnp->dn_extra_slots. This difference in convention was adopted
* because, unlike on-disk structures, backward compatibility is not a
* concern for in-memory objects, so we used a more natural way to
* represent size for a dnode_t.
*
* The default size for newly created dnodes is determined by the value of
* the "dnodesize" dataset property. By default the property is set to
* "legacy" which is compatible with older software. Setting the property
* to "auto" will allow the filesystem to choose the most suitable dnode
* size. Currently this just sets the default dnode size to 1k, but future
* code improvements could dynamically choose a size based on observed
* workload patterns. Dnodes of varying sizes can coexist within the same
* dataset and even within the same dnode block.
*/
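The slot accounting described above reduces to simple shifts; the following sketch (hypothetical helpers, built only from the macros defined earlier in this header) shows how a dnode size in bytes maps onto the on-disk and in-memory conventions.

#include <sys/dnode.h>

/* Sketch: a dnsize-byte dnode occupies (dnsize >> DNODE_SHIFT) slots. */
static inline int
dnode_size_to_extra_slots(int dnsize)
{
	return ((dnsize >> DNODE_SHIFT) - 1);	/* on-disk dn_extra_slots */
}

static inline int
dnode_size_to_max_bonuslen(int dnsize)
{
	return (DN_SLOTS_TO_BONUSLEN(dnsize >> DNODE_SHIFT));
}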
typedef struct dnode_phys {
uint8_t dn_type; /* dmu_object_type_t */
uint8_t dn_indblkshift; /* ln2(indirect block size) */
uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */
uint8_t dn_nblkptr; /* length of dn_blkptr */
uint8_t dn_bonustype; /* type of data in bonus buffer */
uint8_t dn_checksum; /* ZIO_CHECKSUM type */
uint8_t dn_compress; /* ZIO_COMPRESS type */
uint8_t dn_flags; /* DNODE_FLAG_* */
uint16_t dn_datablkszsec; /* data block size in 512b sectors */
uint16_t dn_bonuslen; /* length of dn_bonus */
uint8_t dn_extra_slots; /* # of subsequent slots consumed */
uint8_t dn_pad2[3];
/* accounting is protected by dn_dirty_mtx */
uint64_t dn_maxblkid; /* largest allocated block ID */
uint64_t dn_used; /* bytes (or sectors) of disk space */
/*
* Both dn_pad2 and dn_pad3 are protected by the block's MAC. This
* allows us to protect any fields that might be added here in the
* future. In either case, developers will want to check
* zio_crypt_init_uios_dnode() to ensure the new field is being
* protected properly.
*/
uint64_t dn_pad3[4];
/*
* The tail region is 448 bytes for a 512 byte dnode, and
* correspondingly larger for larger dnode sizes. The spill
* block pointer, when present, is always at the end of the tail
* region. There are three ways this space may be used, using
* a 512 byte dnode for this diagram:
*
* 0 64 128 192 256 320 384 448 (offset)
* +---------------+---------------+---------------+-------+
* | dn_blkptr[0] | dn_blkptr[1] | dn_blkptr[2] | / |
* +---------------+---------------+---------------+-------+
* | dn_blkptr[0] | dn_bonus[0..319] |
* +---------------+-----------------------+---------------+
* | dn_blkptr[0] | dn_bonus[0..191] | dn_spill |
* +---------------+-----------------------+---------------+
*/
union {
blkptr_t dn_blkptr[1+DN_OLD_MAX_BONUSLEN/sizeof (blkptr_t)];
struct {
blkptr_t __dn_ignore1;
uint8_t dn_bonus[DN_OLD_MAX_BONUSLEN];
};
struct {
blkptr_t __dn_ignore2;
uint8_t __dn_ignore3[DN_OLD_MAX_BONUSLEN -
sizeof (blkptr_t)];
blkptr_t dn_spill;
};
};
} dnode_phys_t;
#define DN_SPILL_BLKPTR(dnp) (blkptr_t *)((char *)(dnp) + \
(((dnp)->dn_extra_slots + 1) << DNODE_SHIFT) - (1 << SPA_BLKPTRSHIFT))
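The three tail layouts drawn in the comment above can be located with DN_BONUS(), DN_SPILL_BLKPTR() and DN_MAX_BONUS_LEN(); a hedged sketch of hypothetical accessors:

#include <sys/dnode.h>

/* Sketch: start of the bonus buffer within the dnode tail. */
static inline uint8_t *
dnode_bonus_start(dnode_phys_t *dnp)
{
	return ((uint8_t *)DN_BONUS(dnp));
}

/*
 * Sketch: usable bonus bytes; when DNODE_FLAG_SPILL_BLKPTR is set the
 * spill block pointer occupies the last blkptr_t of the tail, so the
 * macro returns a correspondingly smaller value.
 */
static inline int
dnode_bonus_capacity(dnode_phys_t *dnp)
{
	return (DN_MAX_BONUS_LEN(dnp));
}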
struct dnode {
/*
* Protects the structure of the dnode, including the number of levels
* of indirection (dn_nlevels), dn_maxblkid, and dn_next_*
*/
krwlock_t dn_struct_rwlock;
/* Our link on dn_objset->os_dnodes list; protected by os_lock. */
list_node_t dn_link;
/* immutable: */
struct objset *dn_objset;
uint64_t dn_object;
struct dmu_buf_impl *dn_dbuf;
struct dnode_handle *dn_handle;
dnode_phys_t *dn_phys; /* pointer into dn->dn_dbuf->db.db_data */
/*
* Copies of stuff in dn_phys. They're valid in the open
* context (eg. even before the dnode is first synced).
* Where necessary, these are protected by dn_struct_rwlock.
*/
dmu_object_type_t dn_type; /* object type */
uint16_t dn_bonuslen; /* bonus length */
uint8_t dn_bonustype; /* bonus type */
uint8_t dn_nblkptr; /* number of blkptrs (immutable) */
uint8_t dn_checksum; /* ZIO_CHECKSUM type */
uint8_t dn_compress; /* ZIO_COMPRESS type */
uint8_t dn_nlevels;
uint8_t dn_indblkshift;
uint8_t dn_datablkshift; /* zero if blksz not power of 2! */
uint8_t dn_moved; /* Has this dnode been moved? */
uint16_t dn_datablkszsec; /* in 512b sectors */
uint32_t dn_datablksz; /* in bytes */
uint64_t dn_maxblkid;
uint8_t dn_next_type[TXG_SIZE];
uint8_t dn_num_slots; /* metadnode slots consumed on disk */
uint8_t dn_next_nblkptr[TXG_SIZE];
uint8_t dn_next_nlevels[TXG_SIZE];
uint8_t dn_next_indblkshift[TXG_SIZE];
uint8_t dn_next_bonustype[TXG_SIZE];
uint8_t dn_rm_spillblk[TXG_SIZE]; /* for removing spill blk */
uint16_t dn_next_bonuslen[TXG_SIZE];
uint32_t dn_next_blksz[TXG_SIZE]; /* next block size in bytes */
uint64_t dn_next_maxblkid[TXG_SIZE]; /* next maxblkid in bytes */
/* protected by dn_dbufs_mtx; declared here to fill 32-bit hole */
uint32_t dn_dbufs_count; /* count of dn_dbufs */
/* protected by os_lock: */
multilist_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */
/* protected by dn_mtx: */
kmutex_t dn_mtx;
list_t dn_dirty_records[TXG_SIZE];
struct range_tree *dn_free_ranges[TXG_SIZE];
uint64_t dn_allocated_txg;
uint64_t dn_free_txg;
uint64_t dn_assigned_txg;
uint64_t dn_dirty_txg; /* txg dnode was last dirtied */
kcondvar_t dn_notxholds;
enum dnode_dirtycontext dn_dirtyctx;
uint8_t *dn_dirtyctx_firstset; /* dbg: contents meaningless */
/* protected by own devices */
refcount_t dn_tx_holds;
refcount_t dn_holds;
kmutex_t dn_dbufs_mtx;
/*
* Descendent dbufs, ordered by dbuf_compare. Note that dn_dbufs
* can contain multiple dbufs of the same (level, blkid) when a
* dbuf is marked DB_EVICTING without being removed from
* dn_dbufs. To maintain the avl invariant that there cannot be
* duplicate entries, we order the dbufs by an arbitrary value -
* their address in memory. This means that dn_dbufs cannot be used to
* directly look up a dbuf. Instead, callers must use avl_walk, have
* a reference to the dbuf, or look up a non-existent node with
* db_state = DB_SEARCH (see dbuf_free_range for an example).
*/
avl_tree_t dn_dbufs;
/* protected by dn_struct_rwlock */
struct dmu_buf_impl *dn_bonus; /* bonus buffer dbuf */
boolean_t dn_have_spill; /* have spill or are spilling */
/* parent IO for current sync write */
zio_t *dn_zio;
/* used in syncing context */
uint64_t dn_oldused; /* old phys used bytes */
uint64_t dn_oldflags; /* old phys dn_flags */
uint64_t dn_olduid, dn_oldgid, dn_oldprojid;
uint64_t dn_newuid, dn_newgid, dn_newprojid;
int dn_id_flags;
/* holds prefetch structure */
struct zfetch dn_zfetch;
};
/*
* Adds a level of indirection between the dbuf and the dnode to avoid
* iterating descendent dbufs in dnode_move(). Handles are not allocated
* individually, but as an array of child dnodes in dnode_hold_impl().
*/
typedef struct dnode_handle {
/* Protects dnh_dnode from modification by dnode_move(). */
zrlock_t dnh_zrlock;
dnode_t *dnh_dnode;
} dnode_handle_t;
typedef struct dnode_children {
dmu_buf_user_t dnc_dbu; /* User evict data */
size_t dnc_count; /* number of children */
dnode_handle_t dnc_children[]; /* sized dynamically */
} dnode_children_t;
typedef struct free_range {
avl_node_t fr_node;
uint64_t fr_blkid;
uint64_t fr_nblks;
} free_range_t;
void dnode_special_open(struct objset *dd, dnode_phys_t *dnp,
uint64_t object, dnode_handle_t *dnh);
void dnode_special_close(dnode_handle_t *dnh);
void dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx);
void dnode_setbonus_type(dnode_t *dn, dmu_object_type_t, dmu_tx_t *tx);
void dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx);
int dnode_hold(struct objset *dd, uint64_t object,
void *ref, dnode_t **dnp);
int dnode_hold_impl(struct objset *dd, uint64_t object, int flag, int dn_slots,
void *ref, dnode_t **dnp);
boolean_t dnode_add_ref(dnode_t *dn, void *ref);
void dnode_rele(dnode_t *dn, void *ref);
void dnode_rele_and_unlock(dnode_t *dn, void *tag);
void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx);
void dnode_sync(dnode_t *dn, dmu_tx_t *tx);
void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx);
void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx);
void dnode_free(dnode_t *dn, dmu_tx_t *tx);
void dnode_byteswap(dnode_phys_t *dnp);
void dnode_buf_byteswap(void *buf, size_t size);
void dnode_verify(dnode_t *dn);
int dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx);
int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx);
void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx);
void dnode_diduse_space(dnode_t *dn, int64_t space);
void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t);
uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid);
void dnode_init(void);
void dnode_fini(void);
int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off,
int minlvl, uint64_t blkfill, uint64_t txg);
void dnode_evict_dbufs(dnode_t *dn);
void dnode_evict_bonus(dnode_t *dn);
void dnode_free_interior_slots(dnode_t *dn);
+boolean_t dnode_needs_remap(const dnode_t *dn);
#define DNODE_IS_DIRTY(_dn) \
((_dn)->dn_dirty_txg >= spa_syncing_txg((_dn)->dn_objset->os_spa))
#define DNODE_IS_CACHEABLE(_dn) \
((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL || \
(DMU_OT_IS_METADATA((_dn)->dn_type) && \
(_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA))
#define DNODE_META_IS_CACHEABLE(_dn) \
((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL || \
(_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA)
/*
* Used for dnodestats kstat.
*/
typedef struct dnode_stats {
/*
* Number of failed attempts to hold a meta dnode dbuf.
*/
kstat_named_t dnode_hold_dbuf_hold;
/*
* Number of failed attempts to read a meta dnode dbuf.
*/
kstat_named_t dnode_hold_dbuf_read;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was able
* to hold the requested object number which was allocated. This is
* the common case when looking up any allocated object number.
*/
kstat_named_t dnode_hold_alloc_hits;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was not
* able to hold the requested object number because it was not allocated.
*/
kstat_named_t dnode_hold_alloc_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) was not
* able to hold the requested object number because the object number
* refers to an interior large dnode slot.
*/
kstat_named_t dnode_hold_alloc_interior;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) needed
* to retry acquiring slot zrl locks due to contention.
*/
kstat_named_t dnode_hold_alloc_lock_retry;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) did not
* need to create the dnode because another thread did so after
* dropping the read lock but before acquiring the write lock.
*/
kstat_named_t dnode_hold_alloc_lock_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_ALLOCATED) found
* a free dnode instantiated by dnode_create() but not yet allocated
* by dnode_allocate().
*/
kstat_named_t dnode_hold_alloc_type_none;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was able
* to hold the requested range of free dnode slots.
*/
kstat_named_t dnode_hold_free_hits;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was not
* able to hold the requested range of free dnode slots because
* at least one slot was allocated.
*/
kstat_named_t dnode_hold_free_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) was not
* able to hold the requested range of free dnode slots because
* after acquiring the zrl lock at least one slot was allocated.
*/
kstat_named_t dnode_hold_free_lock_misses;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) needed
* to retry acquiring slot zrl locks due to contention.
*/
kstat_named_t dnode_hold_free_lock_retry;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
* a range of dnode slots which were held by another thread.
*/
kstat_named_t dnode_hold_free_refcount;
/*
* Number of times dnode_hold(..., DNODE_MUST_BE_FREE) requested
* a range of dnode slots which would overflow the dnode_phys_t.
*/
kstat_named_t dnode_hold_free_overflow;
/*
* Number of times a dnode_hold(...) was attempted on a dnode
* which had already been unlinked in an earlier txg.
*/
kstat_named_t dnode_hold_free_txg;
/*
* Number of times dnode_free_interior_slots() needed to retry
* acquiring a slot zrl lock due to contention.
*/
kstat_named_t dnode_free_interior_lock_retry;
/*
* Number of new dnodes allocated by dnode_allocate().
*/
kstat_named_t dnode_allocate;
/*
* Number of dnodes re-allocated by dnode_reallocate().
*/
kstat_named_t dnode_reallocate;
/*
* Number of meta dnode dbufs evicted.
*/
kstat_named_t dnode_buf_evict;
/*
* Number of times dmu_object_alloc*() reached the end of the existing
* object ID chunk and advanced to a new one.
*/
kstat_named_t dnode_alloc_next_chunk;
/*
* Number of times multiple threads attempted to allocate a dnode
* from the same block of free dnodes.
*/
kstat_named_t dnode_alloc_race;
/*
* Number of times dmu_object_alloc*() was forced to advance to the
* next meta dnode dbuf due to an error from dmu_object_next().
*/
kstat_named_t dnode_alloc_next_block;
/*
* Statistics for tracking dnodes which have been moved.
*/
kstat_named_t dnode_move_invalid;
kstat_named_t dnode_move_recheck1;
kstat_named_t dnode_move_recheck2;
kstat_named_t dnode_move_special;
kstat_named_t dnode_move_handle;
kstat_named_t dnode_move_rwlock;
kstat_named_t dnode_move_active;
} dnode_stats_t;
extern dnode_stats_t dnode_stats;
#define DNODE_STAT_INCR(stat, val) \
atomic_add_64(&dnode_stats.stat.value.ui64, (val));
#define DNODE_STAT_BUMP(stat) \
DNODE_STAT_INCR(stat, 1);
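Typical usage mirrors the counters documented above; for example (illustrative only, "skipped" is a hypothetical local count):

/* e.g. on a successful hold of an allocated object: */
DNODE_STAT_BUMP(dnode_hold_alloc_hits);

/* or adding an arbitrary amount: */
DNODE_STAT_INCR(dnode_hold_alloc_interior, skipped);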
#ifdef ZFS_DEBUG
/*
* There should be a ## between the string literal and fmt, to make it
* clear that we're joining two strings together, but that piece of shit
* gcc doesn't support that preprocessor token.
*/
#define dprintf_dnode(dn, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char __db_buf[32]; \
uint64_t __db_obj = (dn)->dn_object; \
if (__db_obj == DMU_META_DNODE_OBJECT) \
(void) strcpy(__db_buf, "mdn"); \
else \
(void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \
(u_longlong_t)__db_obj);\
dprintf_ds((dn)->dn_objset->os_dsl_dataset, "obj=%s " fmt, \
__db_buf, __VA_ARGS__); \
} \
_NOTE(CONSTCOND) } while (0)
#define DNODE_VERIFY(dn) dnode_verify(dn)
#define FREE_VERIFY(db, start, end, tx) free_verify(db, start, end, tx)
#else
#define dprintf_dnode(db, fmt, ...)
#define DNODE_VERIFY(dn)
#define FREE_VERIFY(db, start, end, tx)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DNODE_H */
diff --git a/include/sys/dsl_dataset.h b/include/sys/dsl_dataset.h
index 0030fca24444..abd178e300f2 100644
--- a/include/sys/dsl_dataset.h
+++ b/include/sys/dsl_dataset.h
@@ -1,441 +1,471 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DSL_DATASET_H
#define _SYS_DSL_DATASET_H
#include <sys/dmu.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/bplist.h>
#include <sys/dsl_synctask.h>
#include <sys/zfs_context.h>
#include <sys/dsl_deadlist.h>
#include <sys/refcount.h>
#include <sys/rrwlock.h>
#include <sys/dsl_crypt.h>
#include <zfeature_common.h>
#ifdef __cplusplus
extern "C" {
#endif
struct dsl_dataset;
struct dsl_dir;
struct dsl_pool;
struct dsl_crypto_params;
#define DS_FLAG_INCONSISTENT (1ULL<<0)
#define DS_IS_INCONSISTENT(ds) \
(dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT)
/*
* Do not allow this dataset to be promoted.
*/
#define DS_FLAG_NOPROMOTE (1ULL<<1)
/*
* DS_FLAG_UNIQUE_ACCURATE is set if ds_unique_bytes has been correctly
* calculated for head datasets (starting with SPA_VERSION_UNIQUE_ACCURATE,
* refquota/refreservations).
*/
#define DS_FLAG_UNIQUE_ACCURATE (1ULL<<2)
/*
* DS_FLAG_DEFER_DESTROY is set after 'zfs destroy -d' has been called
* on a dataset. This allows the dataset to be destroyed using 'zfs release'.
*/
#define DS_FLAG_DEFER_DESTROY (1ULL<<3)
#define DS_IS_DEFER_DESTROY(ds) \
(dsl_dataset_phys(ds)->ds_flags & DS_FLAG_DEFER_DESTROY)
/*
* DS_FIELD_* are strings that are used in the "extensified" dataset zap object.
* They should be of the format <reverse-dns>:<field>.
*/
/*
* This field's value is the object ID of a zap object which contains the
* bookmarks of this dataset. If it is present, then this dataset is counted
* in the refcount of the SPA_FEATURES_BOOKMARKS feature.
*/
#define DS_FIELD_BOOKMARK_NAMES "com.delphix:bookmarks"
/*
* This field is present (with value=0) if this dataset may contain large
* dnodes (>512B). If it is present, then this dataset is counted in the
* refcount of the SPA_FEATURE_LARGE_DNODE feature.
*/
#define DS_FIELD_LARGE_DNODE "org.zfsonlinux:large_dnode"
/*
* These fields are set on datasets that are in the middle of a resumable
* receive, and allow the sender to resume the send if it is interrupted.
*/
#define DS_FIELD_RESUME_FROMGUID "com.delphix:resume_fromguid"
#define DS_FIELD_RESUME_TONAME "com.delphix:resume_toname"
#define DS_FIELD_RESUME_TOGUID "com.delphix:resume_toguid"
#define DS_FIELD_RESUME_OBJECT "com.delphix:resume_object"
#define DS_FIELD_RESUME_OFFSET "com.delphix:resume_offset"
#define DS_FIELD_RESUME_BYTES "com.delphix:resume_bytes"
#define DS_FIELD_RESUME_LARGEBLOCK "com.delphix:resume_largeblockok"
#define DS_FIELD_RESUME_EMBEDOK "com.delphix:resume_embedok"
#define DS_FIELD_RESUME_COMPRESSOK "com.delphix:resume_compressok"
#define DS_FIELD_RESUME_RAWOK "com.datto:resume_rawok"
+/*
+ * This field is set to the object number of the remap deadlist if one exists.
+ */
+#define DS_FIELD_REMAP_DEADLIST "com.delphix:remap_deadlist"
+
/*
* DS_FLAG_CI_DATASET is set if the dataset contains a file system whose
* name lookups should be performed case-insensitively.
*/
#define DS_FLAG_CI_DATASET (1ULL<<16)
#define DS_CREATE_FLAG_NODIRTY (1ULL<<24)
typedef struct dsl_dataset_phys {
uint64_t ds_dir_obj; /* DMU_OT_DSL_DIR */
uint64_t ds_prev_snap_obj; /* DMU_OT_DSL_DATASET */
uint64_t ds_prev_snap_txg;
uint64_t ds_next_snap_obj; /* DMU_OT_DSL_DATASET */
uint64_t ds_snapnames_zapobj; /* DMU_OT_DSL_DS_SNAP_MAP 0 for snaps */
uint64_t ds_num_children; /* clone/snap children; ==0 for head */
uint64_t ds_creation_time; /* seconds since 1970 */
uint64_t ds_creation_txg;
uint64_t ds_deadlist_obj; /* DMU_OT_DEADLIST */
/*
* ds_referenced_bytes, ds_compressed_bytes, and ds_uncompressed_bytes
* include all blocks referenced by this dataset, including those
* shared with any other datasets.
*/
uint64_t ds_referenced_bytes;
uint64_t ds_compressed_bytes;
uint64_t ds_uncompressed_bytes;
uint64_t ds_unique_bytes; /* only relevant to snapshots */
/*
* The ds_fsid_guid is a 56-bit ID that can change to avoid
* collisions. The ds_guid is a 64-bit ID that will never
* change, so there is a small probability that it will collide.
*/
uint64_t ds_fsid_guid;
uint64_t ds_guid;
uint64_t ds_flags; /* DS_FLAG_* */
blkptr_t ds_bp;
uint64_t ds_next_clones_obj; /* DMU_OT_DSL_CLONES */
uint64_t ds_props_obj; /* DMU_OT_DSL_PROPS for snaps */
uint64_t ds_userrefs_obj; /* DMU_OT_USERREFS */
uint64_t ds_pad[5]; /* pad out to 320 bytes for good measure */
} dsl_dataset_phys_t;
typedef struct dsl_dataset {
dmu_buf_user_t ds_dbu;
rrwlock_t ds_bp_rwlock; /* Protects ds_phys->ds_bp */
/* Immutable: */
struct dsl_dir *ds_dir;
dmu_buf_t *ds_dbuf;
uint64_t ds_object;
uint64_t ds_fsid_guid;
boolean_t ds_is_snapshot;
/* only used in syncing context, only valid for non-snapshots: */
struct dsl_dataset *ds_prev;
uint64_t ds_bookmarks; /* DMU_OTN_ZAP_METADATA */
/* has internal locking: */
dsl_deadlist_t ds_deadlist;
bplist_t ds_pending_deadlist;
+ /*
+ * The remap deadlist contains blocks (DVA's, really) that are
+ * referenced by the previous snapshot and point to indirect vdevs,
+ * but in this dataset they have been remapped to point to concrete
+ * (or at least, less-indirect) vdevs. In other words, the
+ * physical DVA is referenced by the previous snapshot but not by
+ * this dataset. Logically, the DVA continues to be referenced,
+ * but we are using a different (less indirect) physical DVA.
+ * This deadlist is used to determine when physical DVAs that
+ * point to indirect vdevs are no longer referenced anywhere,
+ * and thus should be marked obsolete.
+ *
+ * This is only used if SPA_FEATURE_OBSOLETE_COUNTS is enabled.
+ */
+ dsl_deadlist_t ds_remap_deadlist;
+ /* protects creation of the ds_remap_deadlist */
+ kmutex_t ds_remap_deadlist_lock;
+
/* protected by lock on pool's dp_dirty_datasets list */
txg_node_t ds_dirty_link;
list_node_t ds_synced_link;
/*
* ds_phys->ds_<accounting> is also protected by ds_lock.
* Protected by ds_lock:
*/
kmutex_t ds_lock;
objset_t *ds_objset;
uint64_t ds_userrefs;
void *ds_owner;
/*
* Long holds prevent the ds from being destroyed; they allow the
* ds to remain held even after dropping the dp_config_rwlock.
* Owning counts as a long hold. See the comments above
* dsl_pool_hold() for details.
*/
refcount_t ds_longholds;
/* no locking; only for making guesses */
uint64_t ds_trysnap_txg;
/* for objset_open() */
kmutex_t ds_opening_lock;
uint64_t ds_reserved; /* cached refreservation */
uint64_t ds_quota; /* cached refquota */
kmutex_t ds_sendstream_lock;
list_t ds_sendstreams;
/*
* When in the middle of a resumable receive, tracks how much
* progress we have made.
*/
uint64_t ds_resume_object[TXG_SIZE];
uint64_t ds_resume_offset[TXG_SIZE];
uint64_t ds_resume_bytes[TXG_SIZE];
/* Protected by our dsl_dir's dd_lock */
list_t ds_prop_cbs;
/*
* For ZFEATURE_FLAG_PER_DATASET features, set if this dataset
* uses this feature.
*/
uint8_t ds_feature_inuse[SPA_FEATURES];
/*
* Set if we need to activate the feature on this dataset this txg
* (used only in syncing context).
*/
uint8_t ds_feature_activation_needed[SPA_FEATURES];
/* Protected by ds_lock; keep at end of struct for better locality */
char ds_snapname[ZFS_MAX_DATASET_NAME_LEN];
} dsl_dataset_t;
static inline dsl_dataset_phys_t *
dsl_dataset_phys(dsl_dataset_t *ds)
{
return (ds->ds_dbuf->db_data);
}
typedef struct dsl_dataset_promote_arg {
const char *ddpa_clonename;
dsl_dataset_t *ddpa_clone;
list_t shared_snaps, origin_snaps, clone_snaps;
dsl_dataset_t *origin_origin; /* origin of the origin */
uint64_t used, comp, uncomp, unique, cloneusedsnap, originusedsnap;
nvlist_t *err_ds;
cred_t *cr;
} dsl_dataset_promote_arg_t;
typedef struct dsl_dataset_rollback_arg {
const char *ddra_fsname;
const char *ddra_tosnap;
void *ddra_owner;
nvlist_t *ddra_result;
} dsl_dataset_rollback_arg_t;
typedef struct dsl_dataset_snapshot_arg {
nvlist_t *ddsa_snaps;
nvlist_t *ddsa_props;
nvlist_t *ddsa_errors;
cred_t *ddsa_cr;
} dsl_dataset_snapshot_arg_t;
/*
* The max length of a temporary tag prefix is the number of hex digits
* required to express UINT64_MAX plus one for the hyphen.
*/
#define MAX_TAG_PREFIX_LEN 17
#define dsl_dataset_is_snapshot(ds) \
(dsl_dataset_phys(ds)->ds_num_children != 0)
#define DS_UNIQUE_IS_ACCURATE(ds) \
((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_UNIQUE_ACCURATE) != 0)
/* flags for holding the dataset */
typedef enum ds_hold_flags {
DS_HOLD_FLAG_DECRYPT = 1 << 0 /* needs access to encrypted data */
} ds_hold_flags_t;
int dsl_dataset_hold(struct dsl_pool *dp, const char *name, void *tag,
dsl_dataset_t **dsp);
int dsl_dataset_hold_flags(struct dsl_pool *dp, const char *name,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp);
boolean_t dsl_dataset_try_add_ref(struct dsl_pool *dp, dsl_dataset_t *ds,
void *tag);
int dsl_dataset_hold_obj(struct dsl_pool *dp, uint64_t dsobj, void *tag,
dsl_dataset_t **);
int dsl_dataset_hold_obj_flags(struct dsl_pool *dp, uint64_t dsobj,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **);
void dsl_dataset_rele(dsl_dataset_t *ds, void *tag);
void dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags,
void *tag);
int dsl_dataset_own(struct dsl_pool *dp, const char *name,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp);
int dsl_dataset_own_obj(struct dsl_pool *dp, uint64_t dsobj,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp);
void dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag);
void dsl_dataset_name(dsl_dataset_t *ds, char *name);
int dsl_dataset_namelen(dsl_dataset_t *ds);
boolean_t dsl_dataset_has_owner(dsl_dataset_t *ds);
boolean_t dsl_dataset_tryown(dsl_dataset_t *ds, void *tag);
uint64_t dsl_dataset_create_sync(dsl_dir_t *pds, const char *lastname,
dsl_dataset_t *origin, uint64_t flags, cred_t *,
struct dsl_crypto_params *, dmu_tx_t *);
uint64_t dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
struct dsl_crypto_params *dcp, uint64_t flags, dmu_tx_t *tx);
void dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx);
int dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx);
int dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors);
void dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx);
int dsl_dataset_promote_check(void *arg, dmu_tx_t *tx);
int dsl_dataset_promote(const char *name, char *conflsnap);
int dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive);
int dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
minor_t cleanup_minor, const char *htag);
blkptr_t *dsl_dataset_get_blkptr(dsl_dataset_t *ds);
spa_t *dsl_dataset_get_spa(dsl_dataset_t *ds);
boolean_t dsl_dataset_modified_since_snap(dsl_dataset_t *ds,
dsl_dataset_t *snap);
void dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx);
void dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx);
void dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp,
dmu_tx_t *tx);
int dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp,
dmu_tx_t *tx, boolean_t async);
+void dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev,
+ uint64_t offset, uint64_t size, uint64_t birth, dmu_tx_t *tx);
int dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name,
uint64_t *value);
void dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx);
int get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val);
char *get_receive_resume_stats_impl(dsl_dataset_t *ds);
char *get_child_receive_stats(dsl_dataset_t *ds);
uint64_t dsl_get_refratio(dsl_dataset_t *ds);
uint64_t dsl_get_logicalreferenced(dsl_dataset_t *ds);
uint64_t dsl_get_compressratio(dsl_dataset_t *ds);
uint64_t dsl_get_used(dsl_dataset_t *ds);
uint64_t dsl_get_creation(dsl_dataset_t *ds);
uint64_t dsl_get_creationtxg(dsl_dataset_t *ds);
uint64_t dsl_get_refquota(dsl_dataset_t *ds);
uint64_t dsl_get_refreservation(dsl_dataset_t *ds);
uint64_t dsl_get_guid(dsl_dataset_t *ds);
uint64_t dsl_get_unique(dsl_dataset_t *ds);
uint64_t dsl_get_objsetid(dsl_dataset_t *ds);
uint64_t dsl_get_userrefs(dsl_dataset_t *ds);
uint64_t dsl_get_defer_destroy(dsl_dataset_t *ds);
uint64_t dsl_get_referenced(dsl_dataset_t *ds);
uint64_t dsl_get_numclones(dsl_dataset_t *ds);
uint64_t dsl_get_inconsistent(dsl_dataset_t *ds);
uint64_t dsl_get_available(dsl_dataset_t *ds);
int dsl_get_written(dsl_dataset_t *ds, uint64_t *written);
int dsl_get_prev_snap(dsl_dataset_t *ds, char *snap);
int dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
char *source);
void get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv);
void dsl_dataset_stats(dsl_dataset_t *os, nvlist_t *nv);
void dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat);
void dsl_dataset_space(dsl_dataset_t *ds,
uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp);
uint64_t dsl_dataset_fsid_guid(dsl_dataset_t *ds);
int dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
int dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap, dsl_dataset_t *last,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
boolean_t dsl_dataset_is_dirty(dsl_dataset_t *ds);
int dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf);
int dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
uint64_t asize, uint64_t inflight, uint64_t *used,
uint64_t *ref_rsrv);
int dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
uint64_t quota);
int dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
uint64_t reservation);
boolean_t dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
uint64_t earlier_txg);
void dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag);
void dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag);
boolean_t dsl_dataset_long_held(dsl_dataset_t *ds);
int dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx);
void dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, dmu_tx_t *tx);
int dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr);
void dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx);
void dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx);
void dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds);
int dsl_dataset_get_snapname(dsl_dataset_t *ds);
int dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name,
uint64_t *value);
int dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
boolean_t adj_cnt);
void dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
zprop_source_t source, uint64_t value, dmu_tx_t *tx);
void dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx);
boolean_t dsl_dataset_is_zapified(dsl_dataset_t *ds);
boolean_t dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds);
int dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx);
void dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx);
int dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
nvlist_t *result);
+uint64_t dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds);
+void dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx);
+boolean_t dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds);
+void dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx);
+
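Given DS_FIELD_REMAP_DEADLIST above, one plausible way to retrieve the remap deadlist object is a zap lookup against the zapified dataset object; this is a hedged sketch, not necessarily how the patch implements it:

#include <sys/dsl_dataset.h>
#include <sys/zap.h>

/* Sketch: return the remap deadlist object number, or 0 if none is recorded. */
static uint64_t
remap_deadlist_obj(dsl_dataset_t *ds)
{
	uint64_t obj = 0;

	if (!dsl_dataset_is_zapified(ds))
		return (0);
	(void) zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
	    DS_FIELD_REMAP_DEADLIST, sizeof (obj), 1, &obj);
	return (obj);
}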
void dsl_dataset_activate_feature(uint64_t dsobj,
spa_feature_t f, dmu_tx_t *tx);
void dsl_dataset_deactivate_feature(uint64_t dsobj,
spa_feature_t f, dmu_tx_t *tx);
#ifdef ZFS_DEBUG
#define dprintf_ds(ds, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__ds_name = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); \
dsl_dataset_name(ds, __ds_name); \
dprintf("ds=%s " fmt, __ds_name, __VA_ARGS__); \
kmem_free(__ds_name, ZFS_MAX_DATASET_NAME_LEN); \
} \
_NOTE(CONSTCOND) } while (0)
#else
#define dprintf_ds(dd, fmt, ...)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_DATASET_H */
diff --git a/include/sys/dsl_deadlist.h b/include/sys/dsl_deadlist.h
index d2c16d72c17e..08f38233d7ab 100644
--- a/include/sys/dsl_deadlist.h
+++ b/include/sys/dsl_deadlist.h
@@ -1,87 +1,89 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2015 by Delphix. All rights reserved.
*/
#ifndef _SYS_DSL_DEADLIST_H
#define _SYS_DSL_DEADLIST_H
#include <sys/bpobj.h>
#include <sys/zfs_context.h>
#ifdef __cplusplus
extern "C" {
#endif
struct dmu_buf;
struct dsl_dataset;
typedef struct dsl_deadlist_phys {
uint64_t dl_used;
uint64_t dl_comp;
uint64_t dl_uncomp;
uint64_t dl_pad[37]; /* pad out to 320b for future expansion */
} dsl_deadlist_phys_t;
typedef struct dsl_deadlist {
objset_t *dl_os;
uint64_t dl_object;
avl_tree_t dl_tree;
boolean_t dl_havetree;
struct dmu_buf *dl_dbuf;
dsl_deadlist_phys_t *dl_phys;
kmutex_t dl_lock;
/* if it's the old on-disk format: */
bpobj_t dl_bpobj;
boolean_t dl_oldfmt;
} dsl_deadlist_t;
typedef struct dsl_deadlist_entry {
avl_node_t dle_node;
uint64_t dle_mintxg;
bpobj_t dle_bpobj;
} dsl_deadlist_entry_t;
void dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object);
void dsl_deadlist_close(dsl_deadlist_t *dl);
uint64_t dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx);
void dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx);
void dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, dmu_tx_t *tx);
void dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx);
void dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx);
uint64_t dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
uint64_t mrs_obj, dmu_tx_t *tx);
void dsl_deadlist_space(dsl_deadlist_t *dl,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
void dsl_deadlist_space_range(dsl_deadlist_t *dl,
uint64_t mintxg, uint64_t maxtxg,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp);
void dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx);
void dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
dmu_tx_t *tx);
+boolean_t dsl_deadlist_is_open(dsl_deadlist_t *dl);
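A minimal usage sketch of this interface (hypothetical caller): open a deadlist by object number, read its space totals, and close it.

#include <sys/dsl_deadlist.h>

/* Sketch: report the used/compressed/uncompressed totals of a deadlist. */
static void
deadlist_space_totals(objset_t *os, uint64_t dlobj,
    uint64_t *used, uint64_t *comp, uint64_t *uncomp)
{
	dsl_deadlist_t dl;

	dsl_deadlist_open(&dl, os, dlobj);
	dsl_deadlist_space(&dl, used, comp, uncomp);
	dsl_deadlist_close(&dl);
}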
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_DEADLIST_H */
diff --git a/include/sys/dsl_deleg.h b/include/sys/dsl_deleg.h
index eb95c68e8a9d..bb28014ac358 100644
--- a/include/sys/dsl_deleg.h
+++ b/include/sys/dsl_deleg.h
@@ -1,90 +1,91 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
*/
#ifndef _SYS_DSL_DELEG_H
#define _SYS_DSL_DELEG_H
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/zfs_context.h>
#ifdef __cplusplus
extern "C" {
#endif
#define ZFS_DELEG_PERM_NONE ""
#define ZFS_DELEG_PERM_CREATE "create"
#define ZFS_DELEG_PERM_DESTROY "destroy"
#define ZFS_DELEG_PERM_SNAPSHOT "snapshot"
#define ZFS_DELEG_PERM_ROLLBACK "rollback"
#define ZFS_DELEG_PERM_CLONE "clone"
#define ZFS_DELEG_PERM_PROMOTE "promote"
#define ZFS_DELEG_PERM_RENAME "rename"
#define ZFS_DELEG_PERM_MOUNT "mount"
#define ZFS_DELEG_PERM_SHARE "share"
#define ZFS_DELEG_PERM_SEND "send"
#define ZFS_DELEG_PERM_RECEIVE "receive"
#define ZFS_DELEG_PERM_ALLOW "allow"
#define ZFS_DELEG_PERM_USERPROP "userprop"
#define ZFS_DELEG_PERM_VSCAN "vscan"
#define ZFS_DELEG_PERM_USERQUOTA "userquota"
#define ZFS_DELEG_PERM_GROUPQUOTA "groupquota"
#define ZFS_DELEG_PERM_USEROBJQUOTA "userobjquota"
#define ZFS_DELEG_PERM_GROUPOBJQUOTA "groupobjquota"
#define ZFS_DELEG_PERM_USERUSED "userused"
#define ZFS_DELEG_PERM_GROUPUSED "groupused"
#define ZFS_DELEG_PERM_USEROBJUSED "userobjused"
#define ZFS_DELEG_PERM_GROUPOBJUSED "groupobjused"
#define ZFS_DELEG_PERM_HOLD "hold"
#define ZFS_DELEG_PERM_RELEASE "release"
#define ZFS_DELEG_PERM_DIFF "diff"
#define ZFS_DELEG_PERM_BOOKMARK "bookmark"
+#define ZFS_DELEG_PERM_REMAP "remap"
#define ZFS_DELEG_PERM_LOAD_KEY "load-key"
#define ZFS_DELEG_PERM_CHANGE_KEY "change-key"
#define ZFS_DELEG_PERM_PROJECTUSED "projectused"
#define ZFS_DELEG_PERM_PROJECTQUOTA "projectquota"
#define ZFS_DELEG_PERM_PROJECTOBJUSED "projectobjused"
#define ZFS_DELEG_PERM_PROJECTOBJQUOTA "projectobjquota"
/*
* Note: the names of properties that are marked delegatable are also
* valid delegated permissions
*/
int dsl_deleg_get(const char *ddname, nvlist_t **nvp);
int dsl_deleg_set(const char *ddname, nvlist_t *nvp, boolean_t unset);
int dsl_deleg_access(const char *ddname, const char *perm, cred_t *cr);
int dsl_deleg_access_impl(struct dsl_dataset *ds, const char *perm, cred_t *cr);
void dsl_deleg_set_create_perms(dsl_dir_t *dd, dmu_tx_t *tx, cred_t *cr);
int dsl_deleg_can_allow(char *ddname, nvlist_t *nvp, cred_t *cr);
int dsl_deleg_can_unallow(char *ddname, nvlist_t *nvp, cred_t *cr);
int dsl_deleg_destroy(objset_t *os, uint64_t zapobj, dmu_tx_t *tx);
boolean_t dsl_delegation_on(objset_t *os);
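A caller gating the new remap operation on delegated permissions would presumably check ZFS_DELEG_PERM_REMAP like any other permission string; a hedged sketch:

#include <sys/dsl_deleg.h>

/* Sketch: return 0 if cr holds the "remap" permission on dsname, else errno. */
static int
check_remap_perm(const char *dsname, cred_t *cr)
{
	return (dsl_deleg_access(dsname, ZFS_DELEG_PERM_REMAP, cr));
}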
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_DELEG_H */
diff --git a/include/sys/dsl_dir.h b/include/sys/dsl_dir.h
index 8b1e343a0bbf..58d243885bb9 100644
--- a/include/sys/dsl_dir.h
+++ b/include/sys/dsl_dir.h
@@ -1,209 +1,212 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#ifndef _SYS_DSL_DIR_H
#define _SYS_DSL_DIR_H
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/refcount.h>
#include <sys/zfs_context.h>
#include <sys/dsl_crypt.h>
#ifdef __cplusplus
extern "C" {
#endif
struct dsl_dataset;
/*
* DD_FIELD_* are strings that are used in the "extensified" dsl_dir zap object.
* They should be of the format <reverse-dns>:<field>.
*/
#define DD_FIELD_FILESYSTEM_COUNT "com.joyent:filesystem_count"
#define DD_FIELD_SNAPSHOT_COUNT "com.joyent:snapshot_count"
+#define DD_FIELD_LAST_REMAP_TXG "com.delphix:last_remap_txg"
#define DD_FIELD_CRYPTO_KEY_OBJ "com.datto:crypto_key_obj"
typedef enum dd_used {
DD_USED_HEAD,
DD_USED_SNAP,
DD_USED_CHILD,
DD_USED_CHILD_RSRV,
DD_USED_REFRSRV,
DD_USED_NUM
} dd_used_t;
#define DD_FLAG_USED_BREAKDOWN (1<<0)
typedef struct dsl_dir_phys {
uint64_t dd_creation_time; /* not actually used */
uint64_t dd_head_dataset_obj;
uint64_t dd_parent_obj;
uint64_t dd_origin_obj;
uint64_t dd_child_dir_zapobj;
/*
* how much space our children are accounting for; for leaf
* datasets, == physical space used by fs + snaps
*/
uint64_t dd_used_bytes;
uint64_t dd_compressed_bytes;
uint64_t dd_uncompressed_bytes;
/* Administrative quota setting */
uint64_t dd_quota;
/* Administrative reservation setting */
uint64_t dd_reserved;
uint64_t dd_props_zapobj;
uint64_t dd_deleg_zapobj; /* dataset delegation permissions */
uint64_t dd_flags;
uint64_t dd_used_breakdown[DD_USED_NUM];
uint64_t dd_clones; /* dsl_dir objects */
uint64_t dd_pad[13]; /* pad out to 256 bytes for good measure */
} dsl_dir_phys_t;
struct dsl_dir {
dmu_buf_user_t dd_dbu;
/* These are immutable; no lock needed: */
uint64_t dd_object;
uint64_t dd_crypto_obj;
dsl_pool_t *dd_pool;
/* Stable until user eviction; no lock needed: */
dmu_buf_t *dd_dbuf;
/* protected by lock on pool's dp_dirty_dirs list */
txg_node_t dd_dirty_link;
/* protected by dp_config_rwlock */
dsl_dir_t *dd_parent;
/* Protected by dd_lock */
kmutex_t dd_lock;
list_t dd_props; /* list of dsl_prop_record_t's */
timestruc_t dd_snap_cmtime; /* last time snapshot namespace changed */
uint64_t dd_origin_txg;
/* gross estimate of space used by in-flight tx's */
uint64_t dd_tempreserved[TXG_SIZE];
/* amount of space we expect to write; == amount of dirty data */
int64_t dd_space_towrite[TXG_SIZE];
/* protected by dd_lock; keep at end of struct for better locality */
char dd_myname[ZFS_MAX_DATASET_NAME_LEN];
};
static inline dsl_dir_phys_t *
dsl_dir_phys(dsl_dir_t *dd)
{
return (dd->dd_dbuf->db_data);
}
void dsl_dir_rele(dsl_dir_t *dd, void *tag);
void dsl_dir_async_rele(dsl_dir_t *dd, void *tag);
int dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
dsl_dir_t **, const char **tail);
int dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
const char *tail, void *tag, dsl_dir_t **);
void dsl_dir_name(dsl_dir_t *dd, char *buf);
int dsl_dir_namelen(dsl_dir_t *dd);
uint64_t dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds,
const char *name, dmu_tx_t *tx);
uint64_t dsl_dir_get_used(dsl_dir_t *dd);
uint64_t dsl_dir_get_quota(dsl_dir_t *dd);
uint64_t dsl_dir_get_reservation(dsl_dir_t *dd);
uint64_t dsl_dir_get_compressratio(dsl_dir_t *dd);
uint64_t dsl_dir_get_logicalused(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedsnap(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedds(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedrefreserv(dsl_dir_t *dd);
uint64_t dsl_dir_get_usedchild(dsl_dir_t *dd);
void dsl_dir_get_origin(dsl_dir_t *dd, char *buf);
int dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count);
int dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count);
void dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv);
uint64_t dsl_dir_space_available(dsl_dir_t *dd,
dsl_dir_t *ancestor, int64_t delta, int ondiskonly);
void dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx);
+int dsl_dir_get_remaptxg(dsl_dir_t *dd, uint64_t *count);
void dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx);
int dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t mem,
uint64_t asize, boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx);
void dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx);
void dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx);
void dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx);
void dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx);
int dsl_dir_set_quota(const char *ddname, zprop_source_t source,
uint64_t quota);
int dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
uint64_t reservation);
int dsl_dir_activate_fs_ss_limit(const char *);
int dsl_fs_ss_limit_check(dsl_dir_t *, uint64_t, zfs_prop_t, dsl_dir_t *,
cred_t *);
void dsl_fs_ss_count_adjust(dsl_dir_t *, int64_t, const char *, dmu_tx_t *);
+int dsl_dir_update_last_remap_txg(dsl_dir_t *, uint64_t);
int dsl_dir_rename(const char *oldname, const char *newname);
int dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *);
boolean_t dsl_dir_is_clone(dsl_dir_t *dd);
void dsl_dir_new_refreservation(dsl_dir_t *dd, struct dsl_dataset *ds,
uint64_t reservation, cred_t *cr, dmu_tx_t *tx);
void dsl_dir_snap_cmtime_update(dsl_dir_t *dd);
timestruc_t dsl_dir_snap_cmtime(dsl_dir_t *dd);
void dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value,
dmu_tx_t *tx);
void dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx);
boolean_t dsl_dir_is_zapified(dsl_dir_t *dd);
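The last-remap-txg bookkeeping added by this change is read back through dsl_dir_get_remaptxg(); a hedged usage sketch that treats "not recorded" as 0:

#include <sys/dsl_dir.h>

/* Sketch: txg of the most recent remap of this dsl_dir, or 0 if unknown. */
static uint64_t
last_remap_txg(dsl_dir_t *dd)
{
	uint64_t txg = 0;

	if (dsl_dir_get_remaptxg(dd, &txg) != 0)
		txg = 0;
	return (txg);
}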
/* internal reserved dir name */
#define MOS_DIR_NAME "$MOS"
#define ORIGIN_DIR_NAME "$ORIGIN"
-#define XLATION_DIR_NAME "$XLATION"
#define FREE_DIR_NAME "$FREE"
#define LEAK_DIR_NAME "$LEAK"
#ifdef ZFS_DEBUG
#define dprintf_dd(dd, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__ds_name = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP); \
dsl_dir_name(dd, __ds_name); \
dprintf("dd=%s " fmt, __ds_name, __VA_ARGS__); \
kmem_free(__ds_name, ZFS_MAX_DATASET_NAME_LEN); \
} \
_NOTE(CONSTCOND) } while (0)
#else
#define dprintf_dd(dd, fmt, ...)
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_DIR_H */
diff --git a/include/sys/dsl_pool.h b/include/sys/dsl_pool.h
index 9ceb59d9b638..c60e4bf9d039 100644
--- a/include/sys/dsl_pool.h
+++ b/include/sys/dsl_pool.h
@@ -1,187 +1,190 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2013, 2017 by Delphix. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
#ifndef _SYS_DSL_POOL_H
#define _SYS_DSL_POOL_H
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/txg_impl.h>
#include <sys/zfs_context.h>
#include <sys/zio.h>
#include <sys/dnode.h>
#include <sys/ddt.h>
#include <sys/arc.h>
#include <sys/bpobj.h>
#include <sys/bptree.h>
#include <sys/rrwlock.h>
#include <sys/mmp.h>
#ifdef __cplusplus
extern "C" {
#endif
extern int zfs_txg_synctime_ms;
struct objset;
struct dsl_dir;
struct dsl_dataset;
struct dsl_pool;
struct dmu_tx;
struct dsl_scan;
struct dsl_crypto_params;
extern unsigned long zfs_dirty_data_max;
extern unsigned long zfs_dirty_data_max_max;
extern unsigned long zfs_dirty_data_sync;
extern int zfs_dirty_data_max_percent;
extern int zfs_dirty_data_max_max_percent;
extern int zfs_delay_min_dirty_percent;
extern unsigned long zfs_delay_scale;
/* These macros are for indexing into the zfs_all_blkstats_t. */
#define DMU_OT_DEFERRED DMU_OT_NONE
#define DMU_OT_OTHER DMU_OT_NUMTYPES /* place holder for DMU_OT() types */
#define DMU_OT_TOTAL (DMU_OT_NUMTYPES + 1)
typedef struct zfs_blkstat {
uint64_t zb_count;
uint64_t zb_asize;
uint64_t zb_lsize;
uint64_t zb_psize;
uint64_t zb_gangs;
uint64_t zb_ditto_2_of_2_samevdev;
uint64_t zb_ditto_2_of_3_samevdev;
uint64_t zb_ditto_3_of_3_samevdev;
} zfs_blkstat_t;
typedef struct zfs_all_blkstats {
zfs_blkstat_t zab_type[DN_MAX_LEVELS + 1][DMU_OT_TOTAL + 1];
kmutex_t zab_lock;
} zfs_all_blkstats_t;
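The zab_type array is indexed by indirection level and object type, with DMU_OT_TOTAL (and the extra level row) reserved for aggregates; a hedged sketch of how one entry might be bumped under zab_lock (the totals-row convention shown here is an assumption):

#include <sys/dsl_pool.h>

/* Sketch: account one block of 'type' at 'level', plus an aggregate row. */
static void
blkstats_add(zfs_all_blkstats_t *zab, int level, int type, uint64_t lsize)
{
	mutex_enter(&zab->zab_lock);
	zab->zab_type[level][type].zb_count++;
	zab->zab_type[level][type].zb_lsize += lsize;
	zab->zab_type[DN_MAX_LEVELS][DMU_OT_TOTAL].zb_count++;
	zab->zab_type[DN_MAX_LEVELS][DMU_OT_TOTAL].zb_lsize += lsize;
	mutex_exit(&zab->zab_lock);
}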
typedef struct dsl_pool {
/* Immutable */
spa_t *dp_spa;
struct objset *dp_meta_objset;
struct dsl_dir *dp_root_dir;
struct dsl_dir *dp_mos_dir;
struct dsl_dir *dp_free_dir;
struct dsl_dir *dp_leak_dir;
struct dsl_dataset *dp_origin_snap;
uint64_t dp_root_dir_obj;
struct taskq *dp_iput_taskq;
/* No lock needed - sync context only */
blkptr_t dp_meta_rootbp;
uint64_t dp_tmp_userrefs_obj;
bpobj_t dp_free_bpobj;
uint64_t dp_bptree_obj;
uint64_t dp_empty_bpobj;
+ bpobj_t dp_obsolete_bpobj;
struct dsl_scan *dp_scan;
/* Uses dp_lock */
kmutex_t dp_lock;
kcondvar_t dp_spaceavail_cv;
uint64_t dp_dirty_pertxg[TXG_SIZE];
uint64_t dp_dirty_total;
uint64_t dp_long_free_dirty_pertxg[TXG_SIZE];
uint64_t dp_mos_used_delta;
uint64_t dp_mos_compressed_delta;
uint64_t dp_mos_uncompressed_delta;
/*
* Time of most recently scheduled (furthest in the future)
* wakeup for delayed transactions.
*/
hrtime_t dp_last_wakeup;
/* Has its own locking */
tx_state_t dp_tx;
txg_list_t dp_dirty_datasets;
txg_list_t dp_dirty_zilogs;
txg_list_t dp_dirty_dirs;
txg_list_t dp_sync_tasks;
taskq_t *dp_sync_taskq;
taskq_t *dp_zil_clean_taskq;
/*
* Protects administrative changes (properties, namespace)
*
* It is only held for write in syncing context. Therefore
* syncing context does not need to ever have it for read, since
* nobody else could possibly have it for write.
*/
rrwlock_t dp_config_rwlock;
zfs_all_blkstats_t *dp_blkstats;
} dsl_pool_t;
int dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp);
int dsl_pool_open(dsl_pool_t *dp);
void dsl_pool_close(dsl_pool_t *dp);
dsl_pool_t *dsl_pool_create(spa_t *spa, nvlist_t *zplprops,
struct dsl_crypto_params *dcp, uint64_t txg);
void dsl_pool_sync(dsl_pool_t *dp, uint64_t txg);
void dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg);
int dsl_pool_sync_context(dsl_pool_t *dp);
uint64_t dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree);
-uint64_t dsl_pool_adjustedfree(dsl_pool_t *dp, boolean_t netfree);
void dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx);
void dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg);
void dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp);
void dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg,
const blkptr_t *bpp);
void dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx);
void dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx);
void dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx);
void dsl_pool_mos_diduse_space(dsl_pool_t *dp,
int64_t used, int64_t comp, int64_t uncomp);
boolean_t dsl_pool_need_dirty_delay(dsl_pool_t *dp);
void dsl_pool_config_enter(dsl_pool_t *dp, void *tag);
void dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag);
void dsl_pool_config_exit(dsl_pool_t *dp, void *tag);
boolean_t dsl_pool_config_held(dsl_pool_t *dp);
boolean_t dsl_pool_config_held_writer(dsl_pool_t *dp);
taskq_t *dsl_pool_iput_taskq(dsl_pool_t *dp);
int dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj,
const char *tag, uint64_t now, dmu_tx_t *tx);
int dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj,
const char *tag, dmu_tx_t *tx);
void dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp);
int dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **);
int dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp);
void dsl_pool_rele(dsl_pool_t *dp, void *tag);
+void dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx);
+void dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx);
+
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_POOL_H */
diff --git a/include/sys/dsl_scan.h b/include/sys/dsl_scan.h
index 7a29d9788067..345d2754fb65 100644
--- a/include/sys/dsl_scan.h
+++ b/include/sys/dsl_scan.h
@@ -1,187 +1,188 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2017 Datto Inc.
*/
#ifndef _SYS_DSL_SCAN_H
#define _SYS_DSL_SCAN_H
#include <sys/zfs_context.h>
#include <sys/zio.h>
#include <sys/ddt.h>
#include <sys/bplist.h>
#ifdef __cplusplus
extern "C" {
#endif
struct objset;
struct dsl_dir;
struct dsl_dataset;
struct dsl_pool;
struct dmu_tx;
/*
* All members of this structure must be uint64_t, for byteswap
* purposes.
*/
typedef struct dsl_scan_phys {
uint64_t scn_func; /* pool_scan_func_t */
uint64_t scn_state; /* dsl_scan_state_t */
uint64_t scn_queue_obj;
uint64_t scn_min_txg;
uint64_t scn_max_txg;
uint64_t scn_cur_min_txg;
uint64_t scn_cur_max_txg;
uint64_t scn_start_time;
uint64_t scn_end_time;
uint64_t scn_to_examine; /* total bytes to be scanned */
uint64_t scn_examined; /* bytes scanned so far */
uint64_t scn_to_process;
uint64_t scn_processed;
uint64_t scn_errors; /* scan I/O error count */
uint64_t scn_ddt_class_max;
ddt_bookmark_t scn_ddt_bookmark;
zbookmark_phys_t scn_bookmark;
uint64_t scn_flags; /* dsl_scan_flags_t */
} dsl_scan_phys_t;
#define SCAN_PHYS_NUMINTS (sizeof (dsl_scan_phys_t) / sizeof (uint64_t))
typedef enum dsl_scan_flags {
DSF_VISIT_DS_AGAIN = 1<<0,
DSF_SCRUB_PAUSED = 1<<1,
} dsl_scan_flags_t;
#define DSL_SCAN_FLAGS_MASK (DSF_VISIT_DS_AGAIN)
/*
* Every pool will have one dsl_scan_t and this structure will contain
* in-memory information about the scan and a pointer to the on-disk
* representation (i.e. dsl_scan_phys_t). Most of the state of the scan
* is contained on-disk to allow the scan to resume in the event of a reboot
* or panic. This structure maintains information about the behavior of a
* running scan, some caching information, and how it should traverse the pool.
*
* The following members of this structure direct the behavior of the scan:
*
* scn_suspending - a scan that cannot be completed in a single txg or
* has exceeded its allotted time will need to suspend.
* When this flag is set the scanner will stop traversing
* the pool and write out the current state to disk.
*
* scn_restart_txg - directs the scanner to either restart or start
* a scan at the specified txg value.
*
* scn_done_txg - when a scan completes its traversal it will set
* the completion txg to the next txg. This is necessary
* to ensure that any blocks that were freed during
* the scan but have not yet been processed (i.e. deferred
* frees) are accounted for.
*
* This structure also maintains information about deferred frees, which are
* a special kind of traversal. Deferred frees can exist in either a bptree or
* a bpobj structure. The scn_is_bptree flag will indicate the type of
* deferred free that is in progress. If the deferred free is part of an
* asynchronous destroy then the scn_async_destroying flag will be set.
*/
typedef struct dsl_scan {
struct dsl_pool *scn_dp;
uint64_t scn_restart_txg;
uint64_t scn_done_txg;
uint64_t scn_sync_start_time;
uint64_t scn_issued_before_pass;
/* for freeing blocks */
boolean_t scn_is_bptree;
boolean_t scn_async_destroying;
boolean_t scn_async_stalled;
+ uint64_t scn_async_block_min_time_ms;
/* flags and stats for controlling scan state */
boolean_t scn_is_sorted; /* doing sequential scan */
boolean_t scn_clearing; /* scan is issuing sequential extents */
boolean_t scn_checkpointing; /* scan is issuing all queued extents */
boolean_t scn_suspending; /* scan is suspending until next txg */
uint64_t scn_last_checkpoint; /* time of last checkpoint */
/* members for thread synchronization */
zio_t *scn_zio_root; /* root zio for waiting on IO */
taskq_t *scn_taskq; /* task queue for issuing extents */
/* for controlling scan prefetch, protected by spa_scrub_lock */
boolean_t scn_prefetch_stop; /* prefetch should stop */
zbookmark_phys_t scn_prefetch_bookmark; /* prefetch start bookmark */
avl_tree_t scn_prefetch_queue; /* priority queue of prefetch IOs */
uint64_t scn_maxinflight_bytes; /* max bytes in flight for pool */
/* per txg statistics */
uint64_t scn_visited_this_txg; /* total bps visited this txg */
uint64_t scn_holes_this_txg;
uint64_t scn_lt_min_this_txg;
uint64_t scn_gt_max_this_txg;
uint64_t scn_ddt_contained_this_txg;
uint64_t scn_objsets_visited_this_txg;
uint64_t scn_avg_seg_size_this_txg;
uint64_t scn_segs_this_txg;
uint64_t scn_avg_zio_size_this_txg;
uint64_t scn_zios_this_txg;
/* members needed for syncing scan status to disk */
dsl_scan_phys_t scn_phys; /* on disk representation of scan */
dsl_scan_phys_t scn_phys_cached;
avl_tree_t scn_queue; /* queue of datasets to scan */
uint64_t scn_bytes_pending; /* outstanding data to issue */
} dsl_scan_t;
typedef struct dsl_scan_io_queue dsl_scan_io_queue_t;
void scan_init(void);
void scan_fini(void);
int dsl_scan_init(struct dsl_pool *dp, uint64_t txg);
void dsl_scan_fini(struct dsl_pool *dp);
void dsl_scan_sync(struct dsl_pool *, dmu_tx_t *);
int dsl_scan_cancel(struct dsl_pool *);
int dsl_scan(struct dsl_pool *, pool_scan_func_t);
boolean_t dsl_scan_scrubbing(const struct dsl_pool *dp);
int dsl_scrub_set_pause_resume(const struct dsl_pool *dp, pool_scrub_cmd_t cmd);
void dsl_resilver_restart(struct dsl_pool *, uint64_t txg);
boolean_t dsl_scan_resilvering(struct dsl_pool *dp);
boolean_t dsl_dataset_unstable(struct dsl_dataset *ds);
void dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
ddt_entry_t *dde, dmu_tx_t *tx);
void dsl_scan_ds_destroyed(struct dsl_dataset *ds, struct dmu_tx *tx);
void dsl_scan_ds_snapshotted(struct dsl_dataset *ds, struct dmu_tx *tx);
void dsl_scan_ds_clone_swapped(struct dsl_dataset *ds1, struct dsl_dataset *ds2,
struct dmu_tx *tx);
boolean_t dsl_scan_active(dsl_scan_t *scn);
boolean_t dsl_scan_is_paused_scrub(const dsl_scan_t *scn);
void dsl_scan_freed(spa_t *spa, const blkptr_t *bp);
void dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue);
void dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_DSL_SCAN_H */
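Keeping every dsl_scan_phys_t field a uint64_t means the on-disk record can be byteswapped as a flat array of SCAN_PHYS_NUMINTS words when it is read from a foreign-endian pool. A minimal sketch of that idea (the helper name is hypothetical; BSWAP_64 is the usual ZFS byteswap macro):

/* Illustrative: swap the on-disk scan record one 64-bit word at a time. */
static void
example_scan_phys_byteswap(dsl_scan_phys_t *scn_phys)
{
	uint64_t *word = (uint64_t *)scn_phys;

	for (int i = 0; i < SCAN_PHYS_NUMINTS; i++)
		word[i] = BSWAP_64(word[i]);
}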
diff --git a/include/sys/fs/zfs.h b/include/sys/fs/zfs.h
index e40c427f61d9..de3b729ebe04 100644
--- a/include/sys/fs/zfs.h
+++ b/include/sys/fs/zfs.h
@@ -1,1290 +1,1320 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013, 2017 Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright (c) 2017 Datto Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
#ifndef _SYS_FS_ZFS_H
#define _SYS_FS_ZFS_H
#include <sys/time.h>
#include <sys/zio_priority.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Types and constants shared between userland and the kernel.
*/
/*
* Each dataset can be one of the following types. These constants can be
* combined into masks that can be passed to various functions.
*/
typedef enum {
ZFS_TYPE_FILESYSTEM = (1 << 0),
ZFS_TYPE_SNAPSHOT = (1 << 1),
ZFS_TYPE_VOLUME = (1 << 2),
ZFS_TYPE_POOL = (1 << 3),
ZFS_TYPE_BOOKMARK = (1 << 4)
} zfs_type_t;
/*
* NB: lzc_dataset_type should be updated whenever a new objset type is added,
* if it represents a real type of a dataset that can be created from userland.
*/
typedef enum dmu_objset_type {
DMU_OST_NONE,
DMU_OST_META,
DMU_OST_ZFS,
DMU_OST_ZVOL,
DMU_OST_OTHER, /* For testing only! */
DMU_OST_ANY, /* Be careful! */
DMU_OST_NUMTYPES
} dmu_objset_type_t;
#define ZFS_TYPE_DATASET \
(ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME | ZFS_TYPE_SNAPSHOT)
/*
* All of these include the terminating NUL byte.
*/
#define ZAP_MAXNAMELEN 256
#define ZAP_MAXVALUELEN (1024 * 8)
#define ZAP_OLDMAXVALUELEN 1024
#define ZFS_MAX_DATASET_NAME_LEN 256
/*
* Dataset properties are identified by these constants and must be added to
* the end of this list to ensure that external consumers are not affected
* by the change. If you make any changes to this list, be sure to update
* the property table in module/zcommon/zfs_prop.c.
*/
typedef enum {
ZPROP_CONT = -2,
ZPROP_INVAL = -1,
ZFS_PROP_TYPE = 0,
ZFS_PROP_CREATION,
ZFS_PROP_USED,
ZFS_PROP_AVAILABLE,
ZFS_PROP_REFERENCED,
ZFS_PROP_COMPRESSRATIO,
ZFS_PROP_MOUNTED,
ZFS_PROP_ORIGIN,
ZFS_PROP_QUOTA,
ZFS_PROP_RESERVATION,
ZFS_PROP_VOLSIZE,
ZFS_PROP_VOLBLOCKSIZE,
ZFS_PROP_RECORDSIZE,
ZFS_PROP_MOUNTPOINT,
ZFS_PROP_SHARENFS,
ZFS_PROP_CHECKSUM,
ZFS_PROP_COMPRESSION,
ZFS_PROP_ATIME,
ZFS_PROP_DEVICES,
ZFS_PROP_EXEC,
ZFS_PROP_SETUID,
ZFS_PROP_READONLY,
ZFS_PROP_ZONED,
ZFS_PROP_SNAPDIR,
ZFS_PROP_PRIVATE, /* not exposed to user, temporary */
ZFS_PROP_ACLINHERIT,
ZFS_PROP_CREATETXG,
ZFS_PROP_NAME, /* not exposed to the user */
ZFS_PROP_CANMOUNT,
ZFS_PROP_ISCSIOPTIONS, /* not exposed to the user */
ZFS_PROP_XATTR,
ZFS_PROP_NUMCLONES, /* not exposed to the user */
ZFS_PROP_COPIES,
ZFS_PROP_VERSION,
ZFS_PROP_UTF8ONLY,
ZFS_PROP_NORMALIZE,
ZFS_PROP_CASE,
ZFS_PROP_VSCAN,
ZFS_PROP_NBMAND,
ZFS_PROP_SHARESMB,
ZFS_PROP_REFQUOTA,
ZFS_PROP_REFRESERVATION,
ZFS_PROP_GUID,
ZFS_PROP_PRIMARYCACHE,
ZFS_PROP_SECONDARYCACHE,
ZFS_PROP_USEDSNAP,
ZFS_PROP_USEDDS,
ZFS_PROP_USEDCHILD,
ZFS_PROP_USEDREFRESERV,
ZFS_PROP_USERACCOUNTING, /* not exposed to the user */
ZFS_PROP_STMF_SHAREINFO, /* not exposed to the user */
ZFS_PROP_DEFER_DESTROY,
ZFS_PROP_USERREFS,
ZFS_PROP_LOGBIAS,
ZFS_PROP_UNIQUE, /* not exposed to the user */
ZFS_PROP_OBJSETID, /* not exposed to the user */
ZFS_PROP_DEDUP,
ZFS_PROP_MLSLABEL,
ZFS_PROP_SYNC,
ZFS_PROP_DNODESIZE,
ZFS_PROP_REFRATIO,
ZFS_PROP_WRITTEN,
ZFS_PROP_CLONES,
ZFS_PROP_LOGICALUSED,
ZFS_PROP_LOGICALREFERENCED,
ZFS_PROP_INCONSISTENT, /* not exposed to the user */
ZFS_PROP_VOLMODE,
ZFS_PROP_FILESYSTEM_LIMIT,
ZFS_PROP_SNAPSHOT_LIMIT,
ZFS_PROP_FILESYSTEM_COUNT,
ZFS_PROP_SNAPSHOT_COUNT,
ZFS_PROP_SNAPDEV,
ZFS_PROP_ACLTYPE,
ZFS_PROP_SELINUX_CONTEXT,
ZFS_PROP_SELINUX_FSCONTEXT,
ZFS_PROP_SELINUX_DEFCONTEXT,
ZFS_PROP_SELINUX_ROOTCONTEXT,
ZFS_PROP_RELATIME,
ZFS_PROP_REDUNDANT_METADATA,
ZFS_PROP_OVERLAY,
ZFS_PROP_PREV_SNAP,
ZFS_PROP_RECEIVE_RESUME_TOKEN,
ZFS_PROP_ENCRYPTION,
ZFS_PROP_KEYLOCATION,
ZFS_PROP_KEYFORMAT,
ZFS_PROP_PBKDF2_SALT,
ZFS_PROP_PBKDF2_ITERS,
ZFS_PROP_ENCRYPTION_ROOT,
ZFS_PROP_KEY_GUID,
ZFS_PROP_KEYSTATUS,
+ ZFS_PROP_REMAPTXG, /* not exposed to the user */
ZFS_NUM_PROPS
} zfs_prop_t;
typedef enum {
ZFS_PROP_USERUSED,
ZFS_PROP_USERQUOTA,
ZFS_PROP_GROUPUSED,
ZFS_PROP_GROUPQUOTA,
ZFS_PROP_USEROBJUSED,
ZFS_PROP_USEROBJQUOTA,
ZFS_PROP_GROUPOBJUSED,
ZFS_PROP_GROUPOBJQUOTA,
ZFS_PROP_PROJECTUSED,
ZFS_PROP_PROJECTQUOTA,
ZFS_PROP_PROJECTOBJUSED,
ZFS_PROP_PROJECTOBJQUOTA,
ZFS_NUM_USERQUOTA_PROPS
} zfs_userquota_prop_t;
extern const char *zfs_userquota_prop_prefixes[ZFS_NUM_USERQUOTA_PROPS];
/*
* Pool properties are identified by these constants and must be added to the
* end of this list to ensure that external consumers are not affected
* by the change. If you make any changes to this list, be sure to update
* the property table in module/zcommon/zpool_prop.c.
*/
typedef enum {
ZPOOL_PROP_INVAL = -1,
ZPOOL_PROP_NAME,
ZPOOL_PROP_SIZE,
ZPOOL_PROP_CAPACITY,
ZPOOL_PROP_ALTROOT,
ZPOOL_PROP_HEALTH,
ZPOOL_PROP_GUID,
ZPOOL_PROP_VERSION,
ZPOOL_PROP_BOOTFS,
ZPOOL_PROP_DELEGATION,
ZPOOL_PROP_AUTOREPLACE,
ZPOOL_PROP_CACHEFILE,
ZPOOL_PROP_FAILUREMODE,
ZPOOL_PROP_LISTSNAPS,
ZPOOL_PROP_AUTOEXPAND,
ZPOOL_PROP_DEDUPDITTO,
ZPOOL_PROP_DEDUPRATIO,
ZPOOL_PROP_FREE,
ZPOOL_PROP_ALLOCATED,
ZPOOL_PROP_READONLY,
ZPOOL_PROP_ASHIFT,
ZPOOL_PROP_COMMENT,
ZPOOL_PROP_EXPANDSZ,
ZPOOL_PROP_FREEING,
ZPOOL_PROP_FRAGMENTATION,
ZPOOL_PROP_LEAKED,
ZPOOL_PROP_MAXBLOCKSIZE,
ZPOOL_PROP_TNAME,
ZPOOL_PROP_MAXDNODESIZE,
ZPOOL_PROP_MULTIHOST,
ZPOOL_NUM_PROPS
} zpool_prop_t;
/* Small enough to not hog a whole line of printout in zpool(1M). */
#define ZPROP_MAX_COMMENT 32
#define ZPROP_VALUE "value"
#define ZPROP_SOURCE "source"
typedef enum {
ZPROP_SRC_NONE = 0x1,
ZPROP_SRC_DEFAULT = 0x2,
ZPROP_SRC_TEMPORARY = 0x4,
ZPROP_SRC_LOCAL = 0x8,
ZPROP_SRC_INHERITED = 0x10,
ZPROP_SRC_RECEIVED = 0x20
} zprop_source_t;
#define ZPROP_SRC_ALL 0x3f
#define ZPROP_SOURCE_VAL_RECVD "$recvd"
#define ZPROP_N_MORE_ERRORS "N_MORE_ERRORS"
/*
* Dataset flag implemented as a special entry in the props zap object
* indicating that the dataset has received properties on or after
* SPA_VERSION_RECVD_PROPS. The first such receive blows away local properties
* just as it did in earlier versions, and thereafter, local properties are
* preserved.
*/
#define ZPROP_HAS_RECVD "$hasrecvd"
typedef enum {
ZPROP_ERR_NOCLEAR = 0x1, /* failure to clear existing props */
ZPROP_ERR_NORESTORE = 0x2 /* failure to restore props on error */
} zprop_errflags_t;
typedef int (*zprop_func)(int, void *);
/*
* Properties to be set on the root file system of a new pool
* are stuffed into their own nvlist, which is then included in
* the properties nvlist with the pool properties.
*/
#define ZPOOL_ROOTFS_PROPS "root-props-nvl"
/*
* Length of 'written@' and 'written#'
*/
#define ZFS_WRITTEN_PROP_PREFIX_LEN 8
/*
* Dataset property functions shared between libzfs and kernel.
*/
const char *zfs_prop_default_string(zfs_prop_t);
uint64_t zfs_prop_default_numeric(zfs_prop_t);
boolean_t zfs_prop_readonly(zfs_prop_t);
boolean_t zfs_prop_visible(zfs_prop_t prop);
boolean_t zfs_prop_inheritable(zfs_prop_t);
boolean_t zfs_prop_setonce(zfs_prop_t);
boolean_t zfs_prop_encryption_key_param(zfs_prop_t);
boolean_t zfs_prop_valid_keylocation(const char *, boolean_t);
const char *zfs_prop_to_name(zfs_prop_t);
zfs_prop_t zfs_name_to_prop(const char *);
boolean_t zfs_prop_user(const char *);
boolean_t zfs_prop_userquota(const char *);
boolean_t zfs_prop_written(const char *);
int zfs_prop_index_to_string(zfs_prop_t, uint64_t, const char **);
int zfs_prop_string_to_index(zfs_prop_t, const char *, uint64_t *);
uint64_t zfs_prop_random_value(zfs_prop_t, uint64_t seed);
boolean_t zfs_prop_valid_for_type(int, zfs_type_t, boolean_t);
/*
* Pool property functions shared between libzfs and kernel.
*/
zpool_prop_t zpool_name_to_prop(const char *);
const char *zpool_prop_to_name(zpool_prop_t);
const char *zpool_prop_default_string(zpool_prop_t);
uint64_t zpool_prop_default_numeric(zpool_prop_t);
boolean_t zpool_prop_readonly(zpool_prop_t);
boolean_t zpool_prop_feature(const char *);
boolean_t zpool_prop_unsupported(const char *);
int zpool_prop_index_to_string(zpool_prop_t, uint64_t, const char **);
int zpool_prop_string_to_index(zpool_prop_t, const char *, uint64_t *);
uint64_t zpool_prop_random_value(zpool_prop_t, uint64_t seed);
/*
* Definitions for the Delegation.
*/
typedef enum {
ZFS_DELEG_WHO_UNKNOWN = 0,
ZFS_DELEG_USER = 'u',
ZFS_DELEG_USER_SETS = 'U',
ZFS_DELEG_GROUP = 'g',
ZFS_DELEG_GROUP_SETS = 'G',
ZFS_DELEG_EVERYONE = 'e',
ZFS_DELEG_EVERYONE_SETS = 'E',
ZFS_DELEG_CREATE = 'c',
ZFS_DELEG_CREATE_SETS = 'C',
ZFS_DELEG_NAMED_SET = 's',
ZFS_DELEG_NAMED_SET_SETS = 'S'
} zfs_deleg_who_type_t;
typedef enum {
ZFS_DELEG_NONE = 0,
ZFS_DELEG_PERM_LOCAL = 1,
ZFS_DELEG_PERM_DESCENDENT = 2,
ZFS_DELEG_PERM_LOCALDESCENDENT = 3,
ZFS_DELEG_PERM_CREATE = 4
} zfs_deleg_inherit_t;
#define ZFS_DELEG_PERM_UID "uid"
#define ZFS_DELEG_PERM_GID "gid"
#define ZFS_DELEG_PERM_GROUPS "groups"
#define ZFS_MLSLABEL_DEFAULT "none"
#define ZFS_SMB_ACL_SRC "src"
#define ZFS_SMB_ACL_TARGET "target"
typedef enum {
ZFS_CANMOUNT_OFF = 0,
ZFS_CANMOUNT_ON = 1,
ZFS_CANMOUNT_NOAUTO = 2
} zfs_canmount_type_t;
typedef enum {
ZFS_LOGBIAS_LATENCY = 0,
ZFS_LOGBIAS_THROUGHPUT = 1
} zfs_logbias_op_t;
typedef enum zfs_share_op {
ZFS_SHARE_NFS = 0,
ZFS_UNSHARE_NFS = 1,
ZFS_SHARE_SMB = 2,
ZFS_UNSHARE_SMB = 3
} zfs_share_op_t;
typedef enum zfs_smb_acl_op {
ZFS_SMB_ACL_ADD,
ZFS_SMB_ACL_REMOVE,
ZFS_SMB_ACL_RENAME,
ZFS_SMB_ACL_PURGE
} zfs_smb_acl_op_t;
typedef enum zfs_cache_type {
ZFS_CACHE_NONE = 0,
ZFS_CACHE_METADATA = 1,
ZFS_CACHE_ALL = 2
} zfs_cache_type_t;
typedef enum {
ZFS_SYNC_STANDARD = 0,
ZFS_SYNC_ALWAYS = 1,
ZFS_SYNC_DISABLED = 2
} zfs_sync_type_t;
typedef enum {
ZFS_XATTR_OFF = 0,
ZFS_XATTR_DIR = 1,
ZFS_XATTR_SA = 2
} zfs_xattr_type_t;
typedef enum {
ZFS_DNSIZE_LEGACY = 0,
ZFS_DNSIZE_AUTO = 1,
ZFS_DNSIZE_1K = 1024,
ZFS_DNSIZE_2K = 2048,
ZFS_DNSIZE_4K = 4096,
ZFS_DNSIZE_8K = 8192,
ZFS_DNSIZE_16K = 16384
} zfs_dnsize_type_t;
typedef enum {
ZFS_REDUNDANT_METADATA_ALL,
ZFS_REDUNDANT_METADATA_MOST
} zfs_redundant_metadata_type_t;
typedef enum {
ZFS_VOLMODE_DEFAULT = 0,
ZFS_VOLMODE_GEOM = 1,
ZFS_VOLMODE_DEV = 2,
ZFS_VOLMODE_NONE = 3
} zfs_volmode_t;
typedef enum zfs_keystatus {
ZFS_KEYSTATUS_NONE = 0,
ZFS_KEYSTATUS_UNAVAILABLE,
ZFS_KEYSTATUS_AVAILABLE,
} zfs_keystatus_t;
typedef enum zfs_keyformat {
ZFS_KEYFORMAT_NONE = 0,
ZFS_KEYFORMAT_RAW,
ZFS_KEYFORMAT_HEX,
ZFS_KEYFORMAT_PASSPHRASE,
ZFS_KEYFORMAT_FORMATS
} zfs_keyformat_t;
typedef enum zfs_key_location {
ZFS_KEYLOCATION_NONE = 0,
ZFS_KEYLOCATION_PROMPT,
ZFS_KEYLOCATION_URI,
ZFS_KEYLOCATION_LOCATIONS
} zfs_keylocation_t;
#define DEFAULT_PBKDF2_ITERATIONS 350000
#define MIN_PBKDF2_ITERATIONS 100000
/*
* On-disk version number.
*/
#define SPA_VERSION_1 1ULL
#define SPA_VERSION_2 2ULL
#define SPA_VERSION_3 3ULL
#define SPA_VERSION_4 4ULL
#define SPA_VERSION_5 5ULL
#define SPA_VERSION_6 6ULL
#define SPA_VERSION_7 7ULL
#define SPA_VERSION_8 8ULL
#define SPA_VERSION_9 9ULL
#define SPA_VERSION_10 10ULL
#define SPA_VERSION_11 11ULL
#define SPA_VERSION_12 12ULL
#define SPA_VERSION_13 13ULL
#define SPA_VERSION_14 14ULL
#define SPA_VERSION_15 15ULL
#define SPA_VERSION_16 16ULL
#define SPA_VERSION_17 17ULL
#define SPA_VERSION_18 18ULL
#define SPA_VERSION_19 19ULL
#define SPA_VERSION_20 20ULL
#define SPA_VERSION_21 21ULL
#define SPA_VERSION_22 22ULL
#define SPA_VERSION_23 23ULL
#define SPA_VERSION_24 24ULL
#define SPA_VERSION_25 25ULL
#define SPA_VERSION_26 26ULL
#define SPA_VERSION_27 27ULL
#define SPA_VERSION_28 28ULL
#define SPA_VERSION_5000 5000ULL
/*
* When bumping up SPA_VERSION, make sure GRUB ZFS understands the on-disk
* format change. Go to usr/src/grub/grub-0.97/stage2/{zfs-include/, fsys_zfs*},
* and do the appropriate changes. Also bump the version number in
* usr/src/grub/capability.
*/
#define SPA_VERSION SPA_VERSION_5000
#define SPA_VERSION_STRING "5000"
/*
* Symbolic names for the changes that caused a SPA_VERSION switch.
* Used in the code when checking for presence or absence of a feature.
* Feel free to define multiple symbolic names for each version if there
* were multiple changes to on-disk structures during that version.
*
* NOTE: When checking the current SPA_VERSION in your code, be sure
* to use spa_version() since it reports the version of the
* last synced uberblock. Checking the in-flight version can
* be dangerous in some cases.
*/
#define SPA_VERSION_INITIAL SPA_VERSION_1
#define SPA_VERSION_DITTO_BLOCKS SPA_VERSION_2
#define SPA_VERSION_SPARES SPA_VERSION_3
#define SPA_VERSION_RAIDZ2 SPA_VERSION_3
#define SPA_VERSION_BPOBJ_ACCOUNT SPA_VERSION_3
#define SPA_VERSION_RAIDZ_DEFLATE SPA_VERSION_3
#define SPA_VERSION_DNODE_BYTES SPA_VERSION_3
#define SPA_VERSION_ZPOOL_HISTORY SPA_VERSION_4
#define SPA_VERSION_GZIP_COMPRESSION SPA_VERSION_5
#define SPA_VERSION_BOOTFS SPA_VERSION_6
#define SPA_VERSION_SLOGS SPA_VERSION_7
#define SPA_VERSION_DELEGATED_PERMS SPA_VERSION_8
#define SPA_VERSION_FUID SPA_VERSION_9
#define SPA_VERSION_REFRESERVATION SPA_VERSION_9
#define SPA_VERSION_REFQUOTA SPA_VERSION_9
#define SPA_VERSION_UNIQUE_ACCURATE SPA_VERSION_9
#define SPA_VERSION_L2CACHE SPA_VERSION_10
#define SPA_VERSION_NEXT_CLONES SPA_VERSION_11
#define SPA_VERSION_ORIGIN SPA_VERSION_11
#define SPA_VERSION_DSL_SCRUB SPA_VERSION_11
#define SPA_VERSION_SNAP_PROPS SPA_VERSION_12
#define SPA_VERSION_USED_BREAKDOWN SPA_VERSION_13
#define SPA_VERSION_PASSTHROUGH_X SPA_VERSION_14
#define SPA_VERSION_USERSPACE SPA_VERSION_15
#define SPA_VERSION_STMF_PROP SPA_VERSION_16
#define SPA_VERSION_RAIDZ3 SPA_VERSION_17
#define SPA_VERSION_USERREFS SPA_VERSION_18
#define SPA_VERSION_HOLES SPA_VERSION_19
#define SPA_VERSION_ZLE_COMPRESSION SPA_VERSION_20
#define SPA_VERSION_DEDUP SPA_VERSION_21
#define SPA_VERSION_RECVD_PROPS SPA_VERSION_22
#define SPA_VERSION_SLIM_ZIL SPA_VERSION_23
#define SPA_VERSION_SA SPA_VERSION_24
#define SPA_VERSION_SCAN SPA_VERSION_25
#define SPA_VERSION_DIR_CLONES SPA_VERSION_26
#define SPA_VERSION_DEADLISTS SPA_VERSION_26
#define SPA_VERSION_FAST_SNAP SPA_VERSION_27
#define SPA_VERSION_MULTI_REPLACE SPA_VERSION_28
#define SPA_VERSION_BEFORE_FEATURES SPA_VERSION_28
#define SPA_VERSION_FEATURES SPA_VERSION_5000
#define SPA_VERSION_IS_SUPPORTED(v) \
(((v) >= SPA_VERSION_INITIAL && (v) <= SPA_VERSION_BEFORE_FEATURES) || \
((v) >= SPA_VERSION_FEATURES && (v) <= SPA_VERSION))
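/*
 * Worked examples of the macro above (illustrative only): the legacy
 * versions 1 through 28 and the feature-flag version 5000 are supported,
 * while the placeholder range in between is not:
 *
 *	SPA_VERSION_IS_SUPPORTED(SPA_VERSION_28)	-> true
 *	SPA_VERSION_IS_SUPPORTED(4999ULL)		-> false
 *	SPA_VERSION_IS_SUPPORTED(SPA_VERSION_5000)	-> true
 */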
/*
* ZPL version - rev'd whenever an incompatible on-disk format change
* occurs. This is independent of SPA/DMU/ZAP versioning. You must
* also update the version_table[] and help message in zfs_prop.c.
*
* When changing, be sure to teach GRUB how to read the new format!
* See usr/src/grub/grub-0.97/stage2/{zfs-include/,fsys_zfs*}
*/
#define ZPL_VERSION_1 1ULL
#define ZPL_VERSION_2 2ULL
#define ZPL_VERSION_3 3ULL
#define ZPL_VERSION_4 4ULL
#define ZPL_VERSION_5 5ULL
#define ZPL_VERSION ZPL_VERSION_5
#define ZPL_VERSION_STRING "5"
#define ZPL_VERSION_INITIAL ZPL_VERSION_1
#define ZPL_VERSION_DIRENT_TYPE ZPL_VERSION_2
#define ZPL_VERSION_FUID ZPL_VERSION_3
#define ZPL_VERSION_NORMALIZATION ZPL_VERSION_3
#define ZPL_VERSION_SYSATTR ZPL_VERSION_3
#define ZPL_VERSION_USERSPACE ZPL_VERSION_4
#define ZPL_VERSION_SA ZPL_VERSION_5
/* Rewind request information */
#define ZPOOL_NO_REWIND 1 /* No policy - default behavior */
#define ZPOOL_NEVER_REWIND 2 /* Do not search for best txg or rewind */
#define ZPOOL_TRY_REWIND 4 /* Search for best txg, but do not rewind */
#define ZPOOL_DO_REWIND 8 /* Rewind to best txg w/in deferred frees */
#define ZPOOL_EXTREME_REWIND 16 /* Allow extreme measures to find best txg */
#define ZPOOL_REWIND_MASK 28 /* All the possible rewind bits */
#define ZPOOL_REWIND_POLICIES 31 /* All the possible policy bits */
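/*
 * Note (illustrative arithmetic): ZPOOL_REWIND_MASK (28) is the union of the
 * actual rewind modes, ZPOOL_TRY_REWIND | ZPOOL_DO_REWIND |
 * ZPOOL_EXTREME_REWIND (4 + 8 + 16), while ZPOOL_REWIND_POLICIES (31) also
 * includes ZPOOL_NO_REWIND (1) and ZPOOL_NEVER_REWIND (2).
 */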
typedef struct zpool_rewind_policy {
uint32_t zrp_request; /* rewind behavior requested */
uint64_t zrp_maxmeta; /* max acceptable meta-data errors */
uint64_t zrp_maxdata; /* max acceptable data errors */
uint64_t zrp_txg; /* specific txg to load */
} zpool_rewind_policy_t;
/*
* The following are configuration names used in the nvlist describing a pool's
- * configuration.
+ * configuration. New on-disk names should be prefixed with "<reverse-DNS>:"
+ * (e.g. "org.open-zfs:") to avoid conflicting names being developed
+ * independently.
*/
#define ZPOOL_CONFIG_VERSION "version"
#define ZPOOL_CONFIG_POOL_NAME "name"
#define ZPOOL_CONFIG_POOL_STATE "state"
#define ZPOOL_CONFIG_POOL_TXG "txg"
#define ZPOOL_CONFIG_POOL_GUID "pool_guid"
#define ZPOOL_CONFIG_CREATE_TXG "create_txg"
#define ZPOOL_CONFIG_TOP_GUID "top_guid"
#define ZPOOL_CONFIG_VDEV_TREE "vdev_tree"
#define ZPOOL_CONFIG_TYPE "type"
#define ZPOOL_CONFIG_CHILDREN "children"
#define ZPOOL_CONFIG_ID "id"
#define ZPOOL_CONFIG_GUID "guid"
+#define ZPOOL_CONFIG_INDIRECT_OBJECT "com.delphix:indirect_object"
+#define ZPOOL_CONFIG_INDIRECT_BIRTHS "com.delphix:indirect_births"
+#define ZPOOL_CONFIG_PREV_INDIRECT_VDEV "com.delphix:prev_indirect_vdev"
#define ZPOOL_CONFIG_PATH "path"
#define ZPOOL_CONFIG_DEVID "devid"
#define ZPOOL_CONFIG_METASLAB_ARRAY "metaslab_array"
#define ZPOOL_CONFIG_METASLAB_SHIFT "metaslab_shift"
#define ZPOOL_CONFIG_ASHIFT "ashift"
#define ZPOOL_CONFIG_ASIZE "asize"
#define ZPOOL_CONFIG_DTL "DTL"
#define ZPOOL_CONFIG_SCAN_STATS "scan_stats" /* not stored on disk */
+#define ZPOOL_CONFIG_REMOVAL_STATS "removal_stats" /* not stored on disk */
#define ZPOOL_CONFIG_VDEV_STATS "vdev_stats" /* not stored on disk */
+#define ZPOOL_CONFIG_INDIRECT_SIZE "indirect_size" /* not stored on disk */
/* container nvlist of extended stats */
#define ZPOOL_CONFIG_VDEV_STATS_EX "vdev_stats_ex"
/* Active queue read/write stats */
#define ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE "vdev_sync_r_active_queue"
#define ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE "vdev_sync_w_active_queue"
#define ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE "vdev_async_r_active_queue"
#define ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE "vdev_async_w_active_queue"
#define ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE "vdev_async_scrub_active_queue"
/* Queue sizes */
#define ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE "vdev_sync_r_pend_queue"
#define ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE "vdev_sync_w_pend_queue"
#define ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE "vdev_async_r_pend_queue"
#define ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE "vdev_async_w_pend_queue"
#define ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE "vdev_async_scrub_pend_queue"
/* Latency read/write histogram stats */
#define ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO "vdev_tot_r_lat_histo"
#define ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO "vdev_tot_w_lat_histo"
#define ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO "vdev_disk_r_lat_histo"
#define ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO "vdev_disk_w_lat_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO "vdev_sync_r_lat_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO "vdev_sync_w_lat_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO "vdev_async_r_lat_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO "vdev_async_w_lat_histo"
#define ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO "vdev_scrub_histo"
/* Request size histograms */
#define ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO "vdev_sync_ind_r_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO "vdev_sync_ind_w_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO "vdev_async_ind_r_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO "vdev_async_ind_w_histo"
#define ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO "vdev_ind_scrub_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO "vdev_sync_agg_r_histo"
#define ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO "vdev_sync_agg_w_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO "vdev_async_agg_r_histo"
#define ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO "vdev_async_agg_w_histo"
#define ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO "vdev_agg_scrub_histo"
/* vdev enclosure sysfs path */
#define ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH "vdev_enc_sysfs_path"
#define ZPOOL_CONFIG_WHOLE_DISK "whole_disk"
#define ZPOOL_CONFIG_ERRCOUNT "error_count"
#define ZPOOL_CONFIG_NOT_PRESENT "not_present"
#define ZPOOL_CONFIG_SPARES "spares"
#define ZPOOL_CONFIG_IS_SPARE "is_spare"
#define ZPOOL_CONFIG_NPARITY "nparity"
#define ZPOOL_CONFIG_HOSTID "hostid"
#define ZPOOL_CONFIG_HOSTNAME "hostname"
#define ZPOOL_CONFIG_LOADED_TIME "initial_load_time"
#define ZPOOL_CONFIG_UNSPARE "unspare"
#define ZPOOL_CONFIG_PHYS_PATH "phys_path"
#define ZPOOL_CONFIG_IS_LOG "is_log"
#define ZPOOL_CONFIG_L2CACHE "l2cache"
#define ZPOOL_CONFIG_HOLE_ARRAY "hole_array"
#define ZPOOL_CONFIG_VDEV_CHILDREN "vdev_children"
#define ZPOOL_CONFIG_IS_HOLE "is_hole"
#define ZPOOL_CONFIG_DDT_HISTOGRAM "ddt_histogram"
#define ZPOOL_CONFIG_DDT_OBJ_STATS "ddt_object_stats"
#define ZPOOL_CONFIG_DDT_STATS "ddt_stats"
#define ZPOOL_CONFIG_SPLIT "splitcfg"
#define ZPOOL_CONFIG_ORIG_GUID "orig_guid"
#define ZPOOL_CONFIG_SPLIT_GUID "split_guid"
#define ZPOOL_CONFIG_SPLIT_LIST "guid_list"
#define ZPOOL_CONFIG_REMOVING "removing"
#define ZPOOL_CONFIG_RESILVER_TXG "resilver_txg"
#define ZPOOL_CONFIG_COMMENT "comment"
#define ZPOOL_CONFIG_SUSPENDED "suspended" /* not stored on disk */
#define ZPOOL_CONFIG_SUSPENDED_REASON "suspended_reason" /* not stored */
#define ZPOOL_CONFIG_TIMESTAMP "timestamp" /* not stored on disk */
#define ZPOOL_CONFIG_BOOTFS "bootfs" /* not stored on disk */
#define ZPOOL_CONFIG_MISSING_DEVICES "missing_vdevs" /* not stored on disk */
#define ZPOOL_CONFIG_LOAD_INFO "load_info" /* not stored on disk */
#define ZPOOL_CONFIG_REWIND_INFO "rewind_info" /* not stored on disk */
#define ZPOOL_CONFIG_UNSUP_FEAT "unsup_feat" /* not stored on disk */
#define ZPOOL_CONFIG_ENABLED_FEAT "enabled_feat" /* not stored on disk */
#define ZPOOL_CONFIG_CAN_RDONLY "can_rdonly" /* not stored on disk */
#define ZPOOL_CONFIG_FEATURES_FOR_READ "features_for_read"
#define ZPOOL_CONFIG_FEATURE_STATS "feature_stats" /* not stored on disk */
#define ZPOOL_CONFIG_ERRATA "errata" /* not stored on disk */
#define ZPOOL_CONFIG_VDEV_TOP_ZAP "com.delphix:vdev_zap_top"
#define ZPOOL_CONFIG_VDEV_LEAF_ZAP "com.delphix:vdev_zap_leaf"
#define ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS "com.delphix:has_per_vdev_zaps"
#define ZPOOL_CONFIG_MMP_STATE "mmp_state" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_TXG "mmp_txg" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_HOSTNAME "mmp_hostname" /* not stored on disk */
#define ZPOOL_CONFIG_MMP_HOSTID "mmp_hostid" /* not stored on disk */
/*
* The persistent vdev state is stored as separate values rather than a single
* 'vdev_state' entry. This is because a device can be in multiple states, such
* as offline and degraded.
*/
#define ZPOOL_CONFIG_OFFLINE "offline"
#define ZPOOL_CONFIG_FAULTED "faulted"
#define ZPOOL_CONFIG_DEGRADED "degraded"
#define ZPOOL_CONFIG_REMOVED "removed"
#define ZPOOL_CONFIG_FRU "fru"
#define ZPOOL_CONFIG_AUX_STATE "aux_state"
/* Rewind policy parameters */
#define ZPOOL_REWIND_POLICY "rewind-policy"
#define ZPOOL_REWIND_REQUEST "rewind-request"
#define ZPOOL_REWIND_REQUEST_TXG "rewind-request-txg"
#define ZPOOL_REWIND_META_THRESH "rewind-meta-thresh"
#define ZPOOL_REWIND_DATA_THRESH "rewind-data-thresh"
/* Rewind data discovered */
#define ZPOOL_CONFIG_LOAD_TIME "rewind_txg_ts"
#define ZPOOL_CONFIG_LOAD_DATA_ERRORS "verify_data_errors"
#define ZPOOL_CONFIG_REWIND_TIME "seconds_of_rewind"
#define VDEV_TYPE_ROOT "root"
#define VDEV_TYPE_MIRROR "mirror"
#define VDEV_TYPE_REPLACING "replacing"
#define VDEV_TYPE_RAIDZ "raidz"
#define VDEV_TYPE_DISK "disk"
#define VDEV_TYPE_FILE "file"
#define VDEV_TYPE_MISSING "missing"
#define VDEV_TYPE_HOLE "hole"
#define VDEV_TYPE_SPARE "spare"
#define VDEV_TYPE_LOG "log"
#define VDEV_TYPE_L2CACHE "l2cache"
+#define VDEV_TYPE_INDIRECT "indirect"
+
+/* VDEV_TOP_ZAP_* are used in top-level vdev ZAP objects. */
+#define VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM \
+ "com.delphix:indirect_obsolete_sm"
+#define VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE \
+ "com.delphix:obsolete_counts_are_precise"
/*
* This is needed in userland to report the minimum necessary device size.
*/
#define SPA_MINDEVSIZE (64ULL << 20)
/*
* Set if the fragmentation has not yet been calculated. This can happen
* because the space maps have not been upgraded or the histogram feature
* is not enabled.
*/
#define ZFS_FRAG_INVALID UINT64_MAX
/*
* The location of the pool configuration repository, shared between kernel and
* userland.
*/
#define ZPOOL_CACHE "/etc/zfs/zpool.cache"
/*
* vdev states are ordered from least to most healthy.
* A vdev that's CANT_OPEN or below is considered unusable.
*/
typedef enum vdev_state {
VDEV_STATE_UNKNOWN = 0, /* Uninitialized vdev */
VDEV_STATE_CLOSED, /* Not currently open */
VDEV_STATE_OFFLINE, /* Not allowed to open */
VDEV_STATE_REMOVED, /* Explicitly removed from system */
VDEV_STATE_CANT_OPEN, /* Tried to open, but failed */
VDEV_STATE_FAULTED, /* External request to fault device */
VDEV_STATE_DEGRADED, /* Replicated vdev with unhealthy kids */
VDEV_STATE_HEALTHY /* Presumed good */
} vdev_state_t;
#define VDEV_STATE_ONLINE VDEV_STATE_HEALTHY
/*
* vdev aux states. When a vdev is in the CANT_OPEN state, the aux field
* of the vdev stats structure uses these constants to distinguish why.
*/
typedef enum vdev_aux {
VDEV_AUX_NONE, /* no error */
VDEV_AUX_OPEN_FAILED, /* ldi_open_*() or vn_open() failed */
VDEV_AUX_CORRUPT_DATA, /* bad label or disk contents */
VDEV_AUX_NO_REPLICAS, /* insufficient number of replicas */
VDEV_AUX_BAD_GUID_SUM, /* vdev guid sum doesn't match */
VDEV_AUX_TOO_SMALL, /* vdev size is too small */
VDEV_AUX_BAD_LABEL, /* the label is OK but invalid */
VDEV_AUX_VERSION_NEWER, /* on-disk version is too new */
VDEV_AUX_VERSION_OLDER, /* on-disk version is too old */
VDEV_AUX_UNSUP_FEAT, /* unsupported features */
VDEV_AUX_SPARED, /* hot spare used in another pool */
VDEV_AUX_ERR_EXCEEDED, /* too many errors */
VDEV_AUX_IO_FAILURE, /* experienced I/O failure */
VDEV_AUX_BAD_LOG, /* cannot read log chain(s) */
VDEV_AUX_EXTERNAL, /* external diagnosis or forced fault */
VDEV_AUX_SPLIT_POOL, /* vdev was split off into another pool */
VDEV_AUX_BAD_ASHIFT, /* vdev ashift is invalid */
VDEV_AUX_EXTERNAL_PERSIST, /* persistent forced fault */
VDEV_AUX_ACTIVE, /* vdev active on a different host */
} vdev_aux_t;
/*
* pool state. The following states are written to disk as part of the normal
* SPA lifecycle: ACTIVE, EXPORTED, DESTROYED, SPARE, L2CACHE. The remaining
* states are software abstractions used at various levels to communicate
* pool state.
*/
typedef enum pool_state {
POOL_STATE_ACTIVE = 0, /* In active use */
POOL_STATE_EXPORTED, /* Explicitly exported */
POOL_STATE_DESTROYED, /* Explicitly destroyed */
POOL_STATE_SPARE, /* Reserved for hot spare use */
POOL_STATE_L2CACHE, /* Level 2 ARC device */
POOL_STATE_UNINITIALIZED, /* Internal spa_t state */
POOL_STATE_UNAVAIL, /* Internal libzfs state */
POOL_STATE_POTENTIALLY_ACTIVE /* Internal libzfs state */
} pool_state_t;
/*
* mmp state. The following states provide additional detail describing
* why a pool couldn't be safely imported.
*/
typedef enum mmp_state {
MMP_STATE_ACTIVE = 0, /* In active use */
MMP_STATE_INACTIVE, /* Inactive and safe to import */
MMP_STATE_NO_HOSTID /* System hostid is not set */
} mmp_state_t;
/*
* Scan Functions.
*/
typedef enum pool_scan_func {
POOL_SCAN_NONE,
POOL_SCAN_SCRUB,
POOL_SCAN_RESILVER,
POOL_SCAN_FUNCS
} pool_scan_func_t;
/*
* Used to control scrub pause and resume.
*/
typedef enum pool_scrub_cmd {
POOL_SCRUB_NORMAL = 0,
POOL_SCRUB_PAUSE,
POOL_SCRUB_FLAGS_END
} pool_scrub_cmd_t;
/*
* ZIO types. Needed to interpret vdev statistics below.
*/
typedef enum zio_type {
ZIO_TYPE_NULL = 0,
ZIO_TYPE_READ,
ZIO_TYPE_WRITE,
ZIO_TYPE_FREE,
ZIO_TYPE_CLAIM,
ZIO_TYPE_IOCTL,
ZIO_TYPES
} zio_type_t;
/*
* Pool statistics. Note: all fields should be 64-bit because this
* is passed between kernel and userland as an nvlist uint64 array.
*/
typedef struct pool_scan_stat {
/* values stored on disk */
uint64_t pss_func; /* pool_scan_func_t */
uint64_t pss_state; /* dsl_scan_state_t */
uint64_t pss_start_time; /* scan start time */
uint64_t pss_end_time; /* scan end time */
uint64_t pss_to_examine; /* total bytes to scan */
uint64_t pss_examined; /* total bytes located by scanner */
uint64_t pss_to_process; /* total bytes to process */
uint64_t pss_processed; /* total processed bytes */
uint64_t pss_errors; /* scan errors */
/* values not stored on disk */
uint64_t pss_pass_exam; /* examined bytes per scan pass */
uint64_t pss_pass_start; /* start time of a scan pass */
uint64_t pss_pass_scrub_pause; /* pause time of a scrub pass */
/* cumulative time scrub spent paused, needed for rate calculation */
uint64_t pss_pass_scrub_spent_paused;
uint64_t pss_pass_issued; /* issued bytes per scan pass */
uint64_t pss_issued; /* total bytes checked by scanner */
} pool_scan_stat_t;
+typedef struct pool_removal_stat {
+ uint64_t prs_state; /* dsl_scan_state_t */
+ uint64_t prs_removing_vdev;
+ uint64_t prs_start_time;
+ uint64_t prs_end_time;
+ uint64_t prs_to_copy; /* bytes that need to be copied */
+ uint64_t prs_copied; /* bytes copied so far */
+ /*
+ * bytes of memory used for indirect mappings.
+ * This includes all removed vdevs.
+ */
+ uint64_t prs_mapping_memory;
+} pool_removal_stat_t;
+
typedef enum dsl_scan_state {
DSS_NONE,
DSS_SCANNING,
DSS_FINISHED,
DSS_CANCELED,
DSS_NUM_STATES
} dsl_scan_state_t;
/*
* Errata described by http://zfsonlinux.org/msg/ZFS-8000-ER. The ordering
* of this enum must be maintained to ensure the errata identifiers map to
* the correct documentation. New errata may only be appended to the list
* and must contain corresponding documentation at the above link.
*/
typedef enum zpool_errata {
ZPOOL_ERRATA_NONE,
ZPOOL_ERRATA_ZOL_2094_SCRUB,
ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY,
ZPOOL_ERRATA_ZOL_6845_ENCRYPTION,
} zpool_errata_t;
/*
* Vdev statistics. Note: all fields should be 64-bit because this
* is passed between kernel and userland as an nvlist uint64 array.
*/
typedef struct vdev_stat {
hrtime_t vs_timestamp; /* time since vdev load */
uint64_t vs_state; /* vdev state */
uint64_t vs_aux; /* see vdev_aux_t */
uint64_t vs_alloc; /* space allocated */
uint64_t vs_space; /* total capacity */
uint64_t vs_dspace; /* deflated capacity */
uint64_t vs_rsize; /* replaceable dev size */
uint64_t vs_esize; /* expandable dev size */
uint64_t vs_ops[ZIO_TYPES]; /* operation count */
uint64_t vs_bytes[ZIO_TYPES]; /* bytes read/written */
uint64_t vs_read_errors; /* read errors */
uint64_t vs_write_errors; /* write errors */
uint64_t vs_checksum_errors; /* checksum errors */
uint64_t vs_self_healed; /* self-healed bytes */
uint64_t vs_scan_removing; /* removing? */
uint64_t vs_scan_processed; /* scan processed bytes */
uint64_t vs_fragmentation; /* device fragmentation */
} vdev_stat_t;
/*
* Extended stats
*
* These are stats which aren't included in the original iostat output. For
* convenience, they are grouped together in vdev_stat_ex, although each stat
* is individually exported as an nvlist.
*/
typedef struct vdev_stat_ex {
/* Number of ZIOs issued to disk and waiting to finish */
uint64_t vsx_active_queue[ZIO_PRIORITY_NUM_QUEUEABLE];
/* Number of ZIOs pending to be issued to disk */
uint64_t vsx_pend_queue[ZIO_PRIORITY_NUM_QUEUEABLE];
/*
* Below are the histograms for various latencies. Buckets are in
* units of nanoseconds.
*/
/*
* 2^37 nanoseconds = ~137s. Timeouts will probably start kicking in
* before this.
*/
#define VDEV_L_HISTO_BUCKETS 37 /* Latency histo buckets */
#define VDEV_RQ_HISTO_BUCKETS 25 /* Request size histo buckets */
/* Amount of time in ZIO queue (ns) */
uint64_t vsx_queue_histo[ZIO_PRIORITY_NUM_QUEUEABLE]
[VDEV_L_HISTO_BUCKETS];
/* Total ZIO latency (ns). Includes queuing and disk access time */
uint64_t vsx_total_histo[ZIO_TYPES][VDEV_L_HISTO_BUCKETS];
/* Amount of time to read/write the disk (ns) */
uint64_t vsx_disk_histo[ZIO_TYPES][VDEV_L_HISTO_BUCKETS];
/* "lookup the bucket for a value" histogram macros */
#define HISTO(val, buckets) (val != 0 ? MIN(highbit64(val) - 1, \
buckets - 1) : 0)
#define L_HISTO(a) HISTO(a, VDEV_L_HISTO_BUCKETS)
#define RQ_HISTO(a) HISTO(a, VDEV_RQ_HISTO_BUCKETS)
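/*
 * Worked example (illustrative): a 4096 ns (~4 us) latency has
 * highbit64(4096) == 13, so it falls into bucket 12; values of 2^36 ns and
 * above all clamp into the final bucket, VDEV_L_HISTO_BUCKETS - 1.
 */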
/* Physical IO histogram */
uint64_t vsx_ind_histo[ZIO_PRIORITY_NUM_QUEUEABLE]
[VDEV_RQ_HISTO_BUCKETS];
/* Delegated (aggregated) physical IO histogram */
uint64_t vsx_agg_histo[ZIO_PRIORITY_NUM_QUEUEABLE]
[VDEV_RQ_HISTO_BUCKETS];
} vdev_stat_ex_t;
/*
* DDT statistics. Note: all fields should be 64-bit because this
* is passed between kernel and userland as an nvlist uint64 array.
*/
typedef struct ddt_object {
uint64_t ddo_count; /* number of elements in ddt */
uint64_t ddo_dspace; /* size of ddt on disk */
uint64_t ddo_mspace; /* size of ddt in-core */
} ddt_object_t;
typedef struct ddt_stat {
uint64_t dds_blocks; /* blocks */
uint64_t dds_lsize; /* logical size */
uint64_t dds_psize; /* physical size */
uint64_t dds_dsize; /* deflated allocated size */
uint64_t dds_ref_blocks; /* referenced blocks */
uint64_t dds_ref_lsize; /* referenced lsize * refcnt */
uint64_t dds_ref_psize; /* referenced psize * refcnt */
uint64_t dds_ref_dsize; /* referenced dsize * refcnt */
} ddt_stat_t;
typedef struct ddt_histogram {
ddt_stat_t ddh_stat[64]; /* power-of-two histogram buckets */
} ddt_histogram_t;
#define ZVOL_DRIVER "zvol"
#define ZFS_DRIVER "zfs"
#define ZFS_DEV "/dev/zfs"
#define ZFS_SHARETAB "/etc/dfs/sharetab"
#define ZFS_SUPER_MAGIC 0x2fc12fc1
/* general zvol path */
#define ZVOL_DIR "/dev"
#define ZVOL_MAJOR 230
#define ZVOL_MINOR_BITS 4
#define ZVOL_MINOR_MASK ((1U << ZVOL_MINOR_BITS) - 1)
#define ZVOL_MINORS (1 << 4)
#define ZVOL_DEV_NAME "zd"
#define ZVOL_PROP_NAME "name"
#define ZVOL_DEFAULT_BLOCKSIZE 8192
/*
* /dev/zfs ioctl numbers.
*/
typedef enum zfs_ioc {
/*
* Illumos - 71/128 numbers reserved.
*/
ZFS_IOC_FIRST = ('Z' << 8),
ZFS_IOC = ZFS_IOC_FIRST,
ZFS_IOC_POOL_CREATE = ZFS_IOC_FIRST,
ZFS_IOC_POOL_DESTROY,
ZFS_IOC_POOL_IMPORT,
ZFS_IOC_POOL_EXPORT,
ZFS_IOC_POOL_CONFIGS,
ZFS_IOC_POOL_STATS,
ZFS_IOC_POOL_TRYIMPORT,
ZFS_IOC_POOL_SCAN,
ZFS_IOC_POOL_FREEZE,
ZFS_IOC_POOL_UPGRADE,
ZFS_IOC_POOL_GET_HISTORY,
ZFS_IOC_VDEV_ADD,
ZFS_IOC_VDEV_REMOVE,
ZFS_IOC_VDEV_SET_STATE,
ZFS_IOC_VDEV_ATTACH,
ZFS_IOC_VDEV_DETACH,
ZFS_IOC_VDEV_SETPATH,
ZFS_IOC_VDEV_SETFRU,
ZFS_IOC_OBJSET_STATS,
ZFS_IOC_OBJSET_ZPLPROPS,
ZFS_IOC_DATASET_LIST_NEXT,
ZFS_IOC_SNAPSHOT_LIST_NEXT,
ZFS_IOC_SET_PROP,
ZFS_IOC_CREATE,
ZFS_IOC_DESTROY,
ZFS_IOC_ROLLBACK,
ZFS_IOC_RENAME,
ZFS_IOC_RECV,
ZFS_IOC_SEND,
ZFS_IOC_INJECT_FAULT,
ZFS_IOC_CLEAR_FAULT,
ZFS_IOC_INJECT_LIST_NEXT,
ZFS_IOC_ERROR_LOG,
ZFS_IOC_CLEAR,
ZFS_IOC_PROMOTE,
ZFS_IOC_SNAPSHOT,
ZFS_IOC_DSOBJ_TO_DSNAME,
ZFS_IOC_OBJ_TO_PATH,
ZFS_IOC_POOL_SET_PROPS,
ZFS_IOC_POOL_GET_PROPS,
ZFS_IOC_SET_FSACL,
ZFS_IOC_GET_FSACL,
ZFS_IOC_SHARE,
ZFS_IOC_INHERIT_PROP,
ZFS_IOC_SMB_ACL,
ZFS_IOC_USERSPACE_ONE,
ZFS_IOC_USERSPACE_MANY,
ZFS_IOC_USERSPACE_UPGRADE,
ZFS_IOC_HOLD,
ZFS_IOC_RELEASE,
ZFS_IOC_GET_HOLDS,
ZFS_IOC_OBJSET_RECVD_PROPS,
ZFS_IOC_VDEV_SPLIT,
ZFS_IOC_NEXT_OBJ,
ZFS_IOC_DIFF,
ZFS_IOC_TMP_SNAPSHOT,
ZFS_IOC_OBJ_TO_STATS,
ZFS_IOC_SPACE_WRITTEN,
ZFS_IOC_SPACE_SNAPS,
ZFS_IOC_DESTROY_SNAPS,
ZFS_IOC_POOL_REGUID,
ZFS_IOC_POOL_REOPEN,
ZFS_IOC_SEND_PROGRESS,
ZFS_IOC_LOG_HISTORY,
ZFS_IOC_SEND_NEW,
ZFS_IOC_SEND_SPACE,
ZFS_IOC_CLONE,
ZFS_IOC_BOOKMARK,
ZFS_IOC_GET_BOOKMARKS,
ZFS_IOC_DESTROY_BOOKMARKS,
ZFS_IOC_CHANNEL_PROGRAM,
ZFS_IOC_RECV_NEW,
ZFS_IOC_POOL_SYNC,
ZFS_IOC_LOAD_KEY,
ZFS_IOC_UNLOAD_KEY,
ZFS_IOC_CHANGE_KEY,
+ ZFS_IOC_REMAP,
/*
* Linux - 3/64 numbers reserved.
*/
ZFS_IOC_LINUX = ('Z' << 8) + 0x80,
ZFS_IOC_EVENTS_NEXT,
ZFS_IOC_EVENTS_CLEAR,
ZFS_IOC_EVENTS_SEEK,
/*
* FreeBSD - 1/64 numbers reserved.
*/
ZFS_IOC_FREEBSD = ('Z' << 8) + 0xC0,
ZFS_IOC_LAST
} zfs_ioc_t;
/*
* zvol ioctl to get dataset name
*/
#define BLKZNAME _IOR(0x12, 125, char[ZFS_MAX_DATASET_NAME_LEN])
/*
* Internal SPA load state. Used by FMA diagnosis engine.
*/
typedef enum {
SPA_LOAD_NONE, /* no load in progress */
SPA_LOAD_OPEN, /* normal open */
SPA_LOAD_IMPORT, /* import in progress */
SPA_LOAD_TRYIMPORT, /* tryimport in progress */
SPA_LOAD_RECOVER, /* recovery requested */
SPA_LOAD_ERROR, /* load failed */
SPA_LOAD_CREATE /* creation in progress */
} spa_load_state_t;
/*
* Bookmark name values.
*/
#define ZPOOL_ERR_LIST "error list"
#define ZPOOL_ERR_DATASET "dataset"
#define ZPOOL_ERR_OBJECT "object"
#define HIS_MAX_RECORD_LEN (MAXPATHLEN + MAXPATHLEN + 1)
/*
* The following are names used in the nvlist describing
* the pool's history log.
*/
#define ZPOOL_HIST_RECORD "history record"
#define ZPOOL_HIST_TIME "history time"
#define ZPOOL_HIST_CMD "history command"
#define ZPOOL_HIST_WHO "history who"
#define ZPOOL_HIST_ZONE "history zone"
#define ZPOOL_HIST_HOST "history hostname"
#define ZPOOL_HIST_TXG "history txg"
#define ZPOOL_HIST_INT_EVENT "history internal event"
#define ZPOOL_HIST_INT_STR "history internal str"
#define ZPOOL_HIST_INT_NAME "internal_name"
#define ZPOOL_HIST_IOCTL "ioctl"
#define ZPOOL_HIST_INPUT_NVL "in_nvl"
#define ZPOOL_HIST_OUTPUT_NVL "out_nvl"
#define ZPOOL_HIST_DSNAME "dsname"
#define ZPOOL_HIST_DSID "dsid"
#define ZPOOL_HIST_ERRNO "errno"
/*
* Special nvlist name that will not have its args recorded in the pool's
* history log.
*/
#define ZPOOL_HIDDEN_ARGS "hidden_args"
/*
* Flags for ZFS_IOC_VDEV_SET_STATE
*/
#define ZFS_ONLINE_CHECKREMOVE 0x1
#define ZFS_ONLINE_UNSPARE 0x2
#define ZFS_ONLINE_FORCEFAULT 0x4
#define ZFS_ONLINE_EXPAND 0x8
#define ZFS_OFFLINE_TEMPORARY 0x1
/*
* Flags for ZFS_IOC_POOL_IMPORT
*/
#define ZFS_IMPORT_NORMAL 0x0
#define ZFS_IMPORT_VERBATIM 0x1
#define ZFS_IMPORT_ANY_HOST 0x2
#define ZFS_IMPORT_MISSING_LOG 0x4
#define ZFS_IMPORT_ONLY 0x8
#define ZFS_IMPORT_TEMP_NAME 0x10
#define ZFS_IMPORT_SKIP_MMP 0x20
#define ZFS_IMPORT_LOAD_KEYS 0x40
/*
* Channel program argument/return nvlist keys and defaults.
*/
#define ZCP_ARG_PROGRAM "program"
#define ZCP_ARG_ARGLIST "arg"
#define ZCP_ARG_SYNC "sync"
#define ZCP_ARG_INSTRLIMIT "instrlimit"
#define ZCP_ARG_MEMLIMIT "memlimit"
#define ZCP_ARG_CLIARGV "argv"
#define ZCP_RET_ERROR "error"
#define ZCP_RET_RETURN "return"
#define ZCP_DEFAULT_INSTRLIMIT (10 * 1000 * 1000)
#define ZCP_MAX_INSTRLIMIT (10 * ZCP_DEFAULT_INSTRLIMIT)
#define ZCP_DEFAULT_MEMLIMIT (10 * 1024 * 1024)
#define ZCP_MAX_MEMLIMIT (10 * ZCP_DEFAULT_MEMLIMIT)
/*
* Sysevent payload members. ZFS will generate the following sysevents with the
* given payloads:
*
* ESC_ZFS_RESILVER_START
* ESC_ZFS_RESILVER_END
* ESC_ZFS_POOL_DESTROY
* ESC_ZFS_POOL_REGUID
*
* ZFS_EV_POOL_NAME DATA_TYPE_STRING
* ZFS_EV_POOL_GUID DATA_TYPE_UINT64
*
* ESC_ZFS_VDEV_REMOVE
* ESC_ZFS_VDEV_CLEAR
* ESC_ZFS_VDEV_CHECK
*
* ZFS_EV_POOL_NAME DATA_TYPE_STRING
* ZFS_EV_POOL_GUID DATA_TYPE_UINT64
* ZFS_EV_VDEV_PATH DATA_TYPE_STRING (optional)
* ZFS_EV_VDEV_GUID DATA_TYPE_UINT64
*
* ESC_ZFS_HISTORY_EVENT
*
* ZFS_EV_POOL_NAME DATA_TYPE_STRING
* ZFS_EV_POOL_GUID DATA_TYPE_UINT64
* ZFS_EV_HIST_TIME DATA_TYPE_UINT64 (optional)
* ZFS_EV_HIST_CMD DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_WHO DATA_TYPE_UINT64 (optional)
* ZFS_EV_HIST_ZONE DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_HOST DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_TXG DATA_TYPE_UINT64 (optional)
* ZFS_EV_HIST_INT_EVENT DATA_TYPE_UINT64 (optional)
* ZFS_EV_HIST_INT_STR DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_INT_NAME DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_IOCTL DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_DSNAME DATA_TYPE_STRING (optional)
* ZFS_EV_HIST_DSID DATA_TYPE_UINT64 (optional)
*
* The ZFS_EV_HIST_* members will correspond to the ZPOOL_HIST_* members in the
* history log nvlist. The keynames will be free of any spaces or other
* characters that could potentially be unexpected to consumers of the
* sysevents.
*/
#define ZFS_EV_POOL_NAME "pool_name"
#define ZFS_EV_POOL_GUID "pool_guid"
#define ZFS_EV_VDEV_PATH "vdev_path"
#define ZFS_EV_VDEV_GUID "vdev_guid"
#define ZFS_EV_HIST_TIME "history_time"
#define ZFS_EV_HIST_CMD "history_command"
#define ZFS_EV_HIST_WHO "history_who"
#define ZFS_EV_HIST_ZONE "history_zone"
#define ZFS_EV_HIST_HOST "history_hostname"
#define ZFS_EV_HIST_TXG "history_txg"
#define ZFS_EV_HIST_INT_EVENT "history_internal_event"
#define ZFS_EV_HIST_INT_STR "history_internal_str"
#define ZFS_EV_HIST_INT_NAME "history_internal_name"
#define ZFS_EV_HIST_IOCTL "history_ioctl"
#define ZFS_EV_HIST_DSNAME "history_dsname"
#define ZFS_EV_HIST_DSID "history_dsid"
#ifdef __cplusplus
}
#endif
#endif /* _SYS_FS_ZFS_H */
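Because pool_scan_stat_t (and the new pool_removal_stat_t) are all-uint64_t, the kernel hands them to userland as plain nvlist uint64 arrays under ZPOOL_CONFIG_SCAN_STATS / ZPOOL_CONFIG_REMOVAL_STATS. A minimal userland sketch of that retrieval, assuming the usual libnvpair and stdio headers plus this header; the helper is illustrative, not the patch's code:

/* Illustrative: pull scan statistics out of a pool's vdev-tree nvlist. */
static void
example_print_scan_stats(nvlist_t *nvroot)
{
	pool_scan_stat_t *ps = NULL;
	uint_t c;

	/* The stats come back as a flat uint64_t array cast to the struct. */
	if (nvlist_lookup_uint64_array(nvroot, ZPOOL_CONFIG_SCAN_STATS,
	    (uint64_t **)&ps, &c) == 0 && ps != NULL) {
		(void) printf("scan func %llu state %llu errors %llu\n",
		    (u_longlong_t)ps->pss_func,
		    (u_longlong_t)ps->pss_state,
		    (u_longlong_t)ps->pss_errors);
	}
}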
diff --git a/include/sys/metaslab.h b/include/sys/metaslab.h
index be271c7020d5..fdcf6c71be4a 100644
--- a/include/sys/metaslab.h
+++ b/include/sys/metaslab.h
@@ -1,114 +1,121 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
*/
#ifndef _SYS_METASLAB_H
#define _SYS_METASLAB_H
#include <sys/spa.h>
#include <sys/space_map.h>
#include <sys/txg.h>
#include <sys/zio.h>
#include <sys/avl.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct metaslab_ops {
uint64_t (*msop_alloc)(metaslab_t *, uint64_t);
} metaslab_ops_t;
extern metaslab_ops_t *zfs_metaslab_ops;
int metaslab_init(metaslab_group_t *, uint64_t, uint64_t, uint64_t,
metaslab_t **);
void metaslab_fini(metaslab_t *);
void metaslab_load_wait(metaslab_t *);
int metaslab_load(metaslab_t *);
void metaslab_unload(metaslab_t *);
void metaslab_sync(metaslab_t *, uint64_t);
void metaslab_sync_done(metaslab_t *, uint64_t);
void metaslab_sync_reassess(metaslab_group_t *);
uint64_t metaslab_block_maxsize(metaslab_t *);
#define METASLAB_HINTBP_FAVOR 0x0
#define METASLAB_HINTBP_AVOID 0x1
#define METASLAB_GANG_HEADER 0x2
#define METASLAB_GANG_CHILD 0x4
#define METASLAB_ASYNC_ALLOC 0x8
#define METASLAB_DONT_THROTTLE 0x10
#define METASLAB_FASTWRITE 0x20
int metaslab_alloc(spa_t *, metaslab_class_t *, uint64_t,
blkptr_t *, int, uint64_t, blkptr_t *, int, zio_alloc_list_t *, zio_t *);
+int metaslab_alloc_dva(spa_t *, metaslab_class_t *, uint64_t,
+ dva_t *, int, dva_t *, uint64_t, int, zio_alloc_list_t *);
void metaslab_free(spa_t *, const blkptr_t *, uint64_t, boolean_t);
+void metaslab_free_concrete(vdev_t *, uint64_t, uint64_t, uint64_t);
+void metaslab_free_dva(spa_t *, const dva_t *, uint64_t);
+void metaslab_free_impl_cb(uint64_t, vdev_t *, uint64_t, uint64_t, void *);
+void metaslab_unalloc_dva(spa_t *, const dva_t *, uint64_t);
int metaslab_claim(spa_t *, const blkptr_t *, uint64_t);
+int metaslab_claim_impl(vdev_t *, uint64_t, uint64_t, uint64_t);
void metaslab_check_free(spa_t *, const blkptr_t *);
void metaslab_fastwrite_mark(spa_t *, const blkptr_t *);
void metaslab_fastwrite_unmark(spa_t *, const blkptr_t *);
void metaslab_alloc_trace_init(void);
void metaslab_alloc_trace_fini(void);
void metaslab_trace_init(zio_alloc_list_t *);
void metaslab_trace_fini(zio_alloc_list_t *);
metaslab_class_t *metaslab_class_create(spa_t *, metaslab_ops_t *);
void metaslab_class_destroy(metaslab_class_t *);
int metaslab_class_validate(metaslab_class_t *);
void metaslab_class_histogram_verify(metaslab_class_t *);
uint64_t metaslab_class_fragmentation(metaslab_class_t *);
uint64_t metaslab_class_expandable_space(metaslab_class_t *);
boolean_t metaslab_class_throttle_reserve(metaslab_class_t *, int,
zio_t *, int);
void metaslab_class_throttle_unreserve(metaslab_class_t *, int, zio_t *);
void metaslab_class_space_update(metaslab_class_t *, int64_t, int64_t,
int64_t, int64_t);
uint64_t metaslab_class_get_alloc(metaslab_class_t *);
uint64_t metaslab_class_get_space(metaslab_class_t *);
uint64_t metaslab_class_get_dspace(metaslab_class_t *);
uint64_t metaslab_class_get_deferred(metaslab_class_t *);
metaslab_group_t *metaslab_group_create(metaslab_class_t *, vdev_t *);
void metaslab_group_destroy(metaslab_group_t *);
void metaslab_group_activate(metaslab_group_t *);
void metaslab_group_passivate(metaslab_group_t *);
boolean_t metaslab_group_initialized(metaslab_group_t *);
uint64_t metaslab_group_get_space(metaslab_group_t *);
void metaslab_group_histogram_verify(metaslab_group_t *);
uint64_t metaslab_group_fragmentation(metaslab_group_t *);
void metaslab_group_histogram_remove(metaslab_group_t *, metaslab_t *);
void metaslab_group_alloc_decrement(spa_t *, uint64_t, void *, int);
void metaslab_group_alloc_verify(spa_t *, const blkptr_t *, void *);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_METASLAB_H */
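The prototypes above are enough to sketch the normal allocation path: initialize a trace list, ask metaslab_alloc() for a DVA-bearing block pointer, then tear the trace down. This is loosely modelled on how the ZIL allocates its log blocks and is illustrative only; the helper name and the flag choice are assumptions, not part of the patch:

/* Illustrative allocation sketch built from the declarations above. */
static int
example_alloc_block(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
    uint64_t txg, blkptr_t *new_bp)
{
	zio_alloc_list_t zal;
	int error;

	metaslab_trace_init(&zal);
	error = metaslab_alloc(spa, mc, psize, new_bp, 1 /* ndvas */, txg,
	    NULL /* hint bp */, METASLAB_FASTWRITE, &zal, NULL /* zio */);
	metaslab_trace_fini(&zal);

	return (error);
}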
diff --git a/include/sys/metaslab_impl.h b/include/sys/metaslab_impl.h
index f8a713a4f1ff..76f670a4d43f 100644
--- a/include/sys/metaslab_impl.h
+++ b/include/sys/metaslab_impl.h
@@ -1,375 +1,375 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
*/
#ifndef _SYS_METASLAB_IMPL_H
#define _SYS_METASLAB_IMPL_H
#include <sys/metaslab.h>
#include <sys/space_map.h>
#include <sys/range_tree.h>
#include <sys/vdev.h>
#include <sys/txg.h>
#include <sys/avl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Metaslab allocation tracing record.
*/
typedef struct metaslab_alloc_trace {
list_node_t mat_list_node;
metaslab_group_t *mat_mg;
metaslab_t *mat_msp;
uint64_t mat_size;
uint64_t mat_weight;
uint32_t mat_dva_id;
uint64_t mat_offset;
} metaslab_alloc_trace_t;
/*
* Used by the metaslab allocation tracing facility to indicate
* error conditions. These errors are stored to the offset member
* of the metaslab_alloc_trace_t record and displayed by mdb.
*/
typedef enum trace_alloc_type {
TRACE_ALLOC_FAILURE = -1ULL,
TRACE_TOO_SMALL = -2ULL,
TRACE_FORCE_GANG = -3ULL,
TRACE_NOT_ALLOCATABLE = -4ULL,
TRACE_GROUP_FAILURE = -5ULL,
TRACE_ENOSPC = -6ULL,
TRACE_CONDENSING = -7ULL,
TRACE_VDEV_ERROR = -8ULL
} trace_alloc_type_t;
#define METASLAB_WEIGHT_PRIMARY (1ULL << 63)
#define METASLAB_WEIGHT_SECONDARY (1ULL << 62)
#define METASLAB_WEIGHT_TYPE (1ULL << 61)
#define METASLAB_ACTIVE_MASK \
(METASLAB_WEIGHT_PRIMARY | METASLAB_WEIGHT_SECONDARY)
/*
* The metaslab weight is used to encode the amount of free space in a
* metaslab, such that the "best" metaslab appears first when sorting the
* metaslabs by weight. The weight (and therefore the "best" metaslab) can
* be determined in two different ways: by computing a weighted sum of all
* the free space in the metaslab (a space based weight) or by counting only
* the free segments of the largest size (a segment based weight). We prefer
* the segment based weight because it reflects how the free space is
* comprised, but we cannot always use it -- legacy pools do not have the
* space map histogram information necessary to determine the largest
* contiguous regions. Pools that have the space map histogram determine
* the segment weight by looking at each bucket in the histogram and
* determining the free space whose size in bytes is in the range:
* [2^i, 2^(i+1))
* We then encode the largest index, i, that contains regions into the
* segment-weighted value.
*
* Space-based weight:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PS1| weighted-free space |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* space - the fragmentation-weighted space
*
* Segment-based weight:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* |PS0| idx| count of segments in region |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* PS - indicates primary and secondary activation
* idx - index for the highest bucket in the histogram
* count - number of segments in the specified bucket
*/
#define WEIGHT_GET_ACTIVE(weight) BF64_GET((weight), 62, 2)
#define WEIGHT_SET_ACTIVE(weight, x) BF64_SET((weight), 62, 2, x)
#define WEIGHT_IS_SPACEBASED(weight) \
((weight) == 0 || BF64_GET((weight), 61, 1))
#define WEIGHT_SET_SPACEBASED(weight) BF64_SET((weight), 61, 1, 1)
/*
* These macros are only applicable to segment-based weighting.
*/
#define WEIGHT_GET_INDEX(weight) BF64_GET((weight), 55, 6)
#define WEIGHT_SET_INDEX(weight, x) BF64_SET((weight), 55, 6, x)
#define WEIGHT_GET_COUNT(weight) BF64_GET((weight), 0, 55)
#define WEIGHT_SET_COUNT(weight, x) BF64_SET((weight), 0, 55, x)
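/*
 * Worked example of the segment-based layout above (illustrative): a
 * metaslab whose largest free segments fall in the [2^16, 2^17) byte bucket,
 * with 1000 such segments, would be encoded roughly as
 *
 *	uint64_t w = 0;
 *	WEIGHT_SET_INDEX(w, 16);	(index goes into bits 55-60)
 *	WEIGHT_SET_COUNT(w, 1000);	(count goes into bits 0-54)
 *
 * Bit 61 stays clear, so WEIGHT_IS_SPACEBASED(w) is false and the weight is
 * treated as segment-based when metaslabs are sorted.
 */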
/*
* A metaslab class encompasses a category of allocatable top-level vdevs.
* Each top-level vdev is associated with a metaslab group which defines
* the allocatable region for that vdev. Examples of these categories include
* "normal" for data block allocations (i.e. main pool allocations) or "log"
* for allocations designated for intent log devices (i.e. slog devices).
* When a block allocation is requested from the SPA it is associated with a
* metaslab_class_t, and only top-level vdevs (i.e. metaslab groups) belonging
* to the class can be used to satisfy that request. Allocations are done
* by traversing the metaslab groups that are linked off of the mc_rotor field.
* This rotor points to the next metaslab group where allocations will be
* attempted. Allocating a block is a 3 step process -- select the metaslab
* group, select the metaslab, and then allocate the block. The metaslab
* class defines the low-level block allocator that will be used as the
* final step in allocation. These allocators are pluggable allowing each class
* to use a block allocator that best suits that class.
*/
struct metaslab_class {
kmutex_t mc_lock;
spa_t *mc_spa;
metaslab_group_t *mc_rotor;
metaslab_ops_t *mc_ops;
uint64_t mc_aliquot;
/*
* Track the number of metaslab groups that have been initialized
* and can accept allocations. An initialized metaslab group is
 * one that has been completely added to the config (i.e. we have
* updated the MOS config and the space has been added to the pool).
*/
uint64_t mc_groups;
/*
* Toggle to enable/disable the allocation throttle.
*/
boolean_t mc_alloc_throttle_enabled;
/*
* The allocation throttle works on a reservation system. Whenever
* an asynchronous zio wants to perform an allocation it must
* first reserve the number of blocks that it wants to allocate.
* If there aren't sufficient slots available for the pending zio
* then that I/O is throttled until more slots free up. The current
* number of reserved allocations is maintained by the mc_alloc_slots
* refcount. The mc_alloc_max_slots value determines the maximum
* number of allocations that the system allows. Gang blocks are
* allowed to reserve slots even if we've reached the maximum
* number of allocations allowed.
*/
uint64_t mc_alloc_max_slots;
refcount_t mc_alloc_slots;
uint64_t mc_alloc_groups; /* # of allocatable groups */
uint64_t mc_alloc; /* total allocated space */
uint64_t mc_deferred; /* total deferred frees */
uint64_t mc_space; /* total space (alloc + free) */
uint64_t mc_dspace; /* total deflated space */
uint64_t mc_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
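/*
 * Illustrative sketch only: the reservation check described above,
 * assuming the refcount_count()/refcount_add() interface from
 * sys/refcount.h.  This is not the real throttle; see
 * metaslab_class_throttle_reserve() in metaslab.c for that.
 */
static inline boolean_t
mc_throttle_reserve_example(metaslab_class_t *mc, int slots, zio_t *zio,
    boolean_t is_gang)
{
	boolean_t reserved = B_FALSE;

	mutex_enter(&mc->mc_lock);
	if (is_gang || refcount_count(&mc->mc_alloc_slots) + slots <=
	    (int64_t)mc->mc_alloc_max_slots) {
		/* Gang blocks may exceed the limit; other zios must fit. */
		for (int i = 0; i < slots; i++)
			(void) refcount_add(&mc->mc_alloc_slots, zio);
		reserved = B_TRUE;
	}
	mutex_exit(&mc->mc_lock);
	return (reserved);
}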
/*
* Metaslab groups encapsulate all the allocatable regions (i.e. metaslabs)
* of a top-level vdev. They are linked together to form a circular linked
* list and can belong to only one metaslab class. Metaslab groups may become
* ineligible for allocations for a number of reasons such as limited free
* space, fragmentation, or going offline. When this happens the allocator will
* simply find the next metaslab group in the linked list and attempt
* to allocate from that group instead.
*/
struct metaslab_group {
kmutex_t mg_lock;
avl_tree_t mg_metaslab_tree;
uint64_t mg_aliquot;
boolean_t mg_allocatable; /* can we allocate? */
/*
* A metaslab group is considered to be initialized only after
* we have updated the MOS config and added the space to the pool.
* We only allow allocation attempts to a metaslab group if it
* has been initialized.
*/
boolean_t mg_initialized;
uint64_t mg_free_capacity; /* percentage free */
int64_t mg_bias;
int64_t mg_activation_count;
metaslab_class_t *mg_class;
vdev_t *mg_vd;
taskq_t *mg_taskq;
metaslab_group_t *mg_prev;
metaslab_group_t *mg_next;
/*
* Each metaslab group can handle mg_max_alloc_queue_depth allocations
* which are tracked by mg_alloc_queue_depth. It's possible for a
* metaslab group to handle more allocations than its max. This
* can occur when gang blocks are required or when other groups
* are unable to handle their share of allocations.
*/
uint64_t mg_max_alloc_queue_depth;
refcount_t mg_alloc_queue_depth;
/*
 * A metaslab group that can no longer allocate the minimum block
 * size will set mg_no_free_space. Once a metaslab group is out
 * of space, its share of work must be distributed to other
* groups.
*/
boolean_t mg_no_free_space;
uint64_t mg_allocations;
uint64_t mg_failed_allocations;
uint64_t mg_fragmentation;
uint64_t mg_histogram[RANGE_TREE_HISTOGRAM_SIZE];
};
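/*
 * Illustrative sketch only: walking the circular group list from the
 * class rotor, as described above, skipping groups that cannot
 * currently accept allocations.  The helper name is hypothetical.
 */
static inline metaslab_group_t *
mc_rotor_next_example(metaslab_class_t *mc)
{
	metaslab_group_t *start = mc->mc_rotor;
	metaslab_group_t *mg = start;

	if (start == NULL)
		return (NULL);
	do {
		if (mg->mg_initialized && mg->mg_allocatable)
			return (mg);
		mg = mg->mg_next;
	} while (mg != start);
	return (NULL);
}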
/*
* This value defines the number of elements in the ms_lbas array. The value
* of 64 was chosen as it covers all power of 2 buckets up to UINT64_MAX.
* This is the equivalent of highbit(UINT64_MAX).
*/
#define MAX_LBAS 64
/*
* Each metaslab maintains a set of in-core trees to track metaslab
* operations. The in-core free tree (ms_tree) contains the list of
* free segments which are eligible for allocation. As blocks are
- * allocated, the allocated segments are removed from the ms_tree and
- * added to a per txg allocation tree (ms_alloctree). This allows us to
- * process all allocations in syncing context where it is safe to update
- * the on-disk space maps. Frees are also processed in syncing context.
- * Most frees are generated from syncing context, and those that are not
- * are held in the spa_free_bplist for processing in syncing context.
- * An additional set of in-core trees is maintained to track deferred
- * frees (ms_defertree). Once a block is freed it will move from the
+ * allocated, the allocated segments are removed from the ms_tree and
+ * added to a per txg allocation tree (ms_alloctree). As blocks are
+ * freed, they are added to the free tree (ms_freeingtree). These trees
+ * allow us to process all allocations and frees in syncing context
+ * where it is safe to update the on-disk space maps. An additional set
+ * of in-core trees is maintained to track deferred frees
+ * (ms_defertree). Once a block is freed it will move from the
* ms_freedtree to the ms_defertree. A deferred free means that a block
* has been freed but cannot be used by the pool until TXG_DEFER_SIZE
 * transaction groups later. For example, a block that is freed in txg
* 50 will not be available for reallocation until txg 52 (50 +
* TXG_DEFER_SIZE). This provides a safety net for uberblock rollback.
 * A pool can safely be rolled back TXG_DEFER_SIZE transaction
 * groups with the guarantee that no block has been reallocated.
*
* The simplified transition diagram looks like this:
*
*
* ALLOCATE
* |
* V
* free segment (ms_tree) -----> ms_alloctree[4] ----> (write to space map)
* ^
* | ms_freeingtree <--- FREE
* | |
* | v
* | ms_freedtree
* | |
* +-------- ms_defertree[2] <-------+---------> (write to space map)
*
*
* Each metaslab's space is tracked in a single space map in the MOS,
* which is only updated in syncing context. Each time we sync a txg,
* we append the allocs and frees from that txg to the space map. The
* pool space is only updated once all metaslabs have finished syncing.
*
* To load the in-core free tree we read the space map from disk. This
* object contains a series of alloc and free records that are combined
* to make up the list of all free segments in this metaslab. These
* segments are represented in-core by the ms_tree and are stored in an
* AVL tree.
*
* As the space map grows (as a result of the appends) it will
* eventually become space-inefficient. When the metaslab's in-core
* free tree is zfs_condense_pct/100 times the size of the minimal
* on-disk representation, we rewrite it in its minimized form. If a
* metaslab needs to condense then we must set the ms_condensing flag to
* ensure that allocations are not performed on the metaslab that is
* being written.
*/
struct metaslab {
kmutex_t ms_lock;
+ kmutex_t ms_sync_lock;
kcondvar_t ms_load_cv;
space_map_t *ms_sm;
uint64_t ms_id;
uint64_t ms_start;
uint64_t ms_size;
uint64_t ms_fragmentation;
range_tree_t *ms_alloctree[TXG_SIZE];
range_tree_t *ms_tree;
/*
* The following range trees are accessed only from syncing context.
* ms_free*tree only have entries while syncing, and are empty
* between syncs.
*/
range_tree_t *ms_freeingtree; /* to free this syncing txg */
range_tree_t *ms_freedtree; /* already freed this syncing txg */
range_tree_t *ms_defertree[TXG_DEFER_SIZE];
boolean_t ms_condensing; /* condensing? */
boolean_t ms_condense_wanted;
/*
* We must hold both ms_lock and ms_group->mg_lock in order to
* modify ms_loaded.
*/
boolean_t ms_loaded;
boolean_t ms_loading;
int64_t ms_deferspace; /* sum of ms_defertree[] space */
uint64_t ms_weight; /* weight vs. others in group */
uint64_t ms_activation_weight; /* activation weight */
/*
 * Tracks whenever a metaslab is selected for loading or allocation.
* We use this value to determine how long the metaslab should
* stay cached.
*/
uint64_t ms_selected_txg;
uint64_t ms_alloc_txg; /* last successful alloc (debug only) */
uint64_t ms_max_size; /* maximum allocatable size */
/*
* The metaslab block allocators can optionally use a size-ordered
* range tree and/or an array of LBAs. Not all allocators use
* this functionality. The ms_size_tree should always contain the
* same number of segments as the ms_tree. The only difference
* is that the ms_size_tree is ordered by segment sizes.
*/
avl_tree_t ms_size_tree;
uint64_t ms_lbas[MAX_LBAS];
metaslab_group_t *ms_group; /* metaslab group */
avl_node_t ms_group_node; /* node in metaslab group tree */
txg_node_t ms_txg_node; /* per-txg dirty metaslab links */
};
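/*
 * Illustrative sketch only: the segment transitions from the diagram
 * above, expressed with the range_tree_add()/range_tree_remove()
 * interface declared in sys/range_tree.h.  TXG_MASK is assumed from
 * sys/txg.h, and the real code performs these moves with ms_lock held;
 * see metaslab.c for the actual allocator.
 */
static inline void
metaslab_alloc_transition_example(metaslab_t *msp, uint64_t offset,
    uint64_t size, uint64_t txg)
{
	/* ALLOCATE: a free segment leaves ms_tree and enters ms_alloctree. */
	range_tree_remove(msp->ms_tree, offset, size);
	range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
}

static inline void
metaslab_free_transition_example(metaslab_t *msp, uint64_t offset,
    uint64_t size)
{
	/* FREE: the segment is queued in ms_freeingtree for this sync pass. */
	range_tree_add(msp->ms_freeingtree, offset, size);
}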
#ifdef __cplusplus
}
#endif
#endif /* _SYS_METASLAB_IMPL_H */
diff --git a/include/sys/range_tree.h b/include/sys/range_tree.h
index 1d3bdf9e5fe0..970505628b60 100644
--- a/include/sys/range_tree.h
+++ b/include/sys/range_tree.h
@@ -1,120 +1,121 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
- * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
*/
#ifndef _SYS_RANGE_TREE_H
#define _SYS_RANGE_TREE_H
#include <sys/avl.h>
#include <sys/dmu.h>
#ifdef __cplusplus
extern "C" {
#endif
#define RANGE_TREE_HISTOGRAM_SIZE 64
typedef struct range_tree_ops range_tree_ops_t;
+/*
+ * Note: the range_tree may not be accessed concurrently; consumers
+ * must provide external locking if required.
+ */
typedef struct range_tree {
avl_tree_t rt_root; /* offset-ordered segment AVL tree */
uint64_t rt_space; /* sum of all segments in the map */
uint64_t rt_gap; /* allowable inter-segment gap */
range_tree_ops_t *rt_ops;
/* rt_avl_compare should only be set if rt_arg is an AVL tree */
void *rt_arg;
int (*rt_avl_compare)(const void *, const void *);
/*
* The rt_histogram maintains a histogram of ranges. Each bucket,
* rt_histogram[i], contains the number of ranges whose size is:
* 2^i <= size of range in bytes < 2^(i+1)
*/
uint64_t rt_histogram[RANGE_TREE_HISTOGRAM_SIZE];
- kmutex_t *rt_lock; /* pointer to lock that protects map */
} range_tree_t;
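/*
 * Illustrative sketch only: which rt_histogram[] bucket a segment of a
 * given size falls into, i.e. the largest i such that 2^i <= size.
 * Assumes highbit64() from sys/sysmacros.h; the helper name is
 * hypothetical.
 */
static inline int
range_tree_histogram_bucket_example(uint64_t size)
{
	ASSERT3U(size, !=, 0);
	return (highbit64(size) - 1);	/* e.g. a 4K segment -> bucket 12 */
}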
typedef struct range_seg {
avl_node_t rs_node; /* AVL node */
avl_node_t rs_pp_node; /* AVL picker-private node */
uint64_t rs_start; /* starting offset of this segment */
uint64_t rs_end; /* ending offset (non-inclusive) */
uint64_t rs_fill; /* actual fill if gap mode is on */
} range_seg_t;
struct range_tree_ops {
void (*rtop_create)(range_tree_t *rt, void *arg);
void (*rtop_destroy)(range_tree_t *rt, void *arg);
void (*rtop_add)(range_tree_t *rt, range_seg_t *rs, void *arg);
void (*rtop_remove)(range_tree_t *rt, range_seg_t *rs, void *arg);
void (*rtop_vacate)(range_tree_t *rt, void *arg);
};
typedef void range_tree_func_t(void *arg, uint64_t start, uint64_t size);
void range_tree_init(void);
void range_tree_fini(void);
range_tree_t *range_tree_create_impl(range_tree_ops_t *ops, void *arg,
- int (*avl_compare) (const void *, const void *), kmutex_t *lp,
- uint64_t gap);
-range_tree_t *range_tree_create(range_tree_ops_t *ops, void *arg, kmutex_t *lp);
+ int (*avl_compare) (const void *, const void *), uint64_t gap);
+range_tree_t *range_tree_create(range_tree_ops_t *ops, void *arg);
void range_tree_destroy(range_tree_t *rt);
boolean_t range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size);
range_seg_t *range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size);
void range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
uint64_t newstart, uint64_t newsize);
uint64_t range_tree_space(range_tree_t *rt);
void range_tree_verify(range_tree_t *rt, uint64_t start, uint64_t size);
void range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst);
void range_tree_stat_verify(range_tree_t *rt);
-void range_tree_set_lock(range_tree_t *rt, kmutex_t *lp);
void range_tree_add(void *arg, uint64_t start, uint64_t size);
void range_tree_remove(void *arg, uint64_t start, uint64_t size);
void range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size);
void range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta);
void range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size);
void range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg);
void range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg);
range_seg_t *range_tree_first(range_tree_t *rt);
void rt_avl_create(range_tree_t *rt, void *arg);
void rt_avl_destroy(range_tree_t *rt, void *arg);
void rt_avl_add(range_tree_t *rt, range_seg_t *rs, void *arg);
void rt_avl_remove(range_tree_t *rt, range_seg_t *rs, void *arg);
void rt_avl_vacate(range_tree_t *rt, void *arg);
extern struct range_tree_ops rt_avl_ops;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_RANGE_TREE_H */
diff --git a/include/sys/spa.h b/include/sys/spa.h
index f93354c7881a..eb47c21f6b44 100644
--- a/include/sys/spa.h
+++ b/include/sys/spa.h
@@ -1,1105 +1,1114 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2017 Joyent, Inc.
* Copyright (c) 2017 Datto Inc.
*/
#ifndef _SYS_SPA_H
#define _SYS_SPA_H
#include <sys/avl.h>
#include <sys/zfs_context.h>
#include <sys/kstat.h>
#include <sys/nvpair.h>
#include <sys/sysmacros.h>
#include <sys/types.h>
#include <sys/fs/zfs.h>
#include <sys/spa_checksum.h>
#include <sys/dmu.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Forward references that lots of things need.
*/
typedef struct spa spa_t;
typedef struct vdev vdev_t;
typedef struct metaslab metaslab_t;
typedef struct metaslab_group metaslab_group_t;
typedef struct metaslab_class metaslab_class_t;
typedef struct zio zio_t;
typedef struct zilog zilog_t;
typedef struct spa_aux_vdev spa_aux_vdev_t;
typedef struct ddt ddt_t;
typedef struct ddt_entry ddt_entry_t;
typedef struct zbookmark_phys zbookmark_phys_t;
struct dsl_pool;
struct dsl_dataset;
struct dsl_crypto_params;
/*
* General-purpose 32-bit and 64-bit bitfield encodings.
*/
#define BF32_DECODE(x, low, len) P2PHASE((x) >> (low), 1U << (len))
#define BF64_DECODE(x, low, len) P2PHASE((x) >> (low), 1ULL << (len))
#define BF32_ENCODE(x, low, len) (P2PHASE((x), 1U << (len)) << (low))
#define BF64_ENCODE(x, low, len) (P2PHASE((x), 1ULL << (len)) << (low))
#define BF32_GET(x, low, len) BF32_DECODE(x, low, len)
#define BF64_GET(x, low, len) BF64_DECODE(x, low, len)
#define BF32_SET(x, low, len, val) do { \
ASSERT3U(val, <, 1U << (len)); \
ASSERT3U(low + len, <=, 32); \
(x) ^= BF32_ENCODE((x >> low) ^ (val), low, len); \
_NOTE(CONSTCOND) } while (0)
#define BF64_SET(x, low, len, val) do { \
ASSERT3U(val, <, 1ULL << (len)); \
ASSERT3U(low + len, <=, 64); \
((x) ^= BF64_ENCODE((x >> low) ^ (val), low, len)); \
_NOTE(CONSTCOND) } while (0)
#define BF32_GET_SB(x, low, len, shift, bias) \
((BF32_GET(x, low, len) + (bias)) << (shift))
#define BF64_GET_SB(x, low, len, shift, bias) \
((BF64_GET(x, low, len) + (bias)) << (shift))
#define BF32_SET_SB(x, low, len, shift, bias, val) do { \
ASSERT(IS_P2ALIGNED(val, 1U << shift)); \
ASSERT3S((val) >> (shift), >=, bias); \
BF32_SET(x, low, len, ((val) >> (shift)) - (bias)); \
_NOTE(CONSTCOND) } while (0)
#define BF64_SET_SB(x, low, len, shift, bias, val) do { \
ASSERT(IS_P2ALIGNED(val, 1ULL << shift)); \
ASSERT3S((val) >> (shift), >=, bias); \
BF64_SET(x, low, len, ((val) >> (shift)) - (bias)); \
_NOTE(CONSTCOND) } while (0)
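/*
 * Illustrative sketch only: round-tripping a byte count through the
 * shift/bias encodings above, the same way LSIZE/PSIZE are packed into
 * blk_prop later in this file (shift 9 == SPA_MINBLOCKSHIFT, bias 1).
 * 'bytes' must be a nonzero multiple of 512 and at most 32MB, or the
 * embedded assertions will fire.  The helper name is hypothetical.
 */
static inline uint64_t
bf64_sb_roundtrip_example(uint64_t bytes)
{
	uint64_t word = 0;

	BF64_SET_SB(word, 0, 16, 9, 1, bytes);		/* stores bytes/512 - 1 */
	return (BF64_GET_SB(word, 0, 16, 9, 1));	/* recovers 'bytes' */
}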
/*
* We currently support block sizes from 512 bytes to 16MB.
* The benefits of larger blocks, and thus larger IO, need to be weighed
* against the cost of COWing a giant block to modify one byte, and the
* large latency of reading or writing a large block.
*
* Note that although blocks up to 16MB are supported, the recordsize
* property can not be set larger than zfs_max_recordsize (default 1MB).
* See the comment near zfs_max_recordsize in dsl_dataset.c for details.
*
* Note that although the LSIZE field of the blkptr_t can store sizes up
* to 32MB, the dnode's dn_datablkszsec can only store sizes up to
* 32MB - 512 bytes. Therefore, we limit SPA_MAXBLOCKSIZE to 16MB.
*/
#define SPA_MINBLOCKSHIFT 9
#define SPA_OLD_MAXBLOCKSHIFT 17
#define SPA_MAXBLOCKSHIFT 24
#define SPA_MINBLOCKSIZE (1ULL << SPA_MINBLOCKSHIFT)
#define SPA_OLD_MAXBLOCKSIZE (1ULL << SPA_OLD_MAXBLOCKSHIFT)
#define SPA_MAXBLOCKSIZE (1ULL << SPA_MAXBLOCKSHIFT)
/*
* Alignment Shift (ashift) is an immutable, internal top-level vdev property
* which can only be set at vdev creation time. Physical writes are always done
* according to it, which makes 2^ashift the smallest possible IO on a vdev.
*
* We currently allow values ranging from 512 bytes (2^9 = 512) to 64 KiB
* (2^16 = 65,536).
*/
#define ASHIFT_MIN 9
#define ASHIFT_MAX 16
/*
* Size of block to hold the configuration data (a packed nvlist)
*/
#define SPA_CONFIG_BLOCKSIZE (1ULL << 14)
/*
* The DVA size encodings for LSIZE and PSIZE support blocks up to 32MB.
* The ASIZE encoding should be at least 64 times larger (6 more bits)
* to support up to 4-way RAID-Z mirror mode with worst-case gang block
* overhead, three DVAs per bp, plus one more bit in case we do anything
* else that expands the ASIZE.
*/
#define SPA_LSIZEBITS 16 /* LSIZE up to 32M (2^16 * 512) */
#define SPA_PSIZEBITS 16 /* PSIZE up to 32M (2^16 * 512) */
#define SPA_ASIZEBITS 24 /* ASIZE up to 64 times larger */
#define SPA_COMPRESSBITS 7
/*
* All SPA data is represented by 128-bit data virtual addresses (DVAs).
* The members of the dva_t should be considered opaque outside the SPA.
*/
typedef struct dva {
uint64_t dva_word[2];
} dva_t;
/*
* Some checksums/hashes need a 256-bit initialization salt. This salt is kept
* secret and is suitable for use in MAC algorithms as the key.
*/
typedef struct zio_cksum_salt {
uint8_t zcs_bytes[32];
} zio_cksum_salt_t;
/*
* Each block is described by its DVAs, time of birth, checksum, etc.
* The word-by-word, bit-by-bit layout of the blkptr is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | vdev3 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 |G| offset3 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | checksum[2] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | checksum[3] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* vdev virtual device ID
* offset offset into virtual device
* LSIZE logical size
* PSIZE physical size (after compression)
* ASIZE allocated size (including RAID-Z parity and gang block headers)
* GRID RAID-Z layout information (reserved for future use)
* cksum checksum function
* comp compression function
* G gang block indicator
* B byteorder (endianness)
* D dedup
* X encryption
* E blkptr_t contains embedded data (see below)
* lvl level of indirection
* type DMU object type
- * phys birth txg of block allocation; zero if same as logical birth txg
+ * phys birth txg when dva[0] was written; zero if same as logical birth txg
+ * note that typically all the dva's would be written in this
+ * txg, but they could be different if they were moved by
+ * device removal.
* log. birth transaction group in which the block was logically born
* fill count number of non-zero blocks under this bp
* checksum[4] 256-bit checksum of the data this bp describes
*/
/*
* The blkptr_t's of encrypted blocks also need to store the encryption
* parameters so that the block can be decrypted. This layout is as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | vdev1 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 1 |G| offset1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 2 | vdev2 | GRID | ASIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 3 |G| offset2 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 4 | salt |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 5 | IV1 |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | cksum |E| comp| PSIZE | LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 8 | padding |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 9 | physical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | IV2 | fill count |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* c | checksum[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* d | checksum[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* e | MAC[0] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* f | MAC[1] |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* salt Salt for generating encryption keys
* IV1 First 64 bits of encryption IV
* X Block requires encryption handling (set to 1)
* E blkptr_t contains embedded data (set to 0, see below)
* fill count number of non-zero blocks under this bp (truncated to 32 bits)
* IV2 Last 32 bits of encryption IV
* checksum[2] 128-bit checksum of the data this bp describes
* MAC[2] 128-bit message authentication code for this data
*
* The X bit being set indicates that this block is one of 3 types. If this is
* a level 0 block with an encrypted object type, the block is encrypted
* (see BP_IS_ENCRYPTED()). If this is a level 0 block with an unencrypted
* object type, this block is authenticated with an HMAC (see
* BP_IS_AUTHENTICATED()). Otherwise (if level > 0), this bp will use the MAC
* words to store a checksum-of-MACs from the level below (see
* BP_HAS_INDIRECT_MAC_CKSUM()). For convenience in the code, BP_IS_PROTECTED()
* refers to both encrypted and authenticated blocks and BP_USES_CRYPT()
* refers to any of these 3 kinds of blocks.
*
* The additional encryption parameters are the salt, IV, and MAC which are
* explained in greater detail in the block comment at the top of zio_crypt.c.
* The MAC occupies half of the checksum space since it serves a very similar
* purpose: to prevent data corruption on disk. The only functional difference
* is that the checksum is used to detect on-disk corruption whether or not the
* encryption key is loaded and the MAC provides additional protection against
* malicious disk tampering. We use the 3rd DVA to store the salt and first
* 64 bits of the IV. As a result encrypted blocks can only have 2 copies
* maximum instead of the normal 3. The last 32 bits of the IV are stored in
* the upper bits of what is usually the fill count. Note that only blocks at
* level 0 or -2 are ever encrypted, which allows us to guarantee that these
* 32 bits are not trampled over by other code (see zio_crypt.c for details).
* The salt and IV are not used for authenticated bps or bps with an indirect
* MAC checksum, so these blocks can utilize all 3 DVAs and the full 64 bits
* for the fill count.
*/
/*
* "Embedded" blkptr_t's don't actually point to a block, instead they
* have a data payload embedded in the blkptr_t itself. See the comment
* in blkptr.c for more details.
*
* The blkptr_t is laid out as follows:
*
* 64 56 48 40 32 24 16 8 0
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 0 | payload |
* 1 | payload |
* 2 | payload |
* 3 | payload |
* 4 | payload |
* 5 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 6 |BDX|lvl| type | etype |E| comp| PSIZE| LSIZE |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* 7 | payload |
* 8 | payload |
* 9 | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* a | logical birth txg |
* +-------+-------+-------+-------+-------+-------+-------+-------+
* b | payload |
* c | payload |
* d | payload |
* e | payload |
* f | payload |
* +-------+-------+-------+-------+-------+-------+-------+-------+
*
* Legend:
*
* payload contains the embedded data
* B (byteorder) byteorder (endianness)
* D (dedup) padding (set to zero)
* X encryption (set to zero)
* E (embedded) set to one
* lvl indirection level
* type DMU object type
* etype how to interpret embedded data (BP_EMBEDDED_TYPE_*)
* comp compression function of payload
* PSIZE size of payload after compression, in bytes
* LSIZE logical size of payload, in bytes
* note that 25 bits is enough to store the largest
* "normal" BP's LSIZE (2^16 * 2^9) in bytes
* log. birth transaction group in which the block was logically born
*
* Note that LSIZE and PSIZE are stored in bytes, whereas for non-embedded
* bp's they are stored in units of SPA_MINBLOCKSHIFT.
* Generally, the generic BP_GET_*() macros can be used on embedded BP's.
* The B, D, X, lvl, type, and comp fields are stored the same as with normal
* BP's so the BP_SET_* macros can be used with them. etype, PSIZE, LSIZE must
* be set with the BPE_SET_* macros. BP_SET_EMBEDDED() should be called before
* other macros, as they assert that they are only used on BP's of the correct
* "embedded-ness". Encrypted blkptr_t's cannot be embedded because they use
* the payload space for encryption parameters (see the comment above on
* how encryption parameters are stored).
*/
#define BPE_GET_ETYPE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET((bp)->blk_prop, 40, 8))
#define BPE_SET_ETYPE(bp, t) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, t); \
_NOTE(CONSTCOND) } while (0)
#define BPE_GET_LSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 0, 25, 0, 1))
#define BPE_SET_LSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 0, 25, 0, 1, x); \
_NOTE(CONSTCOND) } while (0)
#define BPE_GET_PSIZE(bp) \
(ASSERT(BP_IS_EMBEDDED(bp)), \
BF64_GET_SB((bp)->blk_prop, 25, 7, 0, 1))
#define BPE_SET_PSIZE(bp, x) do { \
ASSERT(BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, 25, 7, 0, 1, x); \
_NOTE(CONSTCOND) } while (0)
typedef enum bp_embedded_type {
BP_EMBEDDED_TYPE_DATA,
BP_EMBEDDED_TYPE_RESERVED, /* Reserved for an unintegrated feature. */
NUM_BP_EMBEDDED_TYPES = BP_EMBEDDED_TYPE_RESERVED
} bp_embedded_type_t;
#define BPE_NUM_WORDS 14
#define BPE_PAYLOAD_SIZE (BPE_NUM_WORDS * sizeof (uint64_t))
#define BPE_IS_PAYLOADWORD(bp, wp) \
((wp) != &(bp)->blk_prop && (wp) != &(bp)->blk_birth)
#define SPA_BLKPTRSHIFT 7 /* blkptr_t is 128 bytes */
#define SPA_DVAS_PER_BP 3 /* Number of DVAs in a bp */
/*
* A block is a hole when it has either 1) never been written to, or
* 2) is zero-filled. In both cases, ZFS can return all zeroes for all reads
* without physically allocating disk space. Holes are represented in the
* blkptr_t structure by zeroed blk_dva. Correct checking for holes is
* done through the BP_IS_HOLE macro. For holes, the logical size, level,
* DMU object type, and birth times are all also stored for holes that
* were written to at some point (i.e. were punched after having been filled).
*/
typedef struct blkptr {
dva_t blk_dva[SPA_DVAS_PER_BP]; /* Data Virtual Addresses */
uint64_t blk_prop; /* size, compression, type, etc */
uint64_t blk_pad[2]; /* Extra space for the future */
uint64_t blk_phys_birth; /* txg when block was allocated */
uint64_t blk_birth; /* transaction group at birth */
uint64_t blk_fill; /* fill count */
zio_cksum_t blk_cksum; /* 256-bit checksum */
} blkptr_t;
/*
* Macros to get and set fields in a bp or DVA.
*/
#define DVA_GET_ASIZE(dva) \
BF64_GET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_ASIZE(dva, x) \
BF64_SET_SB((dva)->dva_word[0], 0, SPA_ASIZEBITS, \
SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GRID(dva) BF64_GET((dva)->dva_word[0], 24, 8)
#define DVA_SET_GRID(dva, x) BF64_SET((dva)->dva_word[0], 24, 8, x)
#define DVA_GET_VDEV(dva) BF64_GET((dva)->dva_word[0], 32, 32)
#define DVA_SET_VDEV(dva, x) BF64_SET((dva)->dva_word[0], 32, 32, x)
#define DVA_GET_OFFSET(dva) \
BF64_GET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0)
#define DVA_SET_OFFSET(dva, x) \
BF64_SET_SB((dva)->dva_word[1], 0, 63, SPA_MINBLOCKSHIFT, 0, x)
#define DVA_GET_GANG(dva) BF64_GET((dva)->dva_word[1], 63, 1)
#define DVA_SET_GANG(dva, x) BF64_SET((dva)->dva_word[1], 63, 1, x)
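/*
 * Illustrative sketch only: pulling the vdev/offset/asize triple out of
 * a bp's first DVA with the accessors above.  The helper name is
 * hypothetical; the accessors return values already scaled to bytes.
 */
static inline void
dva_decode_example(const blkptr_t *bp, uint64_t *vdev, uint64_t *offset,
    uint64_t *asize)
{
	const dva_t *dva = &bp->blk_dva[0];

	*vdev = DVA_GET_VDEV(dva);	/* top-level vdev ID */
	*offset = DVA_GET_OFFSET(dva);	/* byte offset into that vdev */
	*asize = DVA_GET_ASIZE(dva);	/* allocated bytes, incl. parity/gang */
}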
#define BP_GET_LSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? \
(BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA ? BPE_GET_LSIZE(bp) : 0): \
BF64_GET_SB((bp)->blk_prop, 0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_LSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
0, SPA_LSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
_NOTE(CONSTCOND) } while (0)
#define BP_GET_PSIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
BF64_GET_SB((bp)->blk_prop, 16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1))
#define BP_SET_PSIZE(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET_SB((bp)->blk_prop, \
16, SPA_PSIZEBITS, SPA_MINBLOCKSHIFT, 1, x); \
_NOTE(CONSTCOND) } while (0)
#define BP_GET_COMPRESS(bp) \
BF64_GET((bp)->blk_prop, 32, SPA_COMPRESSBITS)
#define BP_SET_COMPRESS(bp, x) \
BF64_SET((bp)->blk_prop, 32, SPA_COMPRESSBITS, x)
#define BP_IS_EMBEDDED(bp) BF64_GET((bp)->blk_prop, 39, 1)
#define BP_SET_EMBEDDED(bp, x) BF64_SET((bp)->blk_prop, 39, 1, x)
#define BP_GET_CHECKSUM(bp) \
(BP_IS_EMBEDDED(bp) ? ZIO_CHECKSUM_OFF : \
BF64_GET((bp)->blk_prop, 40, 8))
#define BP_SET_CHECKSUM(bp, x) do { \
ASSERT(!BP_IS_EMBEDDED(bp)); \
BF64_SET((bp)->blk_prop, 40, 8, x); \
_NOTE(CONSTCOND) } while (0)
#define BP_GET_TYPE(bp) BF64_GET((bp)->blk_prop, 48, 8)
#define BP_SET_TYPE(bp, x) BF64_SET((bp)->blk_prop, 48, 8, x)
#define BP_GET_LEVEL(bp) BF64_GET((bp)->blk_prop, 56, 5)
#define BP_SET_LEVEL(bp, x) BF64_SET((bp)->blk_prop, 56, 5, x)
/* encrypted, authenticated, and MAC cksum bps use the same bit */
#define BP_USES_CRYPT(bp) BF64_GET((bp)->blk_prop, 61, 1)
#define BP_SET_CRYPT(bp, x) BF64_SET((bp)->blk_prop, 61, 1, x)
#define BP_IS_ENCRYPTED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_IS_AUTHENTICATED(bp) \
(BP_USES_CRYPT(bp) && \
BP_GET_LEVEL(bp) <= 0 && \
!DMU_OT_IS_ENCRYPTED(BP_GET_TYPE(bp)))
#define BP_HAS_INDIRECT_MAC_CKSUM(bp) \
(BP_USES_CRYPT(bp) && BP_GET_LEVEL(bp) > 0)
#define BP_IS_PROTECTED(bp) \
(BP_IS_ENCRYPTED(bp) || BP_IS_AUTHENTICATED(bp))
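/*
 * Illustrative sketch only: classifying a bp into the four crypt
 * categories described above, mirroring the logic used by
 * SNPRINTF_BLKPTR() further below.  The helper name is hypothetical.
 */
static inline const char *
bp_crypt_kind_example(const blkptr_t *bp)
{
	if (BP_IS_ENCRYPTED(bp))
		return ("encrypted");		/* level <= 0, encrypted type */
	if (BP_IS_AUTHENTICATED(bp))
		return ("authenticated");	/* level <= 0, HMAC only */
	if (BP_HAS_INDIRECT_MAC_CKSUM(bp))
		return ("indirect-MAC");	/* level > 0, checksum-of-MACs */
	return ("unencrypted");
}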
#define BP_GET_DEDUP(bp) BF64_GET((bp)->blk_prop, 62, 1)
#define BP_SET_DEDUP(bp, x) BF64_SET((bp)->blk_prop, 62, 1, x)
#define BP_GET_BYTEORDER(bp) BF64_GET((bp)->blk_prop, 63, 1)
#define BP_SET_BYTEORDER(bp, x) BF64_SET((bp)->blk_prop, 63, 1, x)
#define BP_PHYSICAL_BIRTH(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(bp)->blk_phys_birth ? (bp)->blk_phys_birth : (bp)->blk_birth)
#define BP_SET_BIRTH(bp, logical, physical) \
{ \
ASSERT(!BP_IS_EMBEDDED(bp)); \
(bp)->blk_birth = (logical); \
(bp)->blk_phys_birth = ((logical) == (physical) ? 0 : (physical)); \
}
#define BP_GET_FILL(bp) \
((BP_IS_ENCRYPTED(bp)) ? BF64_GET((bp)->blk_fill, 0, 32) : \
((BP_IS_EMBEDDED(bp)) ? 1 : (bp)->blk_fill))
#define BP_SET_FILL(bp, fill) \
{ \
if (BP_IS_ENCRYPTED(bp)) \
BF64_SET((bp)->blk_fill, 0, 32, fill); \
else \
(bp)->blk_fill = fill; \
}
#define BP_GET_IV2(bp) \
(ASSERT(BP_IS_ENCRYPTED(bp)), \
BF64_GET((bp)->blk_fill, 32, 32))
#define BP_SET_IV2(bp, iv2) \
{ \
ASSERT(BP_IS_ENCRYPTED(bp)); \
BF64_SET((bp)->blk_fill, 32, 32, iv2); \
}
#define BP_IS_METADATA(bp) \
(BP_GET_LEVEL(bp) > 0 || DMU_OT_IS_METADATA(BP_GET_TYPE(bp)))
#define BP_GET_ASIZE(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_GET_UCSIZE(bp) \
(BP_IS_METADATA(bp) ? BP_GET_PSIZE(bp) : BP_GET_LSIZE(bp))
#define BP_GET_NDVAS(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
!!DVA_GET_ASIZE(&(bp)->blk_dva[0]) + \
!!DVA_GET_ASIZE(&(bp)->blk_dva[1]) + \
(!!DVA_GET_ASIZE(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp)))
#define BP_COUNT_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? 0 : \
(DVA_GET_GANG(&(bp)->blk_dva[0]) + \
DVA_GET_GANG(&(bp)->blk_dva[1]) + \
(DVA_GET_GANG(&(bp)->blk_dva[2]) * !BP_IS_ENCRYPTED(bp))))
#define DVA_EQUAL(dva1, dva2) \
((dva1)->dva_word[1] == (dva2)->dva_word[1] && \
(dva1)->dva_word[0] == (dva2)->dva_word[0])
#define BP_EQUAL(bp1, bp2) \
(BP_PHYSICAL_BIRTH(bp1) == BP_PHYSICAL_BIRTH(bp2) && \
(bp1)->blk_birth == (bp2)->blk_birth && \
DVA_EQUAL(&(bp1)->blk_dva[0], &(bp2)->blk_dva[0]) && \
DVA_EQUAL(&(bp1)->blk_dva[1], &(bp2)->blk_dva[1]) && \
DVA_EQUAL(&(bp1)->blk_dva[2], &(bp2)->blk_dva[2]))
#define DVA_IS_VALID(dva) (DVA_GET_ASIZE(dva) != 0)
#define BP_IDENTITY(bp) (ASSERT(!BP_IS_EMBEDDED(bp)), &(bp)->blk_dva[0])
#define BP_IS_GANG(bp) \
(BP_IS_EMBEDDED(bp) ? B_FALSE : DVA_GET_GANG(BP_IDENTITY(bp)))
#define DVA_IS_EMPTY(dva) ((dva)->dva_word[0] == 0ULL && \
(dva)->dva_word[1] == 0ULL)
#define BP_IS_HOLE(bp) \
(!BP_IS_EMBEDDED(bp) && DVA_IS_EMPTY(BP_IDENTITY(bp)))
/* BP_IS_RAIDZ(bp) assumes no block compression */
#define BP_IS_RAIDZ(bp) (DVA_GET_ASIZE(&(bp)->blk_dva[0]) > \
BP_GET_PSIZE(bp))
#define BP_ZERO(bp) \
{ \
(bp)->blk_dva[0].dva_word[0] = 0; \
(bp)->blk_dva[0].dva_word[1] = 0; \
(bp)->blk_dva[1].dva_word[0] = 0; \
(bp)->blk_dva[1].dva_word[1] = 0; \
(bp)->blk_dva[2].dva_word[0] = 0; \
(bp)->blk_dva[2].dva_word[1] = 0; \
(bp)->blk_prop = 0; \
(bp)->blk_pad[0] = 0; \
(bp)->blk_pad[1] = 0; \
(bp)->blk_phys_birth = 0; \
(bp)->blk_birth = 0; \
(bp)->blk_fill = 0; \
ZIO_SET_CHECKSUM(&(bp)->blk_cksum, 0, 0, 0, 0); \
}
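/*
 * Illustrative sketch only: the macro ordering required when building
 * an embedded bp (see the embedded blkptr_t comment above).
 * BP_SET_EMBEDDED() must precede the BPE_SET_*() calls, whose
 * assertions require an embedded bp.  This is not the real encoder;
 * see blkptr.c for that.
 */
static inline void
bp_embedded_init_example(blkptr_t *bp, uint64_t lsize, uint64_t psize)
{
	BP_ZERO(bp);
	BP_SET_EMBEDDED(bp, B_TRUE);
	BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
	BPE_SET_LSIZE(bp, lsize);	/* embedded sizes are stored in bytes */
	BPE_SET_PSIZE(bp, psize);	/* must not exceed BPE_PAYLOAD_SIZE */
}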
#ifdef _BIG_ENDIAN
#define ZFS_HOST_BYTEORDER (0ULL)
#else
#define ZFS_HOST_BYTEORDER (1ULL)
#endif
#define BP_SHOULD_BYTESWAP(bp) (BP_GET_BYTEORDER(bp) != ZFS_HOST_BYTEORDER)
#define BP_SPRINTF_LEN 400
/*
* This macro allows code sharing between zfs, libzpool, and mdb.
* 'func' is either snprintf() or mdb_snprintf().
* 'ws' (whitespace) can be ' ' for single-line format, '\n' for multi-line.
*/
#define SNPRINTF_BLKPTR(func, ws, buf, size, bp, type, checksum, compress) \
{ \
static const char *copyname[] = \
{ "zero", "single", "double", "triple" }; \
int len = 0; \
int copies = 0; \
const char *crypt_type; \
if (bp != NULL) { \
if (BP_IS_ENCRYPTED(bp)) { \
crypt_type = "encrypted"; \
/* LINTED E_SUSPICIOUS_COMPARISON */ \
} else if (BP_IS_AUTHENTICATED(bp)) { \
crypt_type = "authenticated"; \
} else if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) { \
crypt_type = "indirect-MAC"; \
} else { \
crypt_type = "unencrypted"; \
} \
} \
if (bp == NULL) { \
len += func(buf + len, size - len, "<NULL>"); \
} else if (BP_IS_HOLE(bp)) { \
len += func(buf + len, size - len, \
"HOLE [L%llu %s] " \
"size=%llxL birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else if (BP_IS_EMBEDDED(bp)) { \
len = func(buf + len, size - len, \
"EMBEDDED [L%llu %s] et=%u %s " \
"size=%llxL/%llxP birth=%lluL", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
(int)BPE_GET_ETYPE(bp), \
compress, \
(u_longlong_t)BPE_GET_LSIZE(bp), \
(u_longlong_t)BPE_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth); \
} else { \
for (int d = 0; d < BP_GET_NDVAS(bp); d++) { \
const dva_t *dva = &bp->blk_dva[d]; \
if (DVA_IS_VALID(dva)) \
copies++; \
len += func(buf + len, size - len, \
"DVA[%d]=<%llu:%llx:%llx>%c", d, \
(u_longlong_t)DVA_GET_VDEV(dva), \
(u_longlong_t)DVA_GET_OFFSET(dva), \
(u_longlong_t)DVA_GET_ASIZE(dva), \
ws); \
} \
if (BP_IS_ENCRYPTED(bp)) { \
len += func(buf + len, size - len, \
"salt=%llx iv=%llx:%llx%c", \
(u_longlong_t)bp->blk_dva[2].dva_word[0], \
(u_longlong_t)bp->blk_dva[2].dva_word[1], \
(u_longlong_t)BP_GET_IV2(bp), \
ws); \
} \
if (BP_IS_GANG(bp) && \
DVA_GET_ASIZE(&bp->blk_dva[2]) <= \
DVA_GET_ASIZE(&bp->blk_dva[1]) / 2) \
copies--; \
len += func(buf + len, size - len, \
"[L%llu %s] %s %s %s %s %s %s %s%c" \
"size=%llxL/%llxP birth=%lluL/%lluP fill=%llu%c" \
"cksum=%llx:%llx:%llx:%llx", \
(u_longlong_t)BP_GET_LEVEL(bp), \
type, \
checksum, \
compress, \
crypt_type, \
BP_GET_BYTEORDER(bp) == 0 ? "BE" : "LE", \
BP_IS_GANG(bp) ? "gang" : "contiguous", \
BP_GET_DEDUP(bp) ? "dedup" : "unique", \
copyname[copies], \
ws, \
(u_longlong_t)BP_GET_LSIZE(bp), \
(u_longlong_t)BP_GET_PSIZE(bp), \
(u_longlong_t)bp->blk_birth, \
(u_longlong_t)BP_PHYSICAL_BIRTH(bp), \
(u_longlong_t)BP_GET_FILL(bp), \
ws, \
(u_longlong_t)bp->blk_cksum.zc_word[0], \
(u_longlong_t)bp->blk_cksum.zc_word[1], \
(u_longlong_t)bp->blk_cksum.zc_word[2], \
(u_longlong_t)bp->blk_cksum.zc_word[3]); \
} \
ASSERT(len < size); \
}
#define BP_GET_BUFC_TYPE(bp) \
(BP_IS_METADATA(bp) ? ARC_BUFC_METADATA : ARC_BUFC_DATA)
typedef enum spa_import_type {
SPA_IMPORT_EXISTING,
SPA_IMPORT_ASSEMBLE
} spa_import_type_t;
/* state manipulation functions */
extern int spa_open(const char *pool, spa_t **, void *tag);
extern int spa_open_rewind(const char *pool, spa_t **, void *tag,
nvlist_t *policy, nvlist_t **config);
extern int spa_get_stats(const char *pool, nvlist_t **config, char *altroot,
size_t buflen);
extern int spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, struct dsl_crypto_params *dcp);
extern int spa_import(char *pool, nvlist_t *config, nvlist_t *props,
uint64_t flags);
extern nvlist_t *spa_tryimport(nvlist_t *tryconfig);
extern int spa_destroy(char *pool);
extern int spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce);
extern int spa_reset(char *pool);
extern void spa_async_request(spa_t *spa, int flag);
extern void spa_async_unrequest(spa_t *spa, int flag);
extern void spa_async_suspend(spa_t *spa);
extern void spa_async_resume(spa_t *spa);
extern spa_t *spa_inject_addref(char *pool);
extern void spa_inject_delref(spa_t *spa);
extern void spa_scan_stat_init(spa_t *spa);
extern int spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps);
#define SPA_ASYNC_CONFIG_UPDATE 0x01
#define SPA_ASYNC_REMOVE 0x02
#define SPA_ASYNC_PROBE 0x04
#define SPA_ASYNC_RESILVER_DONE 0x08
#define SPA_ASYNC_RESILVER 0x10
#define SPA_ASYNC_AUTOEXPAND 0x20
#define SPA_ASYNC_REMOVE_DONE 0x40
#define SPA_ASYNC_REMOVE_STOP 0x80
/*
* Controls the behavior of spa_vdev_remove().
*/
#define SPA_REMOVE_UNSPARE 0x01
#define SPA_REMOVE_DONE 0x02
/* device manipulation */
extern int spa_vdev_add(spa_t *spa, nvlist_t *nvroot);
extern int spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot,
int replacing);
extern int spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid,
int replace_done);
extern int spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare);
extern boolean_t spa_vdev_remove_active(spa_t *spa);
extern int spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath);
extern int spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru);
extern int spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp);
/* spare state (which is global across all pools) */
extern void spa_spare_add(vdev_t *vd);
extern void spa_spare_remove(vdev_t *vd);
extern boolean_t spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt);
extern void spa_spare_activate(vdev_t *vd);
/* L2ARC state (which is global across all pools) */
extern void spa_l2cache_add(vdev_t *vd);
extern void spa_l2cache_remove(vdev_t *vd);
extern boolean_t spa_l2cache_exists(uint64_t guid, uint64_t *pool);
extern void spa_l2cache_activate(vdev_t *vd);
extern void spa_l2cache_drop(spa_t *spa);
/* scanning */
extern int spa_scan(spa_t *spa, pool_scan_func_t func);
extern int spa_scan_stop(spa_t *spa);
extern int spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t flag);
/* spa syncing */
extern void spa_sync(spa_t *spa, uint64_t txg); /* only for DMU use */
extern void spa_sync_allpools(void);
extern int zfs_sync_pass_deferred_free;
/* spa namespace global mutex */
extern kmutex_t spa_namespace_lock;
/*
* SPA configuration functions in spa_config.c
*/
#define SPA_CONFIG_UPDATE_POOL 0
#define SPA_CONFIG_UPDATE_VDEVS 1
-extern void spa_config_sync(spa_t *, boolean_t, boolean_t);
+extern void spa_write_cachefile(spa_t *, boolean_t, boolean_t);
extern void spa_config_load(void);
extern nvlist_t *spa_all_configs(uint64_t *);
extern void spa_config_set(spa_t *spa, nvlist_t *config);
extern nvlist_t *spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg,
int getstats);
extern void spa_config_update(spa_t *spa, int what);
/*
* Miscellaneous SPA routines in spa_misc.c
*/
/* Namespace manipulation */
extern spa_t *spa_lookup(const char *name);
extern spa_t *spa_add(const char *name, nvlist_t *config, const char *altroot);
extern void spa_remove(spa_t *spa);
extern spa_t *spa_next(spa_t *prev);
/* Refcount functions */
extern void spa_open_ref(spa_t *spa, void *tag);
extern void spa_close(spa_t *spa, void *tag);
extern void spa_async_close(spa_t *spa, void *tag);
extern boolean_t spa_refcount_zero(spa_t *spa);
#define SCL_NONE 0x00
#define SCL_CONFIG 0x01
#define SCL_STATE 0x02
#define SCL_L2ARC 0x04 /* hack until L2ARC 2.0 */
#define SCL_ALLOC 0x08
#define SCL_ZIO 0x10
#define SCL_FREE 0x20
#define SCL_VDEV 0x40
#define SCL_LOCKS 7
#define SCL_ALL ((1 << SCL_LOCKS) - 1)
#define SCL_STATE_ALL (SCL_STATE | SCL_L2ARC | SCL_ZIO)
/* Historical pool statistics */
typedef struct spa_stats_history {
kmutex_t lock;
uint64_t count;
uint64_t size;
kstat_t *kstat;
void *private;
list_t list;
} spa_stats_history_t;
typedef struct spa_stats {
spa_stats_history_t read_history;
spa_stats_history_t txg_history;
spa_stats_history_t tx_assign_histogram;
spa_stats_history_t io_history;
spa_stats_history_t mmp_history;
} spa_stats_t;
typedef enum txg_state {
TXG_STATE_BIRTH = 0,
TXG_STATE_OPEN = 1,
TXG_STATE_QUIESCED = 2,
TXG_STATE_WAIT_FOR_SYNC = 3,
TXG_STATE_SYNCED = 4,
TXG_STATE_COMMITTED = 5,
} txg_state_t;
typedef struct txg_stat {
vdev_stat_t vs1;
vdev_stat_t vs2;
uint64_t txg;
uint64_t ndirty;
} txg_stat_t;
extern void spa_stats_init(spa_t *spa);
extern void spa_stats_destroy(spa_t *spa);
extern void spa_read_history_add(spa_t *spa, const zbookmark_phys_t *zb,
uint32_t aflags);
extern void spa_txg_history_add(spa_t *spa, uint64_t txg, hrtime_t birth_time);
extern int spa_txg_history_set(spa_t *spa, uint64_t txg,
txg_state_t completed_state, hrtime_t completed_time);
extern txg_stat_t *spa_txg_history_init_io(spa_t *, uint64_t,
struct dsl_pool *);
extern void spa_txg_history_fini_io(spa_t *, txg_stat_t *);
extern void spa_tx_assign_add_nsecs(spa_t *spa, uint64_t nsecs);
extern int spa_mmp_history_set_skip(spa_t *spa, uint64_t mmp_kstat_id);
extern int spa_mmp_history_set(spa_t *spa, uint64_t mmp_kstat_id, int io_error,
hrtime_t duration);
extern void *spa_mmp_history_add(spa_t *spa, uint64_t txg, uint64_t timestamp,
uint64_t mmp_delay, vdev_t *vd, int label, uint64_t mmp_kstat_id,
int error);
/* Pool configuration locks */
extern int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw);
extern void spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw);
extern void spa_config_exit(spa_t *spa, int locks, void *tag);
extern int spa_config_held(spa_t *spa, int locks, krw_t rw);
/* Pool vdev add/remove lock */
extern uint64_t spa_vdev_enter(spa_t *spa);
extern uint64_t spa_vdev_config_enter(spa_t *spa);
extern void spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg,
int error, char *tag);
extern int spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error);
/* Pool vdev state change lock */
extern void spa_vdev_state_enter(spa_t *spa, int oplock);
extern int spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error);
/* Log state */
typedef enum spa_log_state {
SPA_LOG_UNKNOWN = 0, /* unknown log state */
SPA_LOG_MISSING, /* missing log(s) */
SPA_LOG_CLEAR, /* clear the log(s) */
SPA_LOG_GOOD, /* log(s) are good */
} spa_log_state_t;
extern spa_log_state_t spa_get_log_state(spa_t *spa);
extern void spa_set_log_state(spa_t *spa, spa_log_state_t state);
-extern int spa_offline_log(spa_t *spa);
+extern int spa_reset_logs(spa_t *spa);
/* Log claim callback */
extern void spa_claim_notify(zio_t *zio);
extern void spa_deadman(void *);
/* Accessor functions */
extern boolean_t spa_shutting_down(spa_t *spa);
extern struct dsl_pool *spa_get_dsl(spa_t *spa);
extern boolean_t spa_is_initializing(spa_t *spa);
+extern boolean_t spa_indirect_vdevs_loaded(spa_t *spa);
extern blkptr_t *spa_get_rootblkptr(spa_t *spa);
extern void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp);
extern void spa_altroot(spa_t *, char *, size_t);
extern int spa_sync_pass(spa_t *spa);
extern char *spa_name(spa_t *spa);
extern uint64_t spa_guid(spa_t *spa);
extern uint64_t spa_load_guid(spa_t *spa);
extern uint64_t spa_last_synced_txg(spa_t *spa);
extern uint64_t spa_first_txg(spa_t *spa);
extern uint64_t spa_syncing_txg(spa_t *spa);
extern uint64_t spa_final_dirty_txg(spa_t *spa);
extern uint64_t spa_version(spa_t *spa);
extern pool_state_t spa_state(spa_t *spa);
extern spa_load_state_t spa_load_state(spa_t *spa);
extern uint64_t spa_freeze_txg(spa_t *spa);
extern uint64_t spa_get_worst_case_asize(spa_t *spa, uint64_t lsize);
extern uint64_t spa_get_dspace(spa_t *spa);
extern uint64_t spa_get_slop_space(spa_t *spa);
extern void spa_update_dspace(spa_t *spa);
extern uint64_t spa_version(spa_t *spa);
extern boolean_t spa_deflate(spa_t *spa);
extern metaslab_class_t *spa_normal_class(spa_t *spa);
extern metaslab_class_t *spa_log_class(spa_t *spa);
extern void spa_evicting_os_register(spa_t *, objset_t *os);
extern void spa_evicting_os_deregister(spa_t *, objset_t *os);
extern void spa_evicting_os_wait(spa_t *spa);
extern int spa_max_replication(spa_t *spa);
extern int spa_prev_software_version(spa_t *spa);
extern uint64_t spa_get_failmode(spa_t *spa);
extern uint64_t spa_get_deadman_failmode(spa_t *spa);
extern void spa_set_deadman_failmode(spa_t *spa, const char *failmode);
extern boolean_t spa_suspended(spa_t *spa);
extern uint64_t spa_bootfs(spa_t *spa);
extern uint64_t spa_delegation(spa_t *spa);
extern objset_t *spa_meta_objset(spa_t *spa);
extern uint64_t spa_deadman_synctime(spa_t *spa);
extern uint64_t spa_deadman_ziotime(spa_t *spa);
/* Miscellaneous support routines */
extern void spa_activate_mos_feature(spa_t *spa, const char *feature,
dmu_tx_t *tx);
extern void spa_deactivate_mos_feature(spa_t *spa, const char *feature);
extern int spa_rename(const char *oldname, const char *newname);
extern spa_t *spa_by_guid(uint64_t pool_guid, uint64_t device_guid);
extern boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid);
extern char *spa_strdup(const char *);
extern void spa_strfree(char *);
extern uint64_t spa_get_random(uint64_t range);
extern uint64_t spa_generate_guid(spa_t *spa);
extern void snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp);
extern void spa_freeze(spa_t *spa);
extern int spa_change_guid(spa_t *spa);
extern void spa_upgrade(spa_t *spa, uint64_t version);
extern void spa_evict_all(void);
extern vdev_t *spa_lookup_by_guid(spa_t *spa, uint64_t guid,
boolean_t l2cache);
extern boolean_t spa_has_spare(spa_t *, uint64_t guid);
extern uint64_t dva_get_dsize_sync(spa_t *spa, const dva_t *dva);
extern uint64_t bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp);
extern uint64_t bp_get_dsize(spa_t *spa, const blkptr_t *bp);
extern boolean_t spa_has_slogs(spa_t *spa);
extern boolean_t spa_is_root(spa_t *spa);
extern boolean_t spa_writeable(spa_t *spa);
extern boolean_t spa_has_pending_synctask(spa_t *spa);
extern int spa_maxblocksize(spa_t *spa);
extern int spa_maxdnodesize(spa_t *spa);
extern void zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp);
+typedef void (*spa_remap_cb_t)(uint64_t vdev, uint64_t offset, uint64_t size,
+ void *arg);
+extern boolean_t spa_remap_blkptr(spa_t *spa, blkptr_t *bp,
+ spa_remap_cb_t callback, void *arg);
+extern uint64_t spa_get_last_removal_txg(spa_t *spa);
extern boolean_t spa_multihost(spa_t *spa);
extern unsigned long spa_get_hostid(void);
extern int spa_mode(spa_t *spa);
extern uint64_t zfs_strtonum(const char *str, char **nptr);
extern char *spa_his_ievent_table[];
extern void spa_history_create_obj(spa_t *spa, dmu_tx_t *tx);
extern int spa_history_get(spa_t *spa, uint64_t *offset, uint64_t *len_read,
char *his_buf);
extern int spa_history_log(spa_t *spa, const char *his_buf);
extern int spa_history_log_nvl(spa_t *spa, nvlist_t *nvl);
extern void spa_history_log_version(spa_t *spa, const char *operation,
dmu_tx_t *tx);
extern void spa_history_log_internal(spa_t *spa, const char *operation,
dmu_tx_t *tx, const char *fmt, ...);
extern void spa_history_log_internal_ds(struct dsl_dataset *ds, const char *op,
dmu_tx_t *tx, const char *fmt, ...);
extern void spa_history_log_internal_dd(dsl_dir_t *dd, const char *operation,
dmu_tx_t *tx, const char *fmt, ...);
/* error handling */
struct zbookmark_phys;
extern void spa_log_error(spa_t *spa, const zbookmark_phys_t *zb);
extern void zfs_ereport_post(const char *class, spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, zio_t *zio, uint64_t stateoroffset,
uint64_t length);
extern nvlist_t *zfs_event_create(spa_t *spa, vdev_t *vd, const char *type,
const char *name, nvlist_t *aux);
extern void zfs_post_remove(spa_t *spa, vdev_t *vd);
extern void zfs_post_state_change(spa_t *spa, vdev_t *vd, uint64_t laststate);
extern void zfs_post_autoreplace(spa_t *spa, vdev_t *vd);
extern uint64_t spa_get_errlog_size(spa_t *spa);
extern int spa_get_errlog(spa_t *spa, void *uaddr, size_t *count);
extern void spa_errlog_rotate(spa_t *spa);
extern void spa_errlog_drain(spa_t *spa);
extern void spa_errlog_sync(spa_t *spa, uint64_t txg);
extern void spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub);
/* vdev cache */
extern void vdev_cache_stat_init(void);
extern void vdev_cache_stat_fini(void);
/* vdev mirror */
extern void vdev_mirror_stat_init(void);
extern void vdev_mirror_stat_fini(void);
/* Initialization and termination */
extern void spa_init(int flags);
extern void spa_fini(void);
extern void spa_boot_init(void);
/* properties */
extern int spa_prop_set(spa_t *spa, nvlist_t *nvp);
extern int spa_prop_get(spa_t *spa, nvlist_t **nvp);
extern void spa_prop_clear_bootfs(spa_t *spa, uint64_t obj, dmu_tx_t *tx);
extern void spa_configfile_set(spa_t *, nvlist_t *, boolean_t);
/* asynchronous event notification */
extern void spa_event_notify(spa_t *spa, vdev_t *vdev, nvlist_t *hist_nvl,
const char *name);
#ifdef ZFS_DEBUG
#define dprintf_bp(bp, fmt, ...) do { \
if (zfs_flags & ZFS_DEBUG_DPRINTF) { \
char *__blkbuf = kmem_alloc(BP_SPRINTF_LEN, KM_SLEEP); \
snprintf_blkptr(__blkbuf, BP_SPRINTF_LEN, (bp)); \
dprintf(fmt " %s\n", __VA_ARGS__, __blkbuf); \
kmem_free(__blkbuf, BP_SPRINTF_LEN); \
} \
_NOTE(CONSTCOND) } while (0)
#else
#define dprintf_bp(bp, fmt, ...)
#endif
extern boolean_t spa_debug_enabled(spa_t *spa);
#define spa_dbgmsg(spa, ...) \
{ \
if (spa_debug_enabled(spa)) \
zfs_dbgmsg(__VA_ARGS__); \
}
extern int spa_mode_global; /* mode, e.g. FREAD | FWRITE */
extern int zfs_deadman_enabled;
extern unsigned long zfs_deadman_synctime_ms;
extern unsigned long zfs_deadman_ziotime_ms;
extern unsigned long zfs_deadman_checktime_ms;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPA_H */
diff --git a/include/sys/spa_impl.h b/include/sys/spa_impl.h
index 77625d4b0072..1741eb9e5c69 100644
--- a/include/sys/spa_impl.h
+++ b/include/sys/spa_impl.h
@@ -1,311 +1,383 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2017 Datto Inc.
*/
#ifndef _SYS_SPA_IMPL_H
#define _SYS_SPA_IMPL_H
#include <sys/spa.h>
#include <sys/vdev.h>
+#include <sys/vdev_removal.h>
#include <sys/metaslab.h>
#include <sys/dmu.h>
#include <sys/dsl_pool.h>
#include <sys/uberblock_impl.h>
#include <sys/zfs_context.h>
#include <sys/avl.h>
#include <sys/refcount.h>
#include <sys/bplist.h>
#include <sys/bpobj.h>
#include <sys/dsl_crypt.h>
#include <sys/zfeature.h>
#include <zfeature_common.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef struct spa_error_entry {
zbookmark_phys_t se_bookmark;
char *se_name;
avl_node_t se_avl;
} spa_error_entry_t;
typedef struct spa_history_phys {
uint64_t sh_pool_create_len; /* ending offset of zpool create */
uint64_t sh_phys_max_off; /* physical EOF */
uint64_t sh_bof; /* logical BOF */
uint64_t sh_eof; /* logical EOF */
uint64_t sh_records_lost; /* num of records overwritten */
} spa_history_phys_t;
+/*
+ * All members must be uint64_t, for byteswap purposes.
+ */
+typedef struct spa_removing_phys {
+ uint64_t sr_state; /* dsl_scan_state_t */
+
+ /*
+ * The vdev ID that we most recently attempted to remove,
+ * or -1 if no removal has been attempted.
+ */
+ uint64_t sr_removing_vdev;
+
+ /*
+ * The vdev ID that we most recently successfully removed,
+ * or -1 if no devices have been removed.
+ */
+ uint64_t sr_prev_indirect_vdev;
+
+ uint64_t sr_start_time;
+ uint64_t sr_end_time;
+
+ /*
+	 * Note that we cannot use the space map's or indirect mapping's
+	 * accounting as a substitute for these values, because we need to
+	 * count frees of not-yet-copied data as though the copy had completed.
+ * Otherwise, we could get into a situation where copied > to_copy,
+ * or we complete before copied == to_copy.
+ */
+ uint64_t sr_to_copy; /* bytes that need to be copied */
+ uint64_t sr_copied; /* bytes that have been copied or freed */
+} spa_removing_phys_t;
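+
+/*
+ * Illustrative sketch (not part of this change): given the accounting
+ * fields above, removal progress can be reported as a percentage.  The
+ * helper name is hypothetical.
+ */
+static inline uint64_t
+spa_removing_progress_pct(const spa_removing_phys_t *srp)
+{
+	if (srp->sr_to_copy == 0)
+		return (100);
+	/* sr_copied counts bytes copied or freed (see above). */
+	return (srp->sr_copied * 100 / srp->sr_to_copy);
+}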
+
+/*
+ * This struct is stored as an entry in the DMU_POOL_DIRECTORY_OBJECT
+ * (with key DMU_POOL_CONDENSING_INDIRECT). It is present if a condense
+ * of an indirect vdev's mapping object is in progress.
+ */
+typedef struct spa_condensing_indirect_phys {
+ /*
+ * The vdev ID of the indirect vdev whose indirect mapping is
+ * being condensed.
+ */
+ uint64_t scip_vdev;
+
+ /*
+ * The vdev's old obsolete spacemap. This spacemap's contents are
+ * being integrated into the new mapping.
+ */
+ uint64_t scip_prev_obsolete_sm_object;
+
+ /*
+ * The new mapping object that is being created.
+ */
+ uint64_t scip_next_mapping_object;
+} spa_condensing_indirect_phys_t;
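+
+/*
+ * Illustrative sketch (not part of this change): the entry above exists in
+ * the pool directory only while a condense is in progress, so its presence
+ * can be tested with zap_contains() (assumes <sys/zap.h> is available).
+ * The helper name is hypothetical; "mos" is the pool's meta-objset.
+ */
+static inline boolean_t
+spa_condensing_indirect_in_progress(objset_t *mos)
+{
+	return (zap_contains(mos, DMU_POOL_DIRECTORY_OBJECT,
+	    DMU_POOL_CONDENSING_INDIRECT) == 0);
+}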
+
struct spa_aux_vdev {
uint64_t sav_object; /* MOS object for device list */
nvlist_t *sav_config; /* cached device config */
vdev_t **sav_vdevs; /* devices */
int sav_count; /* number devices */
boolean_t sav_sync; /* sync the device list */
nvlist_t **sav_pending; /* pending device additions */
uint_t sav_npending; /* # pending devices */
};
typedef struct spa_config_lock {
kmutex_t scl_lock;
kthread_t *scl_writer;
int scl_write_wanted;
kcondvar_t scl_cv;
refcount_t scl_count;
} spa_config_lock_t;
typedef struct spa_config_dirent {
list_node_t scd_link;
char *scd_path;
} spa_config_dirent_t;
typedef enum zio_taskq_type {
ZIO_TASKQ_ISSUE = 0,
ZIO_TASKQ_ISSUE_HIGH,
ZIO_TASKQ_INTERRUPT,
ZIO_TASKQ_INTERRUPT_HIGH,
ZIO_TASKQ_TYPES
} zio_taskq_type_t;
/*
* State machine for the zpool-poolname process. The state transitions
* are done as follows:
*
* From To Routine
* PROC_NONE -> PROC_CREATED spa_activate()
* PROC_CREATED -> PROC_ACTIVE spa_thread()
* PROC_ACTIVE -> PROC_DEACTIVATE spa_deactivate()
* PROC_DEACTIVATE -> PROC_GONE spa_thread()
* PROC_GONE -> PROC_NONE spa_deactivate()
*/
typedef enum spa_proc_state {
SPA_PROC_NONE, /* spa_proc = &p0, no process created */
SPA_PROC_CREATED, /* spa_activate() has proc, is waiting */
SPA_PROC_ACTIVE, /* taskqs created, spa_proc set */
SPA_PROC_DEACTIVATE, /* spa_deactivate() requests process exit */
SPA_PROC_GONE /* spa_thread() is exiting, spa_proc = &p0 */
} spa_proc_state_t;
typedef struct spa_taskqs {
uint_t stqs_count;
taskq_t **stqs_taskq;
} spa_taskqs_t;
typedef enum spa_all_vdev_zap_action {
AVZ_ACTION_NONE = 0,
AVZ_ACTION_DESTROY, /* Destroy all per-vdev ZAPs and the AVZ. */
AVZ_ACTION_REBUILD, /* Populate the new AVZ, see spa_avz_rebuild */
AVZ_ACTION_INITIALIZE
} spa_avz_action_t;
struct spa {
/*
* Fields protected by spa_namespace_lock.
*/
char spa_name[ZFS_MAX_DATASET_NAME_LEN]; /* pool name */
char *spa_comment; /* comment */
avl_node_t spa_avl; /* node in spa_namespace_avl */
nvlist_t *spa_config; /* last synced config */
nvlist_t *spa_config_syncing; /* currently syncing config */
nvlist_t *spa_config_splitting; /* config for splitting */
nvlist_t *spa_load_info; /* info and errors from load */
uint64_t spa_config_txg; /* txg of last config change */
int spa_sync_pass; /* iterate-to-convergence */
pool_state_t spa_state; /* pool state */
int spa_inject_ref; /* injection references */
uint8_t spa_sync_on; /* sync threads are running */
spa_load_state_t spa_load_state; /* current load operation */
+ boolean_t spa_indirect_vdevs_loaded; /* mappings loaded? */
uint64_t spa_import_flags; /* import specific flags */
spa_taskqs_t spa_zio_taskq[ZIO_TYPES][ZIO_TASKQ_TYPES];
dsl_pool_t *spa_dsl_pool;
boolean_t spa_is_initializing; /* true while opening pool */
metaslab_class_t *spa_normal_class; /* normal data class */
metaslab_class_t *spa_log_class; /* intent log data class */
uint64_t spa_first_txg; /* first txg after spa_open() */
uint64_t spa_final_txg; /* txg of export/destroy */
uint64_t spa_freeze_txg; /* freeze pool at this txg */
uint64_t spa_load_max_txg; /* best initial ub_txg */
uint64_t spa_claim_max_txg; /* highest claimed birth txg */
timespec_t spa_loaded_ts; /* 1st successful open time */
objset_t *spa_meta_objset; /* copy of dp->dp_meta_objset */
kmutex_t spa_evicting_os_lock; /* Evicting objset list lock */
list_t spa_evicting_os_list; /* Objsets being evicted. */
kcondvar_t spa_evicting_os_cv; /* Objset Eviction Completion */
txg_list_t spa_vdev_txg_list; /* per-txg dirty vdev list */
vdev_t *spa_root_vdev; /* top-level vdev container */
int spa_min_ashift; /* of vdevs in normal class */
int spa_max_ashift; /* of vdevs in normal class */
uint64_t spa_config_guid; /* config pool guid */
uint64_t spa_load_guid; /* spa_load initialized guid */
uint64_t spa_last_synced_guid; /* last synced guid */
list_t spa_config_dirty_list; /* vdevs with dirty config */
list_t spa_state_dirty_list; /* vdevs with dirty state */
kmutex_t spa_alloc_lock;
avl_tree_t spa_alloc_tree;
spa_aux_vdev_t spa_spares; /* hot spares */
spa_aux_vdev_t spa_l2cache; /* L2ARC cache devices */
nvlist_t *spa_label_features; /* Features for reading MOS */
uint64_t spa_config_object; /* MOS object for pool config */
uint64_t spa_config_generation; /* config generation number */
uint64_t spa_syncing_txg; /* txg currently syncing */
bpobj_t spa_deferred_bpobj; /* deferred-free bplist */
bplist_t spa_free_bplist[TXG_SIZE]; /* bplist of stuff to free */
zio_cksum_salt_t spa_cksum_salt; /* secret salt for cksum */
/* checksum context templates */
kmutex_t spa_cksum_tmpls_lock;
void *spa_cksum_tmpls[ZIO_CHECKSUM_FUNCTIONS];
uberblock_t spa_ubsync; /* last synced uberblock */
uberblock_t spa_uberblock; /* current uberblock */
boolean_t spa_extreme_rewind; /* rewind past deferred frees */
kmutex_t spa_scrub_lock; /* resilver/scrub lock */
uint64_t spa_scrub_inflight; /* in-flight scrub bytes */
uint64_t spa_load_verify_ios; /* in-flight verification IOs */
kcondvar_t spa_scrub_io_cv; /* scrub I/O completion */
uint8_t spa_scrub_active; /* active or suspended? */
uint8_t spa_scrub_type; /* type of scrub we're doing */
uint8_t spa_scrub_finished; /* indicator to rotate logs */
uint8_t spa_scrub_started; /* started since last boot */
uint8_t spa_scrub_reopen; /* scrub doing vdev_reopen */
uint64_t spa_scan_pass_start; /* start time per pass/reboot */
uint64_t spa_scan_pass_scrub_pause; /* scrub pause time */
uint64_t spa_scan_pass_scrub_spent_paused; /* total paused */
uint64_t spa_scan_pass_exam; /* examined bytes per pass */
uint64_t spa_scan_pass_issued; /* issued bytes per pass */
kmutex_t spa_async_lock; /* protect async state */
kthread_t *spa_async_thread; /* thread doing async task */
int spa_async_suspended; /* async tasks suspended */
kcondvar_t spa_async_cv; /* wait for thread_exit() */
uint16_t spa_async_tasks; /* async task mask */
+
+ spa_removing_phys_t spa_removing_phys;
+ spa_vdev_removal_t *spa_vdev_removal;
+
+ spa_condensing_indirect_phys_t spa_condensing_indirect_phys;
+ spa_condensing_indirect_t *spa_condensing_indirect;
+ kthread_t *spa_condense_thread; /* thread doing condense. */
+
char *spa_root; /* alternate root directory */
uint64_t spa_ena; /* spa-wide ereport ENA */
int spa_last_open_failed; /* error if last open failed */
uint64_t spa_last_ubsync_txg; /* "best" uberblock txg */
uint64_t spa_last_ubsync_txg_ts; /* timestamp from that ub */
uint64_t spa_load_txg; /* ub txg that loaded */
uint64_t spa_load_txg_ts; /* timestamp from that ub */
uint64_t spa_load_meta_errors; /* verify metadata err count */
uint64_t spa_load_data_errors; /* verify data err count */
uint64_t spa_verify_min_txg; /* start txg of verify scrub */
kmutex_t spa_errlog_lock; /* error log lock */
uint64_t spa_errlog_last; /* last error log object */
uint64_t spa_errlog_scrub; /* scrub error log object */
kmutex_t spa_errlist_lock; /* error list/ereport lock */
avl_tree_t spa_errlist_last; /* last error list */
avl_tree_t spa_errlist_scrub; /* scrub error list */
uint64_t spa_deflate; /* should we deflate? */
uint64_t spa_history; /* history object */
kmutex_t spa_history_lock; /* history lock */
vdev_t *spa_pending_vdev; /* pending vdev additions */
kmutex_t spa_props_lock; /* property lock */
uint64_t spa_pool_props_object; /* object for properties */
uint64_t spa_bootfs; /* default boot filesystem */
uint64_t spa_failmode; /* failure mode for the pool */
uint64_t spa_deadman_failmode; /* failure mode for deadman */
uint64_t spa_delegation; /* delegation on/off */
list_t spa_config_list; /* previous cache file(s) */
/* per-CPU array of root of async I/O: */
zio_t **spa_async_zio_root;
zio_t *spa_suspend_zio_root; /* root of all suspended I/O */
+ zio_t *spa_txg_zio[TXG_SIZE]; /* spa_sync() waits for this */
kmutex_t spa_suspend_lock; /* protects suspend_zio_root */
kcondvar_t spa_suspend_cv; /* notification of resume */
zio_suspend_reason_t spa_suspended; /* pool is suspended */
uint8_t spa_claiming; /* pool is doing zil_claim() */
boolean_t spa_debug; /* debug enabled? */
boolean_t spa_is_root; /* pool is root */
int spa_minref; /* num refs when first opened */
int spa_mode; /* FREAD | FWRITE */
spa_log_state_t spa_log_state; /* log state */
uint64_t spa_autoexpand; /* lun expansion on/off */
ddt_t *spa_ddt[ZIO_CHECKSUM_FUNCTIONS]; /* in-core DDTs */
uint64_t spa_ddt_stat_object; /* DDT statistics */
uint64_t spa_dedup_dspace; /* Cache get_dedup_dspace() */
uint64_t spa_dedup_ditto; /* dedup ditto threshold */
uint64_t spa_dedup_checksum; /* default dedup checksum */
uint64_t spa_dspace; /* dspace in normal class */
kmutex_t spa_vdev_top_lock; /* dueling offline/remove */
kmutex_t spa_proc_lock; /* protects spa_proc* */
kcondvar_t spa_proc_cv; /* spa_proc_state transitions */
spa_proc_state_t spa_proc_state; /* see definition */
proc_t *spa_proc; /* "zpool-poolname" process */
uint64_t spa_did; /* if procp != p0, did of t1 */
boolean_t spa_autoreplace; /* autoreplace set in open */
int spa_vdev_locks; /* locks grabbed */
uint64_t spa_creation_version; /* version at pool creation */
uint64_t spa_prev_software_version; /* See ub_software_version */
uint64_t spa_feat_for_write_obj; /* required to write to pool */
uint64_t spa_feat_for_read_obj; /* required to read from pool */
uint64_t spa_feat_desc_obj; /* Feature descriptions */
uint64_t spa_feat_enabled_txg_obj; /* Feature enabled txg */
kmutex_t spa_feat_stats_lock; /* protects spa_feat_stats */
nvlist_t *spa_feat_stats; /* Cache of enabled features */
/* cache feature refcounts */
uint64_t spa_feat_refcount_cache[SPA_FEATURES];
taskqid_t spa_deadman_tqid; /* Task id */
uint64_t spa_deadman_calls; /* number of deadman calls */
hrtime_t spa_sync_starttime; /* starting time of spa_sync */
uint64_t spa_deadman_synctime; /* deadman sync expiration */
uint64_t spa_deadman_ziotime; /* deadman zio expiration */
uint64_t spa_all_vdev_zaps; /* ZAP of per-vd ZAP obj #s */
spa_avz_action_t spa_avz_action; /* destroy/rebuild AVZ? */
uint64_t spa_errata; /* errata issues detected */
spa_stats_t spa_stats; /* assorted spa statistics */
spa_keystore_t spa_keystore; /* loaded crypto keys */
hrtime_t spa_ccw_fail_time; /* Conf cache write fail time */
taskq_t *spa_zvol_taskq; /* Taskq for minor management */
taskq_t *spa_prefetch_taskq; /* Taskq for prefetch threads */
uint64_t spa_multihost; /* multihost aware (mmp) */
mmp_thread_t spa_mmp; /* multihost mmp thread */
/*
* spa_refcount & spa_config_lock must be the last elements
* because refcount_t changes size based on compilation options.
* In order for the MDB module to function correctly, the other
* fields must remain in the same location.
*/
spa_config_lock_t spa_config_lock[SCL_LOCKS]; /* config changes */
refcount_t spa_refcount; /* number of opens */
taskq_t *spa_upgrade_taskq; /* taskq for upgrade jobs */
};
extern char *spa_config_path;
extern void spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent);
extern void spa_taskq_dispatch_sync(spa_t *, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags);
+extern void spa_load_spares(spa_t *spa);
+extern void spa_load_l2cache(spa_t *spa);
+extern sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
+ const char *name);
+extern void spa_event_post(sysevent_t *ev);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPA_IMPL_H */
diff --git a/include/sys/space_map.h b/include/sys/space_map.h
index a59e6d37d43a..457300d05328 100644
--- a/include/sys/space_map.h
+++ b/include/sys/space_map.h
@@ -1,164 +1,171 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
- * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2015 by Delphix. All rights reserved.
*/
#ifndef _SYS_SPACE_MAP_H
#define _SYS_SPACE_MAP_H
#include <sys/avl.h>
#include <sys/range_tree.h>
#include <sys/dmu.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* The size of the space map object has increased to include a histogram.
* The SPACE_MAP_SIZE_V0 designates the original size and is used to
* maintain backward compatibility.
*/
#define SPACE_MAP_SIZE_V0 (3 * sizeof (uint64_t))
#define SPACE_MAP_HISTOGRAM_SIZE 32
/*
* The space_map_phys is the on-disk representation of the space map.
* Consumers of space maps should never reference any of the members of this
* structure directly. These members may only be updated in syncing context.
*
* Note the smp_object is no longer used but remains in the structure
* for backward compatibility.
*/
typedef struct space_map_phys {
uint64_t smp_object; /* on-disk space map object */
uint64_t smp_objsize; /* size of the object */
uint64_t smp_alloc; /* space allocated from the map */
uint64_t smp_pad[5]; /* reserved */
/*
* The smp_histogram maintains a histogram of free regions. Each
* bucket, smp_histogram[i], contains the number of free regions
* whose size is:
* 2^(i+sm_shift) <= size of free region in bytes < 2^(i+sm_shift+1)
*/
uint64_t smp_histogram[SPACE_MAP_HISTOGRAM_SIZE];
} space_map_phys_t;
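/*
 * Illustrative sketch (not from the original source): computing which
 * smp_histogram[] bucket a free region of a given size falls into, per the
 * formula in the comment above.  Assumes highbit64() from sys/sysmacros.h;
 * the helper name is hypothetical and the result is clamped here for safety.
 */
static inline int
space_map_histogram_bucket(uint64_t size, uint8_t sm_shift)
{
	int idx = highbit64(size) - 1 - sm_shift; /* floor(log2(size)) - shift */

	if (idx < 0)
		idx = 0;
	if (idx >= SPACE_MAP_HISTOGRAM_SIZE)
		idx = SPACE_MAP_HISTOGRAM_SIZE - 1;
	return (idx);
}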
/*
* The space map object defines a region of space, its size, how much is
* allocated, and the on-disk object that stores this information.
* Consumers of space maps may only access the members of this structure.
+ *
+ * Note: the space_map may not be accessed concurrently; consumers
+ * must provide external locking if required.
*/
typedef struct space_map {
uint64_t sm_start; /* start of map */
uint64_t sm_size; /* size of map */
uint8_t sm_shift; /* unit shift */
uint64_t sm_length; /* synced length */
uint64_t sm_alloc; /* synced space allocated */
objset_t *sm_os; /* objset for this map */
uint64_t sm_object; /* object id for this map */
uint32_t sm_blksz; /* block size for space map */
dmu_buf_t *sm_dbuf; /* space_map_phys_t dbuf */
space_map_phys_t *sm_phys; /* on-disk space map */
- kmutex_t *sm_lock; /* pointer to lock that protects map */
} space_map_t;
/*
* debug entry
*
* 1 3 10 50
* ,---+--------+------------+---------------------------------.
* | 1 | action | syncpass | txg (lower bits) |
* `---+--------+------------+---------------------------------'
* 63 62 60 59 50 49 0
*
*
* non-debug entry
*
* 1 47 1 15
* ,-----------------------------------------------------------.
* | 0 | offset (sm_shift units) | type | run |
* `-----------------------------------------------------------'
* 63 62 17 16 15 0
*/
/* All this stuff takes and returns bytes */
#define SM_RUN_DECODE(x) (BF64_DECODE(x, 0, 15) + 1)
#define SM_RUN_ENCODE(x) BF64_ENCODE((x) - 1, 0, 15)
#define SM_TYPE_DECODE(x) BF64_DECODE(x, 15, 1)
#define SM_TYPE_ENCODE(x) BF64_ENCODE(x, 15, 1)
#define SM_OFFSET_DECODE(x) BF64_DECODE(x, 16, 47)
#define SM_OFFSET_ENCODE(x) BF64_ENCODE(x, 16, 47)
#define SM_DEBUG_DECODE(x) BF64_DECODE(x, 63, 1)
#define SM_DEBUG_ENCODE(x) BF64_ENCODE(x, 63, 1)
#define SM_DEBUG_ACTION_DECODE(x) BF64_DECODE(x, 60, 3)
#define SM_DEBUG_ACTION_ENCODE(x) BF64_ENCODE(x, 60, 3)
#define SM_DEBUG_SYNCPASS_DECODE(x) BF64_DECODE(x, 50, 10)
#define SM_DEBUG_SYNCPASS_ENCODE(x) BF64_ENCODE(x, 50, 10)
#define SM_DEBUG_TXG_DECODE(x) BF64_DECODE(x, 0, 50)
#define SM_DEBUG_TXG_ENCODE(x) BF64_ENCODE(x, 0, 50)
#define SM_RUN_MAX SM_RUN_DECODE(~0ULL)
typedef enum {
SM_ALLOC,
SM_FREE
} maptype_t;
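/*
 * Example (illustrative sketch, not from the original source): packing and
 * unpacking a non-debug entry with the SM_* macros above, using the layout
 * in the diagram (offset and run expressed in sm_shift units).  The helper
 * exists only to show the round trip.
 */
static inline boolean_t
sm_entry_roundtrip_example(void)
{
	uint64_t entry = SM_OFFSET_ENCODE(0x80) |	/* offset, sm_shift units */
	    SM_TYPE_ENCODE(SM_FREE) |
	    SM_RUN_ENCODE(16);				/* run of 16 units */

	return (SM_DEBUG_DECODE(entry) == 0 &&	/* bit 63 clear: not debug */
	    SM_OFFSET_DECODE(entry) == 0x80 &&
	    SM_TYPE_DECODE(entry) == SM_FREE &&
	    SM_RUN_DECODE(entry) == 16);
}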
+typedef int (*sm_cb_t)(maptype_t type, uint64_t offset, uint64_t size,
+ void *arg);
+
int space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype);
+int space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg);
void space_map_histogram_clear(space_map_t *sm);
void space_map_histogram_add(space_map_t *sm, range_tree_t *rt,
dmu_tx_t *tx);
void space_map_update(space_map_t *sm);
uint64_t space_map_object(space_map_t *sm);
uint64_t space_map_allocated(space_map_t *sm);
uint64_t space_map_length(space_map_t *sm);
void space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
dmu_tx_t *tx);
void space_map_truncate(space_map_t *sm, dmu_tx_t *tx);
uint64_t space_map_alloc(objset_t *os, dmu_tx_t *tx);
void space_map_free(space_map_t *sm, dmu_tx_t *tx);
+void space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx);
int space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
- uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp);
+ uint64_t start, uint64_t size, uint8_t shift);
void space_map_close(space_map_t *sm);
int64_t space_map_alloc_delta(space_map_t *sm);
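/*
 * Illustrative sketch (not from the original source) of the sm_cb_t /
 * space_map_iterate() interface declared above: a callback that totals the
 * bytes recorded as allocated.  The callback name and the assumption that a
 * nonzero return stops the iteration are ours, not the header's.
 */
static inline int
sm_count_alloc_cb(maptype_t type, uint64_t offset, uint64_t size, void *arg)
{
	uint64_t *total = arg;

	(void) offset;
	if (type == SM_ALLOC)
		*total += size;
	return (0);	/* keep iterating */
}
/*
 * Typical use (hypothetical):
 *
 *	uint64_t total = 0;
 *	VERIFY0(space_map_iterate(sm, sm_count_alloc_cb, &total));
 */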
#ifdef __cplusplus
}
#endif
#endif /* _SYS_SPACE_MAP_H */
diff --git a/include/sys/trace_vdev.h b/include/sys/trace_vdev.h
new file mode 100644
index 000000000000..d7af44c25397
--- /dev/null
+++ b/include/sys/trace_vdev.h
@@ -0,0 +1,119 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+#if defined(_KERNEL) && defined(HAVE_DECLARE_EVENT_CLASS)
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM zfs
+
+#undef TRACE_SYSTEM_VAR
+#define TRACE_SYSTEM_VAR zfs_vdev
+
+#if !defined(_TRACE_VDEV_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_VDEV_H
+
+#include <linux/tracepoint.h>
+#include <sys/types.h>
+
+/*
+ * Generic support for three argument tracepoints of the form:
+ *
+ * DTRACE_PROBE3(...,
+ * spa_t *, ...,
+ * uint64_t, ...,
+ * uint64_t, ...);
+ */
+/* BEGIN CSTYLED */
+DECLARE_EVENT_CLASS(zfs_removing_class_3,
+ TP_PROTO(spa_t *spa, uint64_t offset, uint64_t size),
+ TP_ARGS(spa, offset, size),
+ TP_STRUCT__entry(
+ __field(spa_t *, vdev_spa)
+ __field(uint64_t, vdev_offset)
+ __field(uint64_t, vdev_size)
+ ),
+ TP_fast_assign(
+ __entry->vdev_spa = spa;
+ __entry->vdev_offset = offset;
+ __entry->vdev_size = size;
+ ),
+ TP_printk("spa %p offset %llu size %llu",
+ __entry->vdev_spa, __entry->vdev_offset,
+ __entry->vdev_size)
+);
+/* END CSTYLED */
+
+/* BEGIN CSTYLED */
+#define DEFINE_REMOVE_FREE_EVENT(name) \
+DEFINE_EVENT(zfs_removing_class_3, name, \
+ TP_PROTO(spa_t *spa, uint64_t offset, uint64_t size), \
+ TP_ARGS(spa, offset, size))
+/* END CSTYLED */
+DEFINE_REMOVE_FREE_EVENT(zfs_remove__free__synced);
+DEFINE_REMOVE_FREE_EVENT(zfs_remove__free__unvisited);
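+
+/*
+ * Example (illustrative, not from the original source): callers fire these
+ * events with the DTRACE_PROBE3 form described in the comment above, e.g.
+ * (the call site shown here is hypothetical):
+ *
+ *	DTRACE_PROBE3(remove__free__synced,
+ *	    spa_t *, spa,
+ *	    uint64_t, offset,
+ *	    uint64_t, size);
+ *
+ * On Linux builds this is expected to expand to the generated
+ * trace_zfs_remove__free__synced() tracepoint defined above.
+ */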
+
+/*
+ * Generic support for four argument tracepoints of the form:
+ *
+ * DTRACE_PROBE4(...,
+ * spa_t *, ...,
+ * uint64_t, ...,
+ * uint64_t, ...,
+ * uint64_t, ...);
+ */
+/* BEGIN CSTYLED */
+DECLARE_EVENT_CLASS(zfs_removing_class_4,
+ TP_PROTO(spa_t *spa, uint64_t offset, uint64_t size, uint64_t txg),
+ TP_ARGS(spa, offset, size, txg),
+ TP_STRUCT__entry(
+ __field(spa_t *, vdev_spa)
+ __field(uint64_t, vdev_offset)
+ __field(uint64_t, vdev_size)
+ __field(uint64_t, vdev_txg)
+ ),
+ TP_fast_assign(
+ __entry->vdev_spa = spa;
+ __entry->vdev_offset = offset;
+ __entry->vdev_size = size;
+ __entry->vdev_txg = txg;
+ ),
+ TP_printk("spa %p offset %llu size %llu txg %llu",
+ __entry->vdev_spa, __entry->vdev_offset,
+ __entry->vdev_size, __entry->vdev_txg)
+);
+/* END CSTYLED */
+
+/* BEGIN CSTYLED */
+#define DEFINE_REMOVE_FREE_EVENT_TXG(name) \
+DEFINE_EVENT(zfs_removing_class_4, name, \
+ TP_PROTO(spa_t *spa, uint64_t offset, uint64_t size, uint64_t txg), \
+ TP_ARGS(spa, offset, size, txg))
+/* END CSTYLED */
+DEFINE_REMOVE_FREE_EVENT_TXG(zfs_remove__free__inflight);
+
+#endif /* _TRACE_VDEV_H */
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH sys
+#define TRACE_INCLUDE_FILE trace_vdev
+#include <trace/define_trace.h>
+
+#endif /* _KERNEL && HAVE_DECLARE_EVENT_CLASS */
diff --git a/include/sys/vdev.h b/include/sys/vdev.h
index 022713096d63..511d4d0b6308 100644
--- a/include/sys/vdev.h
+++ b/include/sys/vdev.h
@@ -1,176 +1,181 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
*/
#ifndef _SYS_VDEV_H
#define _SYS_VDEV_H
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu.h>
#include <sys/space_map.h>
#include <sys/fs/zfs.h>
#ifdef __cplusplus
extern "C" {
#endif
typedef enum vdev_dtl_type {
DTL_MISSING, /* 0% replication: no copies of the data */
DTL_PARTIAL, /* less than 100% replication: some copies missing */
DTL_SCRUB, /* unable to fully repair during scrub/resilver */
DTL_OUTAGE, /* temporarily missing (used to attempt detach) */
DTL_TYPES
} vdev_dtl_type_t;
extern int zfs_nocacheflush;
extern int vdev_open(vdev_t *);
extern void vdev_open_children(vdev_t *);
extern int vdev_validate(vdev_t *, boolean_t);
extern void vdev_close(vdev_t *);
extern int vdev_create(vdev_t *, uint64_t txg, boolean_t isreplace);
extern void vdev_reopen(vdev_t *);
extern int vdev_validate_aux(vdev_t *vd);
extern zio_t *vdev_probe(vdev_t *vd, zio_t *pio);
-
+extern boolean_t vdev_is_concrete(vdev_t *vd);
extern boolean_t vdev_is_bootable(vdev_t *vd);
extern vdev_t *vdev_lookup_top(spa_t *spa, uint64_t vdev);
extern vdev_t *vdev_lookup_by_guid(vdev_t *vd, uint64_t guid);
extern int vdev_count_leaves(spa_t *spa);
extern void vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t d,
uint64_t txg, uint64_t size);
extern boolean_t vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t d,
uint64_t txg, uint64_t size);
extern boolean_t vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t d);
extern boolean_t vdev_dtl_need_resilver(vdev_t *vd, uint64_t off, size_t size);
extern void vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg,
int scrub_done);
extern boolean_t vdev_dtl_required(vdev_t *vd);
extern boolean_t vdev_resilver_needed(vdev_t *vd,
uint64_t *minp, uint64_t *maxp);
extern void vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj,
dmu_tx_t *tx);
extern uint64_t vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx);
extern void vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx);
+extern void vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx);
+extern void vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset,
+ uint64_t size, uint64_t txg);
+extern void spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev,
+ uint64_t offset, uint64_t size, dmu_tx_t *tx);
extern void vdev_hold(vdev_t *);
extern void vdev_rele(vdev_t *);
extern int vdev_metaslab_init(vdev_t *vd, uint64_t txg);
extern void vdev_metaslab_fini(vdev_t *vd);
extern void vdev_metaslab_set_size(vdev_t *);
extern void vdev_expand(vdev_t *vd, uint64_t txg);
extern void vdev_split(vdev_t *vd);
extern void vdev_deadman(vdev_t *vd, char *tag);
extern void vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx);
extern void vdev_get_stats(vdev_t *vd, vdev_stat_t *vs);
extern void vdev_clear_stats(vdev_t *vd);
extern void vdev_stat_update(zio_t *zio, uint64_t psize);
extern void vdev_scan_stat_init(vdev_t *vd);
extern void vdev_propagate_state(vdev_t *vd);
extern void vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state,
vdev_aux_t aux);
extern void vdev_space_update(vdev_t *vd,
int64_t alloc_delta, int64_t defer_delta, int64_t space_delta);
extern uint64_t vdev_psize_to_asize(vdev_t *vd, uint64_t psize);
extern int vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux);
extern int vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux);
extern int vdev_online(spa_t *spa, uint64_t guid, uint64_t flags,
vdev_state_t *);
extern int vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags);
extern void vdev_clear(spa_t *spa, vdev_t *vd);
extern boolean_t vdev_is_dead(vdev_t *vd);
extern boolean_t vdev_readable(vdev_t *vd);
extern boolean_t vdev_writeable(vdev_t *vd);
extern boolean_t vdev_allocatable(vdev_t *vd);
extern boolean_t vdev_accessible(vdev_t *vd, zio_t *zio);
extern void vdev_cache_init(vdev_t *vd);
extern void vdev_cache_fini(vdev_t *vd);
extern boolean_t vdev_cache_read(zio_t *zio);
extern void vdev_cache_write(zio_t *zio);
extern void vdev_cache_purge(vdev_t *vd);
extern void vdev_queue_init(vdev_t *vd);
extern void vdev_queue_fini(vdev_t *vd);
extern zio_t *vdev_queue_io(zio_t *zio);
extern void vdev_queue_io_done(zio_t *zio);
extern void vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority);
extern int vdev_queue_length(vdev_t *vd);
extern uint64_t vdev_queue_last_offset(vdev_t *vd);
extern void vdev_config_dirty(vdev_t *vd);
extern void vdev_config_clean(vdev_t *vd);
extern int vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg);
extern void vdev_state_dirty(vdev_t *vd);
extern void vdev_state_clean(vdev_t *vd);
typedef enum vdev_config_flag {
VDEV_CONFIG_SPARE = 1 << 0,
VDEV_CONFIG_L2CACHE = 1 << 1,
VDEV_CONFIG_REMOVING = 1 << 2,
VDEV_CONFIG_MOS = 1 << 3
} vdev_config_flag_t;
extern void vdev_top_config_generate(spa_t *spa, nvlist_t *config);
extern nvlist_t *vdev_config_generate(spa_t *spa, vdev_t *vd,
boolean_t getstats, vdev_config_flag_t flags);
/*
* Label routines
*/
struct uberblock;
extern uint64_t vdev_label_offset(uint64_t psize, int l, uint64_t offset);
extern int vdev_label_number(uint64_t psize, uint64_t offset);
extern nvlist_t *vdev_label_read_config(vdev_t *vd, uint64_t txg);
extern void vdev_uberblock_load(vdev_t *, struct uberblock *, nvlist_t **);
extern void vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv);
extern void vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t
offset, uint64_t size, zio_done_func_t *done, void *private, int flags);
typedef enum {
VDEV_LABEL_CREATE, /* create/add a new device */
VDEV_LABEL_REPLACE, /* replace an existing device */
VDEV_LABEL_SPARE, /* add a new hot spare */
VDEV_LABEL_REMOVE, /* remove an existing device */
VDEV_LABEL_L2CACHE, /* add an L2ARC cache device */
VDEV_LABEL_SPLIT /* generating new label for split-off dev */
} vdev_labeltype_t;
extern int vdev_label_init(vdev_t *vd, uint64_t txg, vdev_labeltype_t reason);
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_H */
diff --git a/include/sys/vdev_impl.h b/include/sys/vdev_impl.h
index 2ad3510ea368..b933f9ab8d6a 100644
--- a/include/sys/vdev_impl.h
+++ b/include/sys/vdev_impl.h
@@ -1,396 +1,489 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
*/
#ifndef _SYS_VDEV_IMPL_H
#define _SYS_VDEV_IMPL_H
#include <sys/avl.h>
+#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/metaslab.h>
#include <sys/nvpair.h>
#include <sys/space_map.h>
#include <sys/vdev.h>
#include <sys/dkio.h>
#include <sys/uberblock_impl.h>
+#include <sys/vdev_indirect_mapping.h>
+#include <sys/vdev_indirect_births.h>
+#include <sys/vdev_removal.h>
#include <sys/zfs_ratelimit.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Virtual device descriptors.
*
* All storage pool operations go through the virtual device framework,
* which provides data replication and I/O scheduling.
*/
/*
* Forward declarations that lots of things need.
*/
typedef struct vdev_queue vdev_queue_t;
typedef struct vdev_cache vdev_cache_t;
typedef struct vdev_cache_entry vdev_cache_entry_t;
struct abd;
extern int zfs_vdev_queue_depth_pct;
extern uint32_t zfs_vdev_async_write_max_active;
/*
* Virtual device operations
*/
typedef int vdev_open_func_t(vdev_t *vd, uint64_t *size, uint64_t *max_size,
uint64_t *ashift);
typedef void vdev_close_func_t(vdev_t *vd);
typedef uint64_t vdev_asize_func_t(vdev_t *vd, uint64_t psize);
typedef void vdev_io_start_func_t(zio_t *zio);
typedef void vdev_io_done_func_t(zio_t *zio);
typedef void vdev_state_change_func_t(vdev_t *vd, int, int);
typedef boolean_t vdev_need_resilver_func_t(vdev_t *vd, uint64_t, size_t);
typedef void vdev_hold_func_t(vdev_t *vd);
typedef void vdev_rele_func_t(vdev_t *vd);
+typedef void vdev_remap_cb_t(uint64_t inner_offset, vdev_t *vd,
+ uint64_t offset, uint64_t size, void *arg);
+typedef void vdev_remap_func_t(vdev_t *vd, uint64_t offset, uint64_t size,
+ vdev_remap_cb_t callback, void *arg);
+
typedef const struct vdev_ops {
vdev_open_func_t *vdev_op_open;
vdev_close_func_t *vdev_op_close;
vdev_asize_func_t *vdev_op_asize;
vdev_io_start_func_t *vdev_op_io_start;
vdev_io_done_func_t *vdev_op_io_done;
vdev_state_change_func_t *vdev_op_state_change;
vdev_need_resilver_func_t *vdev_op_need_resilver;
vdev_hold_func_t *vdev_op_hold;
vdev_rele_func_t *vdev_op_rele;
+ vdev_remap_func_t *vdev_op_remap;
char vdev_op_type[16];
boolean_t vdev_op_leaf;
} vdev_ops_t;
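/*
 * Illustrative sketch (not from the original source) of the vdev_op_remap
 * interface added above: the remap routine invokes the caller's callback
 * once per mapped segment.  The callback below and the usage comment are
 * hypothetical.
 */
static inline void
vdev_remap_count_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
    uint64_t size, void *arg)
{
	uint64_t *mapped = arg;

	(void) inner_offset;
	(void) vd;
	(void) offset;
	*mapped += size;
}
/*
 * Typical use (hypothetical), counting how much of a region is mapped:
 *
 *	uint64_t mapped = 0;
 *	if (vd->vdev_ops->vdev_op_remap != NULL)
 *		vd->vdev_ops->vdev_op_remap(vd, offset, size,
 *		    vdev_remap_count_cb, &mapped);
 */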
/*
* Virtual device properties
*/
struct vdev_cache_entry {
struct abd *ve_abd;
uint64_t ve_offset;
clock_t ve_lastused;
avl_node_t ve_offset_node;
avl_node_t ve_lastused_node;
uint32_t ve_hits;
uint16_t ve_missed_update;
zio_t *ve_fill_io;
};
struct vdev_cache {
avl_tree_t vc_offset_tree;
avl_tree_t vc_lastused_tree;
kmutex_t vc_lock;
};
typedef struct vdev_queue_class {
uint32_t vqc_active;
/*
* Sorted by offset or timestamp, depending on whether the queue is
* LBA-ordered vs FIFO.
*/
avl_tree_t vqc_queued_tree;
} vdev_queue_class_t;
struct vdev_queue {
vdev_t *vq_vdev;
vdev_queue_class_t vq_class[ZIO_PRIORITY_NUM_QUEUEABLE];
avl_tree_t vq_active_tree;
avl_tree_t vq_read_offset_tree;
avl_tree_t vq_write_offset_tree;
uint64_t vq_last_offset;
hrtime_t vq_io_complete_ts; /* time last i/o completed */
hrtime_t vq_io_delta_ts;
zio_t vq_io_search; /* used as local for stack reduction */
kmutex_t vq_lock;
};
+/*
+ * On-disk indirect vdev state.
+ *
+ * An indirect vdev is described exclusively in the MOS config of a pool.
+ * The config for an indirect vdev includes several fields, which are
+ * accessed in memory by a vdev_indirect_config_t.
+ */
+typedef struct vdev_indirect_config {
+ /*
+ * Object (in MOS) which contains the indirect mapping. This object
+ * contains an array of vdev_indirect_mapping_entry_phys_t ordered by
+ * vimep_src. The bonus buffer for this object is a
+ * vdev_indirect_mapping_phys_t. This object is allocated when a vdev
+ * removal is initiated.
+ *
+ * Note that this object can be empty if none of the data on the vdev
+ * has been copied yet.
+ */
+ uint64_t vic_mapping_object;
+
+ /*
+ * Object (in MOS) which contains the birth times for the mapping
+ * entries. This object contains an array of
+ * vdev_indirect_birth_entry_phys_t sorted by vibe_offset. The bonus
+ * buffer for this object is a vdev_indirect_birth_phys_t. This object
+ * is allocated when a vdev removal is initiated.
+ *
+ * Note that this object can be empty if none of the vdev's data has
+ * been copied yet.
+ */
+ uint64_t vic_births_object;
+
+ /*
+ * This is the vdev ID which was removed previous to this vdev, or
+ * UINT64_MAX if there are no previously removed vdevs.
+ */
+ uint64_t vic_prev_indirect_vdev;
+} vdev_indirect_config_t;
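+
+/*
+ * Illustrative sketch (not part of this change): once the config above has
+ * been read, the in-core mapping and births structures are opened from the
+ * two objects it names.  The fragment below is hypothetical and simplified;
+ * the real code does this while loading the pool.
+ */
+#if 0	/* example only */
+	vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+
+	if (vic->vic_mapping_object != 0) {
+		vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
+		    spa->spa_meta_objset, vic->vic_mapping_object);
+		vd->vdev_indirect_births = vdev_indirect_births_open(
+		    spa->spa_meta_objset, vic->vic_births_object);
+	}
+#endif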
+
/*
* Virtual device descriptor
*/
struct vdev {
/*
* Common to all vdev types.
*/
uint64_t vdev_id; /* child number in vdev parent */
uint64_t vdev_guid; /* unique ID for this vdev */
uint64_t vdev_guid_sum; /* self guid + all child guids */
uint64_t vdev_orig_guid; /* orig. guid prior to remove */
uint64_t vdev_asize; /* allocatable device capacity */
uint64_t vdev_min_asize; /* min acceptable asize */
uint64_t vdev_max_asize; /* max acceptable asize */
uint64_t vdev_ashift; /* block alignment shift */
uint64_t vdev_state; /* see VDEV_STATE_* #defines */
uint64_t vdev_prevstate; /* used when reopening a vdev */
vdev_ops_t *vdev_ops; /* vdev operations */
spa_t *vdev_spa; /* spa for this vdev */
void *vdev_tsd; /* type-specific data */
vnode_t *vdev_name_vp; /* vnode for pathname */
vnode_t *vdev_devid_vp; /* vnode for devid */
vdev_t *vdev_top; /* top-level vdev */
vdev_t *vdev_parent; /* parent vdev */
vdev_t **vdev_child; /* array of children */
uint64_t vdev_children; /* number of children */
vdev_stat_t vdev_stat; /* virtual device statistics */
vdev_stat_ex_t vdev_stat_ex; /* extended statistics */
boolean_t vdev_expanding; /* expand the vdev? */
boolean_t vdev_reopening; /* reopen in progress? */
boolean_t vdev_nonrot; /* true if solid state */
int vdev_open_error; /* error on last open */
kthread_t *vdev_open_thread; /* thread opening children */
uint64_t vdev_crtxg; /* txg when top-level was added */
/*
* Top-level vdev state.
*/
uint64_t vdev_ms_array; /* metaslab array object */
uint64_t vdev_ms_shift; /* metaslab size shift */
uint64_t vdev_ms_count; /* number of metaslabs */
metaslab_group_t *vdev_mg; /* metaslab group */
metaslab_t **vdev_ms; /* metaslab array */
uint64_t vdev_pending_fastwrite; /* allocated fastwrites */
txg_list_t vdev_ms_list; /* per-txg dirty metaslab lists */
txg_list_t vdev_dtl_list; /* per-txg dirty DTL lists */
txg_node_t vdev_txg_node; /* per-txg dirty vdev linkage */
boolean_t vdev_remove_wanted; /* async remove wanted? */
boolean_t vdev_probe_wanted; /* async probe wanted? */
list_node_t vdev_config_dirty_node; /* config dirty list */
list_node_t vdev_state_dirty_node; /* state dirty list */
uint64_t vdev_deflate_ratio; /* deflation ratio (x512) */
uint64_t vdev_islog; /* is an intent log device */
uint64_t vdev_removing; /* device is being removed? */
boolean_t vdev_ishole; /* is a hole in the namespace */
kmutex_t vdev_queue_lock; /* protects vdev_queue_depth */
uint64_t vdev_top_zap;
+ /*
+ * Values stored in the config for an indirect or removing vdev.
+ */
+ vdev_indirect_config_t vdev_indirect_config;
+
+ /*
+ * The vdev_indirect_rwlock protects the vdev_indirect_mapping
+ * pointer from changing on indirect vdevs (when it is condensed).
+ * Note that removing (not yet indirect) vdevs have different
+ * access patterns (the mapping is not accessed from open context,
+ * e.g. from zio_read) and locking strategy (e.g. svr_lock).
+ */
+ krwlock_t vdev_indirect_rwlock;
+ vdev_indirect_mapping_t *vdev_indirect_mapping;
+ vdev_indirect_births_t *vdev_indirect_births;
+
+ /*
+ * In memory data structures used to manage the obsolete sm, for
+ * indirect or removing vdevs.
+ *
+ * The vdev_obsolete_segments is the in-core record of the segments
+ * that are no longer referenced anywhere in the pool (due to
+ * being freed or remapped and not referenced by any snapshots).
+ * During a sync, segments are added to vdev_obsolete_segments
+ * via vdev_indirect_mark_obsolete(); at the end of each sync
+ * pass, this is appended to vdev_obsolete_sm via
+ * vdev_indirect_sync_obsolete(). The vdev_obsolete_lock
+ * protects against concurrent modifications of vdev_obsolete_segments
+ * from multiple zio threads.
+ */
+ kmutex_t vdev_obsolete_lock;
+ range_tree_t *vdev_obsolete_segments;
+ space_map_t *vdev_obsolete_sm;
+
/*
* The queue depth parameters determine how many async writes are
* still pending (i.e. allocated but not yet issued to disk) per
* top-level (vdev_async_write_queue_depth) and the maximum allowed
* (vdev_max_async_write_queue_depth). These values only apply to
* top-level vdevs.
*/
uint64_t vdev_async_write_queue_depth;
uint64_t vdev_max_async_write_queue_depth;
/*
* Protects the vdev_scan_io_queue field itself as well as the
* structure's contents (when present).
*/
kmutex_t vdev_scan_io_queue_lock;
struct dsl_scan_io_queue *vdev_scan_io_queue;
/*
* Leaf vdev state.
*/
range_tree_t *vdev_dtl[DTL_TYPES]; /* dirty time logs */
space_map_t *vdev_dtl_sm; /* dirty time log space map */
txg_node_t vdev_dtl_node; /* per-txg dirty DTL linkage */
uint64_t vdev_dtl_object; /* DTL object */
uint64_t vdev_psize; /* physical device capacity */
uint64_t vdev_wholedisk; /* true if this is a whole disk */
uint64_t vdev_offline; /* persistent offline state */
uint64_t vdev_faulted; /* persistent faulted state */
uint64_t vdev_degraded; /* persistent degraded state */
uint64_t vdev_removed; /* persistent removed state */
uint64_t vdev_resilver_txg; /* persistent resilvering state */
uint64_t vdev_nparity; /* number of parity devices for raidz */
char *vdev_path; /* vdev path (if any) */
char *vdev_devid; /* vdev devid (if any) */
char *vdev_physpath; /* vdev device path (if any) */
char *vdev_enc_sysfs_path; /* enclosure sysfs path */
char *vdev_fru; /* physical FRU location */
uint64_t vdev_not_present; /* not present during import */
uint64_t vdev_unspare; /* unspare when resilvering done */
boolean_t vdev_nowritecache; /* true if flushwritecache failed */
boolean_t vdev_checkremove; /* temporary online test */
boolean_t vdev_forcefault; /* force online fault */
boolean_t vdev_splitting; /* split or repair in progress */
boolean_t vdev_delayed_close; /* delayed device close? */
boolean_t vdev_tmpoffline; /* device taken offline temporarily? */
boolean_t vdev_detached; /* device detached? */
boolean_t vdev_cant_read; /* vdev is failing all reads */
boolean_t vdev_cant_write; /* vdev is failing all writes */
boolean_t vdev_isspare; /* was a hot spare */
boolean_t vdev_isl2cache; /* was a l2cache device */
boolean_t vdev_copy_uberblocks; /* post expand copy uberblocks */
vdev_queue_t vdev_queue; /* I/O deadline schedule queue */
vdev_cache_t vdev_cache; /* physical block cache */
spa_aux_vdev_t *vdev_aux; /* for l2cache and spares vdevs */
zio_t *vdev_probe_zio; /* root of current probe */
vdev_aux_t vdev_label_aux; /* on-disk aux state */
uint64_t vdev_leaf_zap;
hrtime_t vdev_mmp_pending; /* 0 if write finished */
uint64_t vdev_mmp_kstat_id; /* to find kstat entry */
/*
* For DTrace to work in userland (libzpool) context, these fields must
* remain at the end of the structure. DTrace will use the kernel's
* CTF definition for 'struct vdev', and since the size of a kmutex_t is
* larger in userland, the offsets for the rest of the fields would be
* incorrect.
*/
kmutex_t vdev_dtl_lock; /* vdev_dtl_{map,resilver} */
kmutex_t vdev_stat_lock; /* vdev_stat */
kmutex_t vdev_probe_lock; /* protects vdev_probe_zio */
/*
* We rate limit ZIO delay and ZIO checksum events, since they
* can flood ZED with tons of events when a drive is acting up.
*/
zfs_ratelimit_t vdev_delay_rl;
zfs_ratelimit_t vdev_checksum_rl;
};
#define VDEV_RAIDZ_MAXPARITY 3
#define VDEV_PAD_SIZE (8 << 10)
/* 2 padding areas (vl_pad1 and vl_pad2) to skip */
#define VDEV_SKIP_SIZE VDEV_PAD_SIZE * 2
#define VDEV_PHYS_SIZE (112 << 10)
#define VDEV_UBERBLOCK_RING (128 << 10)
/*
* MMP blocks occupy the last MMP_BLOCKS_PER_LABEL slots in the uberblock
* ring when MMP is enabled.
*/
#define MMP_BLOCKS_PER_LABEL 1
/* The largest uberblock we support is 8k. */
#define MAX_UBERBLOCK_SHIFT (13)
#define VDEV_UBERBLOCK_SHIFT(vd) \
MIN(MAX((vd)->vdev_top->vdev_ashift, UBERBLOCK_SHIFT), \
MAX_UBERBLOCK_SHIFT)
#define VDEV_UBERBLOCK_COUNT(vd) \
(VDEV_UBERBLOCK_RING >> VDEV_UBERBLOCK_SHIFT(vd))
#define VDEV_UBERBLOCK_OFFSET(vd, n) \
offsetof(vdev_label_t, vl_uberblock[(n) << VDEV_UBERBLOCK_SHIFT(vd)])
#define VDEV_UBERBLOCK_SIZE(vd) (1ULL << VDEV_UBERBLOCK_SHIFT(vd))
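/*
 * Worked example (illustrative): assuming UBERBLOCK_SHIFT is 10 (1K), a
 * top-level vdev with ashift 9 gets VDEV_UBERBLOCK_SHIFT() == 10, i.e. 1K
 * slots and 128 uberblocks in the 128K ring; ashift 12 gives 4K slots and
 * 32 uberblocks; anything larger is capped at MAX_UBERBLOCK_SHIFT (8K slots,
 * 16 uberblocks).
 */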
typedef struct vdev_phys {
char vp_nvlist[VDEV_PHYS_SIZE - sizeof (zio_eck_t)];
zio_eck_t vp_zbt;
} vdev_phys_t;
typedef struct vdev_label {
char vl_pad1[VDEV_PAD_SIZE]; /* 8K */
char vl_pad2[VDEV_PAD_SIZE]; /* 8K */
vdev_phys_t vl_vdev_phys; /* 112K */
char vl_uberblock[VDEV_UBERBLOCK_RING]; /* 128K */
} vdev_label_t; /* 256K total */
/*
* vdev_dirty() flags
*/
#define VDD_METASLAB 0x01
#define VDD_DTL 0x02
/* Offset of embedded boot loader region on each label */
#define VDEV_BOOT_OFFSET (2 * sizeof (vdev_label_t))
/*
* Size of embedded boot loader region on each label.
* The total size of the first two labels plus the boot area is 4MB.
*/
#define VDEV_BOOT_SIZE (7ULL << 19) /* 3.5M */
/*
* Size of label regions at the start and end of each leaf device.
*/
#define VDEV_LABEL_START_SIZE (2 * sizeof (vdev_label_t) + VDEV_BOOT_SIZE)
#define VDEV_LABEL_END_SIZE (2 * sizeof (vdev_label_t))
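/*
 * Worked example (illustrative): sizeof (vdev_label_t) is 256K, so
 * VDEV_BOOT_OFFSET is 512K and VDEV_BOOT_SIZE is 7 << 19 == 3.5M, giving
 * VDEV_LABEL_START_SIZE == 512K + 3.5M == 4M (the "first two labels plus
 * the boot area" figure above).  VDEV_LABEL_END_SIZE is the trailing 512K
 * that holds labels 2 and 3.
 */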
#define VDEV_LABELS 4
#define VDEV_BEST_LABEL VDEV_LABELS
#define VDEV_ALLOC_LOAD 0
#define VDEV_ALLOC_ADD 1
#define VDEV_ALLOC_SPARE 2
#define VDEV_ALLOC_L2CACHE 3
#define VDEV_ALLOC_ROOTPOOL 4
#define VDEV_ALLOC_SPLIT 5
#define VDEV_ALLOC_ATTACH 6
/*
* Allocate or free a vdev
*/
extern vdev_t *vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid,
vdev_ops_t *ops);
extern int vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *config,
vdev_t *parent, uint_t id, int alloctype);
extern void vdev_free(vdev_t *vd);
/*
* Add or remove children and parents
*/
extern void vdev_add_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_remove_child(vdev_t *pvd, vdev_t *cvd);
extern void vdev_compact_children(vdev_t *pvd);
extern vdev_t *vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops);
extern void vdev_remove_parent(vdev_t *cvd);
/*
* vdev sync load and sync
*/
extern void vdev_load_log_state(vdev_t *nvd, vdev_t *ovd);
extern boolean_t vdev_log_state_valid(vdev_t *vd);
-extern void vdev_load(vdev_t *vd);
+extern int vdev_load(vdev_t *vd);
extern int vdev_dtl_load(vdev_t *vd);
extern void vdev_sync(vdev_t *vd, uint64_t txg);
extern void vdev_sync_done(vdev_t *vd, uint64_t txg);
extern void vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg);
extern void vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg);
/*
* Available vdev types.
*/
extern vdev_ops_t vdev_root_ops;
extern vdev_ops_t vdev_mirror_ops;
extern vdev_ops_t vdev_replacing_ops;
extern vdev_ops_t vdev_raidz_ops;
extern vdev_ops_t vdev_disk_ops;
extern vdev_ops_t vdev_file_ops;
extern vdev_ops_t vdev_missing_ops;
extern vdev_ops_t vdev_hole_ops;
extern vdev_ops_t vdev_spare_ops;
+extern vdev_ops_t vdev_indirect_ops;
/*
* Common size functions
*/
extern uint64_t vdev_default_asize(vdev_t *vd, uint64_t psize);
extern uint64_t vdev_get_min_asize(vdev_t *vd);
extern void vdev_set_min_asize(vdev_t *vd);
/*
* Global variables
*/
/* zdb uses this tunable, so it must be declared here to make lint happy. */
extern int zfs_vdev_cache_size;
+/*
+ * Functions from vdev_indirect.c
+ */
+extern void vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx);
+extern boolean_t vdev_indirect_should_condense(vdev_t *vd);
+extern void spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx);
+extern int vdev_obsolete_sm_object(vdev_t *vd);
+extern boolean_t vdev_obsolete_counts_are_precise(vdev_t *vd);
+
#ifdef __cplusplus
}
#endif
#endif /* _SYS_VDEV_IMPL_H */
diff --git a/include/sys/vdev_indirect_births.h b/include/sys/vdev_indirect_births.h
new file mode 100644
index 000000000000..987b14485d2b
--- /dev/null
+++ b/include/sys/vdev_indirect_births.h
@@ -0,0 +1,80 @@
+/*
+ * CDDL HEADER START
+ *
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2015 by Delphix. All rights reserved.
+ */
+
+#ifndef _SYS_VDEV_INDIRECT_BIRTHS_H
+#define _SYS_VDEV_INDIRECT_BIRTHS_H
+
+#include <sys/dmu.h>
+#include <sys/spa.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct vdev_indirect_birth_entry_phys {
+ uint64_t vibe_offset;
+ uint64_t vibe_phys_birth_txg;
+} vdev_indirect_birth_entry_phys_t;
+
+typedef struct vdev_indirect_birth_phys {
+ uint64_t vib_count; /* count of v_i_b_entry_phys_t's */
+} vdev_indirect_birth_phys_t;
+
+typedef struct vdev_indirect_births {
+ uint64_t vib_object;
+
+ /*
+ * Each entry indicates that everything up to but not including
+ * vibe_offset was copied in vibe_phys_birth_txg. Entries are sorted
+ * by increasing phys_birth, and also by increasing offset. See
+ * vdev_indirect_births_physbirth for usage.
+ */
+ vdev_indirect_birth_entry_phys_t *vib_entries;
+
+ objset_t *vib_objset;
+
+ dmu_buf_t *vib_dbuf;
+ vdev_indirect_birth_phys_t *vib_phys;
+} vdev_indirect_births_t;
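+
+/*
+ * Illustrative sketch (not from the original source): per the comment on
+ * vib_entries, the birth txg for an offset is that of the first entry whose
+ * vibe_offset is greater than the offset.  A linear scan is shown for
+ * clarity; vdev_indirect_births_physbirth() is expected to perform the
+ * equivalent lookup.
+ */
+static inline uint64_t
+vib_physbirth_linear_example(vdev_indirect_births_t *vib, uint64_t offset)
+{
+	uint64_t i;
+
+	for (i = 0; i < vib->vib_phys->vib_count; i++) {
+		if (offset < vib->vib_entries[i].vibe_offset)
+			return (vib->vib_entries[i].vibe_phys_birth_txg);
+	}
+	return (0);	/* offset beyond the last recorded copy */
+}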
+
+extern vdev_indirect_births_t *vdev_indirect_births_open(objset_t *os,
+ uint64_t object);
+extern void vdev_indirect_births_close(vdev_indirect_births_t *vib);
+extern boolean_t vdev_indirect_births_is_open(vdev_indirect_births_t *vib);
+extern uint64_t vdev_indirect_births_alloc(objset_t *os, dmu_tx_t *tx);
+extern void vdev_indirect_births_free(objset_t *os, uint64_t object,
+ dmu_tx_t *tx);
+
+extern uint64_t vdev_indirect_births_count(vdev_indirect_births_t *vib);
+extern uint64_t vdev_indirect_births_object(vdev_indirect_births_t *vib);
+
+extern void vdev_indirect_births_add_entry(vdev_indirect_births_t *vib,
+ uint64_t offset, uint64_t txg, dmu_tx_t *tx);
+
+extern uint64_t vdev_indirect_births_physbirth(vdev_indirect_births_t *vib,
+ uint64_t offset, uint64_t asize);
+
+extern uint64_t vdev_indirect_births_last_entry_txg(
+ vdev_indirect_births_t *vib);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_VDEV_INDIRECT_BIRTHS_H */
diff --git a/include/sys/vdev_indirect_mapping.h b/include/sys/vdev_indirect_mapping.h
new file mode 100644
index 000000000000..7e42c1019504
--- /dev/null
+++ b/include/sys/vdev_indirect_mapping.h
@@ -0,0 +1,141 @@
+/*
+ * CDDL HEADER START
+ *
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2015 by Delphix. All rights reserved.
+ */
+
+#ifndef _SYS_VDEV_INDIRECT_MAPPING_H
+#define _SYS_VDEV_INDIRECT_MAPPING_H
+
+#include <sys/dmu.h>
+#include <sys/list.h>
+#include <sys/spa.h>
+#include <sys/space_map.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct vdev_indirect_mapping_entry_phys {
+ /*
+ * Decode with DVA_MAPPING_* macros.
+ * Contains:
+ * the source offset (low 63 bits)
+ * the one-bit "mark", used for garbage collection (by zdb)
+ */
+ uint64_t vimep_src;
+
+ /*
+ * Note: the DVA's asize is 24 bits, and can thus store ranges
+ * up to 8GB.
+ */
+ dva_t vimep_dst;
+} vdev_indirect_mapping_entry_phys_t;
+
+#define DVA_MAPPING_GET_SRC_OFFSET(vimep) \
+ BF64_GET_SB((vimep)->vimep_src, 0, 63, SPA_MINBLOCKSHIFT, 0)
+#define DVA_MAPPING_SET_SRC_OFFSET(vimep, x) \
+ BF64_SET_SB((vimep)->vimep_src, 0, 63, SPA_MINBLOCKSHIFT, 0, x)
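+
+/*
+ * Example (illustrative, not from the original source): vimep_src keeps the
+ * source offset in SPA_MINBLOCKSHIFT (512-byte) units in its low 63 bits, so
+ * the accessors above take and return byte offsets:
+ *
+ *	DVA_MAPPING_SET_SRC_OFFSET(vimep, 0x100000);
+ *	ASSERT3U(DVA_MAPPING_GET_SRC_OFFSET(vimep), ==, 0x100000);
+ *
+ * The destination is an ordinary dva_t, so DVA_GET_VDEV()/DVA_GET_OFFSET()
+ * apply to vimep_dst.
+ */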
+
+typedef struct vdev_indirect_mapping_entry {
+ vdev_indirect_mapping_entry_phys_t vime_mapping;
+ uint32_t vime_obsolete_count;
+ list_node_t vime_node;
+} vdev_indirect_mapping_entry_t;
+
+/*
+ * This is stored in the bonus buffer of the mapping object, see comment of
+ * vdev_indirect_config for more details.
+ */
+typedef struct vdev_indirect_mapping_phys {
+ uint64_t vimp_max_offset;
+ uint64_t vimp_bytes_mapped;
+ uint64_t vimp_num_entries; /* number of v_i_m_entry_phys_t's */
+
+ /*
+ * For each entry in the mapping object, this object contains an
+ * entry representing the number of bytes of that mapping entry
+ * that were no longer in use by the pool at the time this indirect
+ * vdev was last condensed.
+ */
+ uint64_t vimp_counts_object;
+} vdev_indirect_mapping_phys_t;
+
+#define VDEV_INDIRECT_MAPPING_SIZE_V0 (3 * sizeof (uint64_t))
+
+typedef struct vdev_indirect_mapping {
+ uint64_t vim_object;
+ boolean_t vim_havecounts;
+
+ /*
+ * An ordered array of all mapping entries, sorted by source offset.
+ * Note that vim_entries is needed during a removal (and contains
+ * mappings that have been synced to disk so far) to handle frees
+ * from the removing device.
+ */
+ vdev_indirect_mapping_entry_phys_t *vim_entries;
+
+ objset_t *vim_objset;
+
+ dmu_buf_t *vim_dbuf;
+ vdev_indirect_mapping_phys_t *vim_phys;
+} vdev_indirect_mapping_t;
+
+extern vdev_indirect_mapping_t *vdev_indirect_mapping_open(objset_t *os,
+ uint64_t object);
+extern void vdev_indirect_mapping_close(vdev_indirect_mapping_t *vim);
+extern uint64_t vdev_indirect_mapping_alloc(objset_t *os, dmu_tx_t *tx);
+extern void vdev_indirect_mapping_free(objset_t *os, uint64_t obj,
+ dmu_tx_t *tx);
+
+extern uint64_t vdev_indirect_mapping_num_entries(vdev_indirect_mapping_t *vim);
+extern uint64_t vdev_indirect_mapping_max_offset(vdev_indirect_mapping_t *vim);
+extern uint64_t vdev_indirect_mapping_object(vdev_indirect_mapping_t *vim);
+extern uint64_t vdev_indirect_mapping_bytes_mapped(
+ vdev_indirect_mapping_t *vim);
+extern uint64_t vdev_indirect_mapping_size(vdev_indirect_mapping_t *vim);
+
+/*
+ * Writes the given list of vdev_indirect_mapping_entry_t to the mapping
+ * then updates internal state.
+ */
+extern void vdev_indirect_mapping_add_entries(vdev_indirect_mapping_t *vim,
+ list_t *vime_list, dmu_tx_t *tx);
+
+extern vdev_indirect_mapping_entry_phys_t *
+ vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim,
+ uint64_t offset);
+
+extern vdev_indirect_mapping_entry_phys_t *
+ vdev_indirect_mapping_entry_for_offset_or_next(vdev_indirect_mapping_t *vim,
+ uint64_t offset);
+
+extern uint32_t *vdev_indirect_mapping_load_obsolete_counts(
+ vdev_indirect_mapping_t *vim);
+extern void vdev_indirect_mapping_load_obsolete_spacemap(
+ vdev_indirect_mapping_t *vim,
+ uint32_t *counts, space_map_t *obsolete_space_sm);
+extern void vdev_indirect_mapping_increment_obsolete_count(
+ vdev_indirect_mapping_t *vim,
+ uint64_t offset, uint64_t asize, uint32_t *counts);
+extern void vdev_indirect_mapping_free_obsolete_counts(
+ vdev_indirect_mapping_t *vim, uint32_t *counts);
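+
+/*
+ * Illustrative sketch (not from the original source): translating a source
+ * offset on a removed vdev through the mapping declared above.  The helper
+ * name is hypothetical and error handling (offsets not covered by any entry)
+ * is omitted.
+ */
+static inline void
+vim_translate_example(vdev_indirect_mapping_t *vim, uint64_t src_offset,
+    uint64_t *dst_vdev, uint64_t *dst_offset)
+{
+	vdev_indirect_mapping_entry_phys_t *vimep =
+	    vdev_indirect_mapping_entry_for_offset(vim, src_offset);
+	uint64_t inner = src_offset - DVA_MAPPING_GET_SRC_OFFSET(vimep);
+
+	*dst_vdev = DVA_GET_VDEV(&vimep->vimep_dst);
+	*dst_offset = DVA_GET_OFFSET(&vimep->vimep_dst) + inner;
+}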
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_VDEV_INDIRECT_MAPPING_H */
diff --git a/include/sys/vdev_removal.h b/include/sys/vdev_removal.h
new file mode 100644
index 000000000000..5b1e3056be14
--- /dev/null
+++ b/include/sys/vdev_removal.h
@@ -0,0 +1,93 @@
+/*
+ * CDDL HEADER START
+ *
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
+ */
+
+#ifndef _SYS_VDEV_REMOVAL_H
+#define _SYS_VDEV_REMOVAL_H
+
+#include <sys/spa.h>
+#include <sys/bpobj.h>
+#include <sys/vdev_indirect_mapping.h>
+#include <sys/vdev_indirect_births.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+typedef struct spa_vdev_removal {
+ vdev_t *svr_vdev;
+ uint64_t svr_max_offset_to_sync[TXG_SIZE];
+ /* Thread performing a vdev removal. */
+ kthread_t *svr_thread;
+ /* Segments left to copy from the current metaslab. */
+ range_tree_t *svr_allocd_segs;
+ kmutex_t svr_lock;
+ kcondvar_t svr_cv;
+ boolean_t svr_thread_exit;
+
+ /*
+ * New mappings to write out each txg.
+ */
+ list_t svr_new_segments[TXG_SIZE];
+
+ /*
+ * Ranges that were freed while a mapping was in flight. This is
+ * a subset of the ranges covered by svr_new_segments.
+ */
+ range_tree_t *svr_frees[TXG_SIZE];
+
+ /*
+ * Number of bytes for which we have finished our work
+ * in each txg. This could be data copied (which will be part of
+ * the mappings in svr_new_segments), or data freed before
+ * we got around to copying it.
+ */
+ uint64_t svr_bytes_done[TXG_SIZE];
+
+ /* List of leaf zap objects to be unlinked */
+ nvlist_t *svr_zaplist;
+} spa_vdev_removal_t;
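+
+/*
+ * Illustrative sketch (not from the original source): at the end of each
+ * txg the per-txg byte count above is folded into the persistent
+ * accounting in spa_removing_phys_t (whose sr_copied counts bytes copied
+ * or freed).  This is a simplified view of what svr_sync() is expected to
+ * do, not the actual implementation.
+ */
+#if 0	/* example only */
+	spa->spa_removing_phys.sr_copied +=
+	    svr->svr_bytes_done[txg & TXG_MASK];
+	svr->svr_bytes_done[txg & TXG_MASK] = 0;
+#endif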
+
+typedef struct spa_condensing_indirect {
+ /*
+ * New mappings to write out each txg.
+ */
+ list_t sci_new_mapping_entries[TXG_SIZE];
+
+ vdev_indirect_mapping_t *sci_new_mapping;
+} spa_condensing_indirect_t;
+
+extern int spa_remove_init(spa_t *);
+extern void spa_restart_removal(spa_t *);
+extern int spa_condense_init(spa_t *);
+extern void spa_condense_fini(spa_t *);
+extern void spa_condense_indirect_restart(spa_t *);
+extern void spa_vdev_condense_suspend(spa_t *);
+extern int spa_vdev_remove(spa_t *, uint64_t, boolean_t);
+extern void free_from_removing_vdev(vdev_t *, uint64_t, uint64_t, uint64_t);
+extern int spa_removal_get_stats(spa_t *, pool_removal_stat_t *);
+extern void svr_sync(spa_t *spa, dmu_tx_t *tx);
+extern void spa_vdev_remove_suspend(spa_t *);
+extern int spa_vdev_remove_cancel(spa_t *);
+extern void spa_vdev_removal_destroy(spa_vdev_removal_t *svr);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SYS_VDEV_REMOVAL_H */
diff --git a/include/sys/zfs_debug.h b/include/sys/zfs_debug.h
index 7e05c2a35f83..91b161ff86aa 100644
--- a/include/sys/zfs_debug.h
+++ b/include/sys/zfs_debug.h
@@ -1,97 +1,98 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2016 by Delphix. All rights reserved.
*/
#ifndef _SYS_ZFS_DEBUG_H
#define _SYS_ZFS_DEBUG_H
#ifdef __cplusplus
extern "C" {
#endif
#ifndef TRUE
#define TRUE 1
#endif
#ifndef FALSE
#define FALSE 0
#endif
extern int zfs_flags;
extern int zfs_recover;
extern int zfs_free_leak_on_eio;
extern int zfs_dbgmsg_enable;
#define ZFS_DEBUG_DPRINTF (1 << 0)
#define ZFS_DEBUG_DBUF_VERIFY (1 << 1)
#define ZFS_DEBUG_DNODE_VERIFY (1 << 2)
#define ZFS_DEBUG_SNAPNAMES (1 << 3)
#define ZFS_DEBUG_MODIFY (1 << 4)
#define ZFS_DEBUG_SPA (1 << 5)
#define ZFS_DEBUG_ZIO_FREE (1 << 6)
#define ZFS_DEBUG_HISTOGRAM_VERIFY (1 << 7)
#define ZFS_DEBUG_METASLAB_VERIFY (1 << 8)
#define ZFS_DEBUG_SET_ERROR (1 << 9)
+#define ZFS_DEBUG_INDIRECT_REMAP (1 << 10)
extern void __dprintf(const char *file, const char *func,
int line, const char *fmt, ...);
#define zfs_dbgmsg(...) \
if (zfs_dbgmsg_enable) \
__dprintf(__FILE__, __func__, __LINE__, __VA_ARGS__)
#ifdef ZFS_DEBUG
/*
* To enable this:
*
* $ echo 1 >/sys/module/zfs/parameters/zfs_flags
*/
#define dprintf(...) \
if (zfs_flags & ZFS_DEBUG_DPRINTF) \
__dprintf(__FILE__, __func__, __LINE__, __VA_ARGS__)
#else
#define dprintf(...) ((void)0)
#endif /* ZFS_DEBUG */
extern void zfs_panic_recover(const char *fmt, ...);
typedef struct zfs_dbgmsg {
list_node_t zdm_node;
time_t zdm_timestamp;
int zdm_size;
char zdm_msg[1]; /* variable length allocation */
} zfs_dbgmsg_t;
extern void zfs_dbgmsg_init(void);
extern void zfs_dbgmsg_fini(void);
#ifndef _KERNEL
extern int dprintf_find_string(const char *string);
extern void zfs_dbgmsg_print(const char *tag);
#endif
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ZFS_DEBUG_H */
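The dprintf() comment above shows how to enable every debug category at once; categories can also be enabled selectively by OR-ing the ZFS_DEBUG_* bits into zfs_flags. A hedged sketch of typical use from kernel code (the function name and message text are illustrative only):

/*
 * Illustrative sketch only.  dprintf() is compiled out unless ZFS_DEBUG is
 * defined and fires only when ZFS_DEBUG_DPRINTF is set in zfs_flags;
 * zfs_dbgmsg() only requires zfs_dbgmsg_enable to be nonzero.
 *
 * To enable just dprintf() plus the new indirect-remap tracing bit:
 *   echo $(( (1 << 0) | (1 << 10) )) > /sys/module/zfs/parameters/zfs_flags
 */
static void
remap_trace_sketch(uint64_t offset, uint64_t asize)
{
	dprintf("remapping offset %llu asize %llu\n",
	    (u_longlong_t)offset, (u_longlong_t)asize);
	zfs_dbgmsg("remapped %llu bytes", (u_longlong_t)asize);
}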
diff --git a/include/sys/zil.h b/include/sys/zil.h
index fc0cc506592e..fb7b38a066f4 100644
--- a/include/sys/zil.h
+++ b/include/sys/zil.h
@@ -1,524 +1,524 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#ifndef _SYS_ZIL_H
#define _SYS_ZIL_H
#include <sys/types.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu.h>
#include <sys/zio_crypt.h>
#ifdef __cplusplus
extern "C" {
#endif
struct dsl_pool;
struct dsl_dataset;
struct lwb;
/*
* Intent log format:
*
* Each objset has its own intent log. The log header (zil_header_t)
* for objset N's intent log is kept in the Nth object of the SPA's
* intent_log objset. The log header points to a chain of log blocks,
* each of which contains log records (i.e., transactions) followed by
* a log block trailer (zil_trailer_t). The format of a log record
* depends on the record (or transaction) type, but all records begin
* with a common structure that defines the type, length, and txg.
*/
/*
* Intent log header - this on disk structure holds fields to manage
* the log. All fields are 64 bit to be handled easily across architectures.
*/
typedef struct zil_header {
uint64_t zh_claim_txg; /* txg in which log blocks were claimed */
uint64_t zh_replay_seq; /* highest replayed sequence number */
blkptr_t zh_log; /* log chain */
uint64_t zh_claim_blk_seq; /* highest claimed block sequence number */
uint64_t zh_flags; /* header flags */
uint64_t zh_claim_lr_seq; /* highest claimed lr sequence number */
uint64_t zh_pad[3];
} zil_header_t;
/*
* zh_flags bit settings
*/
#define ZIL_REPLAY_NEEDED 0x1 /* replay needed - internal only */
#define ZIL_CLAIM_LR_SEQ_VALID 0x2 /* zh_claim_lr_seq field is valid */
/*
* Log block chaining.
*
* Log blocks are chained together. Originally they were chained at the
* end of the block. For performance reasons the chain was moved to the
* beginning of the block, which allows writing only the data being used.
* The older position is supported for backwards compatibility.
*
* The zio_eck_t contains a zec_cksum which for the intent log is
* the sequence number of this log block. A seq of 0 is invalid.
* The zec_cksum is checked by the SPA against the sequence
* number passed in the blk_cksum field of the blkptr_t
*/
typedef struct zil_chain {
uint64_t zc_pad;
blkptr_t zc_next_blk; /* next block in chain */
uint64_t zc_nused; /* bytes in log block used */
zio_eck_t zc_eck; /* block trailer */
} zil_chain_t;
#define ZIL_MIN_BLKSZ 4096ULL
/*
* ziltest is by and large an ugly hack, but very useful in
* checking replay without tedious work.
* When running ziltest we want to keep all itx's and so maintain
* a single list in the zl_itxg[] that uses a high txg: ZILTEST_TXG.
* We subtract TXG_CONCURRENT_STATES to allow for common code.
*/
#define ZILTEST_TXG (UINT64_MAX - TXG_CONCURRENT_STATES)
/*
* The words of a log block checksum.
*/
#define ZIL_ZC_GUID_0 0
#define ZIL_ZC_GUID_1 1
#define ZIL_ZC_OBJSET 2
#define ZIL_ZC_SEQ 3
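Because the ZIL reuses the block-pointer checksum words as an identity tuple (pool GUID, objset id, block sequence number) rather than as a data digest, the indices above act as field offsets into blk_cksum. An illustrative accessor, assuming the standard blkptr_t/zio_cksum_t layout; the helper is hypothetical and not part of this patch:

/* Illustrative sketch only: pull the log-block sequence number back out. */
static uint64_t
zil_bp_seq_sketch(const blkptr_t *bp)
{
	return (bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
}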
typedef enum zil_create {
Z_FILE,
Z_DIR,
Z_XATTRDIR,
} zil_create_t;
/*
* Size of the xvattr log section.
* It is composed of lr_attr_t + the xvattr bitmap + 2 64-bit timestamps
* for create time, a single 64-bit integer for all of the attributes,
* and 4 64-bit integers (32 bytes) for the scanstamp.
*
*/
#define ZIL_XVAT_SIZE(mapsize) \
sizeof (lr_attr_t) + (sizeof (uint32_t) * (mapsize - 1)) + \
(sizeof (uint64_t) * 7)
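To make the size formula concrete, a worked expansion (illustrative; assumes the 8-byte lr_attr_t defined later in this header):

/*
 * ZIL_XVAT_SIZE(3) = sizeof (lr_attr_t)      ->  8 (header + first bitmap word)
 *                  + 2 * sizeof (uint32_t)   ->  8 (remaining bitmap words)
 *                  + 7 * sizeof (uint64_t)   -> 56 (2 crtime + 1 attrs + 4 scanstamp)
 *                  = 72 bytes
 */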
/*
* Size of ACL in log. The ACE data is padded out to properly align
* on 8 byte boundary.
*/
#define ZIL_ACE_LENGTH(x) (roundup(x, sizeof (uint64_t)))
/*
* Intent log transaction types and record structures
*/
#define TX_COMMIT 0 /* Commit marker (no on-disk state) */
#define TX_CREATE 1 /* Create file */
#define TX_MKDIR 2 /* Make directory */
#define TX_MKXATTR 3 /* Make XATTR directory */
#define TX_SYMLINK 4 /* Create symbolic link to a file */
#define TX_REMOVE 5 /* Remove file */
#define TX_RMDIR 6 /* Remove directory */
#define TX_LINK 7 /* Create hard link to a file */
#define TX_RENAME 8 /* Rename a file */
#define TX_WRITE 9 /* File write */
#define TX_TRUNCATE 10 /* Truncate a file */
#define TX_SETATTR 11 /* Set file attributes */
#define TX_ACL_V0 12 /* Set old formatted ACL */
#define TX_ACL 13 /* Set ACL */
#define TX_CREATE_ACL 14 /* create with ACL */
#define TX_CREATE_ATTR 15 /* create + attrs */
#define TX_CREATE_ACL_ATTR 16 /* create with ACL + attrs */
#define TX_MKDIR_ACL 17 /* mkdir with ACL */
#define TX_MKDIR_ATTR 18 /* mkdir with attr */
#define TX_MKDIR_ACL_ATTR 19 /* mkdir with ACL + attrs */
#define TX_WRITE2 20 /* dmu_sync EALREADY write */
#define TX_MAX_TYPE 21 /* Max transaction type */
/*
* The transactions for mkdir, symlink, remove, rmdir, link, and rename
* may have the following bit set, indicating the original request
* specified case-insensitive handling of names.
*/
#define TX_CI ((uint64_t)0x1 << 63) /* case-insensitive behavior requested */
/*
* Transactions for write, truncate, setattr, acl_v0, and acl can be logged
* out of order. For convenience in the code, all such records must have
* lr_foid at the same offset.
*/
#define TX_OOO(txtype) \
((txtype) == TX_WRITE || \
(txtype) == TX_TRUNCATE || \
(txtype) == TX_SETATTR || \
(txtype) == TX_ACL_V0 || \
(txtype) == TX_ACL || \
(txtype) == TX_WRITE2)
/*
* The number of dnode slots consumed by the object is stored in the 8
* unused upper bits of the object ID. We subtract 1 from the value
* stored on disk for compatibility with implementations that don't
* support large dnodes. The slot count for a single-slot dnode will
* contain 0 for those bits to preserve the log record format for
* "small" dnodes.
*/
#define LR_FOID_GET_SLOTS(oid) (BF64_GET((oid), 56, 8) + 1)
#define LR_FOID_SET_SLOTS(oid, x) BF64_SET((oid), 56, 8, (x) - 1)
#define LR_FOID_GET_OBJ(oid) BF64_GET((oid), 0, DN_MAX_OBJECT_SHIFT)
#define LR_FOID_SET_OBJ(oid, x) BF64_SET((oid), 0, DN_MAX_OBJECT_SHIFT, (x))
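A short, illustrative round trip through the slot encoding described above (the helper is hypothetical and not part of this patch):

/* Illustrative sketch only: encode and decode the dnode slot count. */
static void
lr_foid_slots_sketch(uint64_t object)
{
	uint64_t foid = object;		/* low bits carry the object number */

	LR_FOID_SET_SLOTS(foid, 2);	/* a 2-slot (1K) dnode stores 2 - 1 = 1 */
	ASSERT3U(LR_FOID_GET_SLOTS(foid), ==, 2);
	/* An untouched foid decodes as 0 + 1 = 1 slot, i.e. a "small" dnode. */
}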
/*
* Format of log records.
* The fields are carefully defined to allow them to be aligned
* and sized the same on sparc & intel architectures.
* Each log record has a common structure at the beginning.
*
* The log record on disk (lrc_seq) holds the sequence number of all log
* records which is used to ensure we don't replay the same record.
*/
typedef struct { /* common log record header */
uint64_t lrc_txtype; /* intent log transaction type */
uint64_t lrc_reclen; /* transaction record length */
uint64_t lrc_txg; /* dmu transaction group number */
uint64_t lrc_seq; /* see comment above */
} lr_t;
/*
* Common start of all out-of-order record types (TX_OOO() above).
*/
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* object id */
} lr_ooo_t;
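Since every TX_OOO() record type keeps lr_foid at the same offset, replay code can read the object id through this common view without knowing the concrete record type. An illustrative sketch (the helper is hypothetical):

/* Illustrative sketch only: view any out-of-order record through lr_ooo_t. */
static uint64_t
lr_ooo_foid_sketch(const lr_t *lr)
{
	ASSERT(TX_OOO(lr->lrc_txtype & ~TX_CI));
	return (((const lr_ooo_t *)lr)->lr_foid);
}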
/*
* Handle optional extended vattr attributes.
*
* Whenever new attributes are added, the version number
* will need to be updated, as will the code in
* zfs_log.c and zfs_replay.c.
*/
typedef struct {
uint32_t lr_attr_masksize; /* number of elements in array */
uint32_t lr_attr_bitmap; /* First entry of array */
/* remainder of array and any additional fields */
} lr_attr_t;
/*
* Log record for creates without optional ACL.
* This log record does support optional xvattr_t attributes.
*/
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_doid; /* object id of directory */
uint64_t lr_foid; /* object id of created file object */
uint64_t lr_mode; /* mode of object */
uint64_t lr_uid; /* uid of object */
uint64_t lr_gid; /* gid of object */
uint64_t lr_gen; /* generation (txg of creation) */
uint64_t lr_crtime[2]; /* creation time */
uint64_t lr_rdev; /* rdev of object to create */
/* name of object to create follows this */
/* for symlinks, link content follows name */
/* for creates with xvattr data, the name follows the xvattr info */
} lr_create_t;
/*
* FUID ACL record will be an array of ACEs from the original ACL.
* If this array includes ephemeral IDs, the record will also include
* an array of log-specific FUIDs to replace the ephemeral IDs.
* Only one copy of each unique domain will be present, so the log-specific
* FUIDs will use an index into a compressed domain table. On replay this
* information will be used to construct real FUIDs (and bypass idmap,
* since it may not be available).
*/
/*
* Log record for creates with optional ACL
* This log record is also used for recording any FUID
* information needed for replaying the create. If the
* file doesn't have any actual ACEs then the lr_aclcnt
* would be zero.
*
* After lr_acl_flags, lr_acl_bytes bytes of variable-sized ACEs follow.
* If create is also setting xvattr's, then acl data follows xvattr.
* If ACE FUIDs are needed then they will follow the xvattr_t. Following
* the FUIDs will be the domain table information. The FUIDs for the owner
* and group will be in lr_create. Name follows ACL data.
*/
typedef struct {
lr_create_t lr_create; /* common create portion */
uint64_t lr_aclcnt; /* number of ACEs in ACL */
uint64_t lr_domcnt; /* number of unique domains */
uint64_t lr_fuidcnt; /* number of real fuids */
uint64_t lr_acl_bytes; /* number of bytes in ACL */
uint64_t lr_acl_flags; /* ACL flags */
} lr_acl_create_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_doid; /* obj id of directory */
/* name of object to remove follows this */
} lr_remove_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_doid; /* obj id of directory */
uint64_t lr_link_obj; /* obj id of link */
/* name of object to link follows this */
} lr_link_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_sdoid; /* obj id of source directory */
uint64_t lr_tdoid; /* obj id of target directory */
/* 2 strings: names of source and destination follow this */
} lr_rename_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* file object to write */
uint64_t lr_offset; /* offset to write to */
uint64_t lr_length; /* user data length to write */
uint64_t lr_blkoff; /* no longer used */
blkptr_t lr_blkptr; /* spa block pointer for replay */
/* write data will follow for small writes */
} lr_write_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* object id of file to truncate */
uint64_t lr_offset; /* offset to truncate from */
uint64_t lr_length; /* length to truncate */
} lr_truncate_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* file object to change attributes */
uint64_t lr_mask; /* mask of attributes to set */
uint64_t lr_mode; /* mode to set */
uint64_t lr_uid; /* uid to set */
uint64_t lr_gid; /* gid to set */
uint64_t lr_size; /* size to set */
uint64_t lr_atime[2]; /* access time */
uint64_t lr_mtime[2]; /* modification time */
/* optional attribute lr_attr_t may be here */
} lr_setattr_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* obj id of file */
uint64_t lr_aclcnt; /* number of acl entries */
/* lr_aclcnt number of ace_t entries follow this */
} lr_acl_v0_t;
typedef struct {
lr_t lr_common; /* common portion of log record */
uint64_t lr_foid; /* obj id of file */
uint64_t lr_aclcnt; /* number of ACEs in ACL */
uint64_t lr_domcnt; /* number of unique domains */
uint64_t lr_fuidcnt; /* number of real fuids */
uint64_t lr_acl_bytes; /* number of bytes in ACL */
uint64_t lr_acl_flags; /* ACL flags */
/* lr_acl_bytes bytes of variable-sized ACEs follow */
} lr_acl_t;
/*
* ZIL structure definitions, interface function prototype and globals.
*/
/*
* Writes are handled in three different ways:
*
* WR_INDIRECT:
* In this mode, if we need to commit the write later, then the block
* is immediately written into the file system (using dmu_sync),
* and a pointer to the block is put into the log record.
* When the txg commits the block is linked in.
* This saves additionally writing the data into the log record.
* There are a few requirements for this to occur:
* - write is greater than zfs/zvol_immediate_write_sz
* - not using slogs (as slogs are assumed to always be faster
* than writing into the main pool)
* - the write occupies only one block
* WR_COPIED:
* If we know we'll immediately be committing the
* transaction (FSYNC or FDSYNC), then we allocate a larger
* log record here for the data and copy the data in.
* WR_NEED_COPY:
* Otherwise we don't allocate a buffer, and *if* we need to
* flush the write later then a buffer is allocated and
* we retrieve the data using the dmu.
*/
typedef enum {
WR_INDIRECT, /* indirect - a large write (dmu_sync() data */
/* and put blkptr in log, rather than actual data) */
WR_COPIED, /* immediate - data is copied into lr_write_t */
WR_NEED_COPY, /* immediate - data needs to be copied if pushed */
WR_NUM_STATES /* number of states */
} itx_wr_state_t;
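A simplified sketch of the selection policy described in the comment above; the real decision is made in zfs_log_write() and also accounts for block boundaries, so this is illustrative only (the helper and its parameters are hypothetical):

static itx_wr_state_t
pick_wr_state_sketch(uint64_t len, uint64_t immediate_write_sz,
    boolean_t sync, boolean_t using_slog)
{
	if (len > immediate_write_sz && !using_slog)
		return (WR_INDIRECT);	/* dmu_sync() the data, log the blkptr */
	else if (sync)
		return (WR_COPIED);	/* copy the data into the log record now */
	else
		return (WR_NEED_COPY);	/* fetch from the DMU only if needed */
}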
typedef void (*zil_callback_t)(void *data);
typedef struct itx {
list_node_t itx_node; /* linkage on zl_itx_list */
void *itx_private; /* type-specific opaque data */
itx_wr_state_t itx_wr_state; /* write state */
uint8_t itx_sync; /* synchronous transaction */
zil_callback_t itx_callback; /* Called when the itx is persistent */
void *itx_callback_data; /* User data for the callback */
size_t itx_size; /* allocated itx structure size */
uint64_t itx_oid; /* object id */
lr_t itx_lr; /* common part of log record */
/* followed by type-specific part of lr_xx_t and its immediate data */
} itx_t;
/*
* Used for zil kstat.
*/
typedef struct zil_stats {
/*
* Number of times a ZIL commit (e.g. fsync) has been requested.
*/
kstat_named_t zil_commit_count;
/*
* Number of times the ZIL has been flushed to stable storage.
* This is less than zil_commit_count when commits are "merged"
* (see the documentation above zil_commit()).
*/
kstat_named_t zil_commit_writer_count;
/*
* Number of transactions (reads, writes, renames, etc.)
* that have been committed.
*/
kstat_named_t zil_itx_count;
/*
* See the documentation for itx_wr_state_t above.
* Note that "bytes" accumulates the length of the transactions
* (i.e. data), not the actual log record sizes.
*/
kstat_named_t zil_itx_indirect_count;
kstat_named_t zil_itx_indirect_bytes;
kstat_named_t zil_itx_copied_count;
kstat_named_t zil_itx_copied_bytes;
kstat_named_t zil_itx_needcopy_count;
kstat_named_t zil_itx_needcopy_bytes;
/*
* Transactions which have been allocated to the "normal"
* (i.e. not slog) storage pool. Note that "bytes" accumulates
* the actual log record sizes, which do not include the actual
* data in the case of indirect writes.
*/
kstat_named_t zil_itx_metaslab_normal_count;
kstat_named_t zil_itx_metaslab_normal_bytes;
/*
* Transactions which have been allocated to the "slog" storage pool.
* If there are no separate log devices, this is the same as the
* "normal" pool.
*/
kstat_named_t zil_itx_metaslab_slog_count;
kstat_named_t zil_itx_metaslab_slog_bytes;
} zil_stats_t;
extern zil_stats_t zil_stats;
#define ZIL_STAT_INCR(stat, val) \
atomic_add_64(&zil_stats.stat.value.ui64, (val));
#define ZIL_STAT_BUMP(stat) \
ZIL_STAT_INCR(stat, 1);
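Typical use of the kstat helpers above, e.g. when an itx's data is copied directly into the log record (illustrative; the surrounding function is hypothetical):

static void
zil_stat_update_sketch(uint64_t nbytes)
{
	ZIL_STAT_BUMP(zil_itx_copied_count);
	ZIL_STAT_INCR(zil_itx_copied_bytes, nbytes);
}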
typedef int zil_parse_blk_func_t(zilog_t *zilog, blkptr_t *bp, void *arg,
uint64_t txg);
typedef int zil_parse_lr_func_t(zilog_t *zilog, lr_t *lr, void *arg,
uint64_t txg);
typedef int zil_replay_func_t(void *arg1, void *arg2, boolean_t byteswap);
typedef int zil_get_data_t(void *arg, lr_write_t *lr, char *dbuf,
struct lwb *lwb, zio_t *zio);
extern int zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
boolean_t decrypt);
extern void zil_init(void);
extern void zil_fini(void);
extern zilog_t *zil_alloc(objset_t *os, zil_header_t *zh_phys);
extern void zil_free(zilog_t *zilog);
extern zilog_t *zil_open(objset_t *os, zil_get_data_t *get_data);
extern void zil_close(zilog_t *zilog);
extern void zil_replay(objset_t *os, void *arg,
zil_replay_func_t *replay_func[TX_MAX_TYPE]);
extern boolean_t zil_replaying(zilog_t *zilog, dmu_tx_t *tx);
extern void zil_destroy(zilog_t *zilog, boolean_t keep_first);
extern void zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx);
extern itx_t *zil_itx_create(uint64_t txtype, size_t lrsize);
extern void zil_itx_destroy(itx_t *itx);
extern void zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx);
extern void zil_commit(zilog_t *zilog, uint64_t oid);
extern void zil_commit_impl(zilog_t *zilog, uint64_t oid);
-extern int zil_vdev_offline(const char *osname, void *txarg);
+extern int zil_reset(const char *osname, void *txarg);
extern int zil_claim(struct dsl_pool *dp,
struct dsl_dataset *ds, void *txarg);
extern int zil_check_log_chain(struct dsl_pool *dp,
struct dsl_dataset *ds, void *tx);
extern void zil_sync(zilog_t *zilog, dmu_tx_t *tx);
extern void zil_clean(zilog_t *zilog, uint64_t synced_txg);
extern int zil_suspend(const char *osname, void **cookiep);
extern void zil_resume(void *cookie);
extern void zil_lwb_add_block(struct lwb *lwb, const blkptr_t *bp);
extern void zil_lwb_add_txg(struct lwb *lwb, uint64_t txg);
extern int zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp);
extern void zil_set_sync(zilog_t *zilog, uint64_t syncval);
extern void zil_set_logbias(zilog_t *zilog, uint64_t slogval);
extern int zil_replay_disable;
#ifdef __cplusplus
}
#endif
#endif /* _SYS_ZIL_H */
diff --git a/include/sys/zio.h b/include/sys/zio.h
index be8e18b4bba7..a275b16de9cc 100644
--- a/include/sys/zio.h
+++ b/include/sys/zio.h
@@ -1,678 +1,681 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright 2016 Toomas Soome <tsoome@me.com>
*/
#ifndef _ZIO_H
#define _ZIO_H
#include <sys/zio_priority.h>
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/fs/zfs.h>
#include <sys/zio_impl.h>
#ifdef __cplusplus
extern "C" {
#endif
/*
* Embedded checksum
*/
#define ZEC_MAGIC 0x210da7ab10c7a11ULL
typedef struct zio_eck {
uint64_t zec_magic; /* for validation, endianness */
zio_cksum_t zec_cksum; /* 256-bit checksum */
} zio_eck_t;
/*
* Gang block headers are self-checksumming and contain an array
* of block pointers.
*/
#define SPA_GANGBLOCKSIZE SPA_MINBLOCKSIZE
#define SPA_GBH_NBLKPTRS ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t)) / sizeof (blkptr_t))
#define SPA_GBH_FILLER ((SPA_GANGBLOCKSIZE - \
sizeof (zio_eck_t) - \
(SPA_GBH_NBLKPTRS * sizeof (blkptr_t))) /\
sizeof (uint64_t))
typedef struct zio_gbh {
blkptr_t zg_blkptr[SPA_GBH_NBLKPTRS];
uint64_t zg_filler[SPA_GBH_FILLER];
zio_eck_t zg_tail;
} zio_gbh_phys_t;
enum zio_checksum {
ZIO_CHECKSUM_INHERIT = 0,
ZIO_CHECKSUM_ON,
ZIO_CHECKSUM_OFF,
ZIO_CHECKSUM_LABEL,
ZIO_CHECKSUM_GANG_HEADER,
ZIO_CHECKSUM_ZILOG,
ZIO_CHECKSUM_FLETCHER_2,
ZIO_CHECKSUM_FLETCHER_4,
ZIO_CHECKSUM_SHA256,
ZIO_CHECKSUM_ZILOG2,
ZIO_CHECKSUM_NOPARITY,
ZIO_CHECKSUM_SHA512,
ZIO_CHECKSUM_SKEIN,
ZIO_CHECKSUM_EDONR,
ZIO_CHECKSUM_FUNCTIONS
};
/*
* The number of "legacy" checksum functions which can be set on individual
* objects.
*/
#define ZIO_CHECKSUM_LEGACY_FUNCTIONS ZIO_CHECKSUM_ZILOG2
#define ZIO_CHECKSUM_ON_VALUE ZIO_CHECKSUM_FLETCHER_4
#define ZIO_CHECKSUM_DEFAULT ZIO_CHECKSUM_ON
#define ZIO_CHECKSUM_MASK 0xffULL
#define ZIO_CHECKSUM_VERIFY (1 << 8)
#define ZIO_DEDUPCHECKSUM ZIO_CHECKSUM_SHA256
#define ZIO_DEDUPDITTO_MIN 100
/* supported encryption algorithms */
enum zio_encrypt {
ZIO_CRYPT_INHERIT = 0,
ZIO_CRYPT_ON,
ZIO_CRYPT_OFF,
ZIO_CRYPT_AES_128_CCM,
ZIO_CRYPT_AES_192_CCM,
ZIO_CRYPT_AES_256_CCM,
ZIO_CRYPT_AES_128_GCM,
ZIO_CRYPT_AES_192_GCM,
ZIO_CRYPT_AES_256_GCM,
ZIO_CRYPT_FUNCTIONS
};
#define ZIO_CRYPT_ON_VALUE ZIO_CRYPT_AES_256_CCM
#define ZIO_CRYPT_DEFAULT ZIO_CRYPT_OFF
/* macros defining encryption lengths */
#define ZIO_OBJSET_MAC_LEN 32
#define ZIO_DATA_IV_LEN 12
#define ZIO_DATA_SALT_LEN 8
#define ZIO_DATA_MAC_LEN 16
/*
* The number of "legacy" compression functions which can be set on individual
* objects.
*/
#define ZIO_COMPRESS_LEGACY_FUNCTIONS ZIO_COMPRESS_LZ4
/*
* The meaning of "compress = on" is selected by the compression features enabled
* on a given pool.
*/
#define ZIO_COMPRESS_LEGACY_ON_VALUE ZIO_COMPRESS_LZJB
#define ZIO_COMPRESS_LZ4_ON_VALUE ZIO_COMPRESS_LZ4
#define ZIO_COMPRESS_DEFAULT ZIO_COMPRESS_OFF
#define BOOTFS_COMPRESS_VALID(compress) \
((compress) == ZIO_COMPRESS_LZJB || \
(compress) == ZIO_COMPRESS_LZ4 || \
(compress) == ZIO_COMPRESS_GZIP_1 || \
(compress) == ZIO_COMPRESS_GZIP_2 || \
(compress) == ZIO_COMPRESS_GZIP_3 || \
(compress) == ZIO_COMPRESS_GZIP_4 || \
(compress) == ZIO_COMPRESS_GZIP_5 || \
(compress) == ZIO_COMPRESS_GZIP_6 || \
(compress) == ZIO_COMPRESS_GZIP_7 || \
(compress) == ZIO_COMPRESS_GZIP_8 || \
(compress) == ZIO_COMPRESS_GZIP_9 || \
(compress) == ZIO_COMPRESS_ZLE || \
(compress) == ZIO_COMPRESS_ON || \
(compress) == ZIO_COMPRESS_OFF)
/*
* Default Linux timeout for a sd device.
*/
#define ZIO_DELAY_MAX (30 * MILLISEC)
#define ZIO_FAILURE_MODE_WAIT 0
#define ZIO_FAILURE_MODE_CONTINUE 1
#define ZIO_FAILURE_MODE_PANIC 2
typedef enum zio_suspend_reason {
ZIO_SUSPEND_NONE = 0,
ZIO_SUSPEND_IOERR,
ZIO_SUSPEND_MMP,
} zio_suspend_reason_t;
enum zio_flag {
/*
* Flags inherited by gang, ddt, and vdev children,
* and that must be equal for two zios to aggregate
*/
ZIO_FLAG_DONT_AGGREGATE = 1 << 0,
ZIO_FLAG_IO_REPAIR = 1 << 1,
ZIO_FLAG_SELF_HEAL = 1 << 2,
ZIO_FLAG_RESILVER = 1 << 3,
ZIO_FLAG_SCRUB = 1 << 4,
ZIO_FLAG_SCAN_THREAD = 1 << 5,
ZIO_FLAG_PHYSICAL = 1 << 6,
#define ZIO_FLAG_AGG_INHERIT (ZIO_FLAG_CANFAIL - 1)
/*
* Flags inherited by ddt, gang, and vdev children.
*/
ZIO_FLAG_CANFAIL = 1 << 7, /* must be first for INHERIT */
ZIO_FLAG_SPECULATIVE = 1 << 8,
ZIO_FLAG_CONFIG_WRITER = 1 << 9,
ZIO_FLAG_DONT_RETRY = 1 << 10,
ZIO_FLAG_DONT_CACHE = 1 << 11,
ZIO_FLAG_NODATA = 1 << 12,
ZIO_FLAG_INDUCE_DAMAGE = 1 << 13,
ZIO_FLAG_IO_ALLOCATING = 1 << 14,
#define ZIO_FLAG_DDT_INHERIT (ZIO_FLAG_IO_RETRY - 1)
#define ZIO_FLAG_GANG_INHERIT (ZIO_FLAG_IO_RETRY - 1)
/*
* Flags inherited by vdev children.
*/
ZIO_FLAG_IO_RETRY = 1 << 15, /* must be first for INHERIT */
ZIO_FLAG_PROBE = 1 << 16,
ZIO_FLAG_TRYHARD = 1 << 17,
ZIO_FLAG_OPTIONAL = 1 << 18,
#define ZIO_FLAG_VDEV_INHERIT (ZIO_FLAG_DONT_QUEUE - 1)
/*
* Flags not inherited by any children.
*/
ZIO_FLAG_DONT_QUEUE = 1 << 19, /* must be first for INHERIT */
ZIO_FLAG_DONT_PROPAGATE = 1 << 20,
ZIO_FLAG_IO_BYPASS = 1 << 21,
ZIO_FLAG_IO_REWRITE = 1 << 22,
ZIO_FLAG_RAW_COMPRESS = 1 << 23,
ZIO_FLAG_RAW_ENCRYPT = 1 << 24,
ZIO_FLAG_GANG_CHILD = 1 << 25,
ZIO_FLAG_DDT_CHILD = 1 << 26,
ZIO_FLAG_GODFATHER = 1 << 27,
ZIO_FLAG_NOPWRITE = 1 << 28,
ZIO_FLAG_REEXECUTED = 1 << 29,
ZIO_FLAG_DELEGATED = 1 << 30,
ZIO_FLAG_FASTWRITE = 1 << 31,
};
#define ZIO_FLAG_MUSTSUCCEED 0
#define ZIO_FLAG_RAW (ZIO_FLAG_RAW_COMPRESS | ZIO_FLAG_RAW_ENCRYPT)
#define ZIO_DDT_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_DDT_INHERIT) | \
ZIO_FLAG_DDT_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_GANG_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_GANG_INHERIT) | \
ZIO_FLAG_GANG_CHILD | ZIO_FLAG_CANFAIL)
#define ZIO_VDEV_CHILD_FLAGS(zio) \
(((zio)->io_flags & ZIO_FLAG_VDEV_INHERIT) | \
- ZIO_FLAG_CANFAIL)
+ ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_CANFAIL)
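A worked note (illustrative) on the "must be first for INHERIT" convention used in the flag definitions above:

/*
 * Because ZIO_FLAG_CANFAIL is 1 << 7, ZIO_FLAG_AGG_INHERIT is
 * (1 << 7) - 1 = 0x7f: a mask of every flag declared before it (bits 0-6).
 * Likewise ZIO_FLAG_DDT_INHERIT and ZIO_FLAG_GANG_INHERIT cover bits 0-14,
 * and ZIO_FLAG_VDEV_INHERIT covers bits 0-18, so each child type inherits
 * exactly the flags listed above its "must be first for INHERIT" marker.
 */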
#define ZIO_CHILD_BIT(x) (1 << (x))
#define ZIO_CHILD_BIT_IS_SET(val, x) ((val) & (1 << (x)))
enum zio_child {
ZIO_CHILD_VDEV = 0,
ZIO_CHILD_GANG,
ZIO_CHILD_DDT,
ZIO_CHILD_LOGICAL,
ZIO_CHILD_TYPES
};
#define ZIO_CHILD_VDEV_BIT ZIO_CHILD_BIT(ZIO_CHILD_VDEV)
#define ZIO_CHILD_GANG_BIT ZIO_CHILD_BIT(ZIO_CHILD_GANG)
#define ZIO_CHILD_DDT_BIT ZIO_CHILD_BIT(ZIO_CHILD_DDT)
#define ZIO_CHILD_LOGICAL_BIT ZIO_CHILD_BIT(ZIO_CHILD_LOGICAL)
#define ZIO_CHILD_ALL_BITS \
(ZIO_CHILD_VDEV_BIT | ZIO_CHILD_GANG_BIT | \
ZIO_CHILD_DDT_BIT | ZIO_CHILD_LOGICAL_BIT)
enum zio_wait_type {
ZIO_WAIT_READY = 0,
ZIO_WAIT_DONE,
ZIO_WAIT_TYPES
};
/*
* We'll take the unused errnos, 'EBADE' and 'EBADR' (from the Convergent
* graveyard) to indicate checksum errors and fragmentation.
*/
#define ECKSUM EBADE
#define EFRAGS EBADR
+/* Similarly for ENOTACTIVE */
+#define ENOTACTIVE ENOANO
+
typedef void zio_done_func_t(zio_t *zio);
extern int zio_dva_throttle_enabled;
extern const char *zio_type_name[ZIO_TYPES];
/*
* A bookmark is a four-tuple <objset, object, level, blkid> that uniquely
* identifies any block in the pool. By convention, the meta-objset (MOS)
* is objset 0, and the meta-dnode is object 0. This covers all blocks
* except root blocks and ZIL blocks, which are defined as follows:
*
* Root blocks (objset_phys_t) are object 0, level -1: <objset, 0, -1, 0>.
* ZIL blocks are bookmarked <objset, 0, -2, blkid == ZIL sequence number>.
* dmu_sync()ed ZIL data blocks are bookmarked <objset, object, -2, blkid>.
* dnode visit bookmarks are <objset, object id of dnode, -3, 0>.
*
* Note: this structure is called a bookmark because its original purpose
* was to remember where to resume a pool-wide traverse.
*
* Note: this structure is passed between userland and the kernel, and is
* stored on disk (by virtue of being incorporated into other on-disk
* structures, e.g. dsl_scan_phys_t).
*/
struct zbookmark_phys {
uint64_t zb_objset;
uint64_t zb_object;
int64_t zb_level;
uint64_t zb_blkid;
};
#define SET_BOOKMARK(zb, objset, object, level, blkid) \
{ \
(zb)->zb_objset = objset; \
(zb)->zb_object = object; \
(zb)->zb_level = level; \
(zb)->zb_blkid = blkid; \
}
#define ZB_DESTROYED_OBJSET (-1ULL)
#define ZB_ROOT_OBJECT (0ULL)
#define ZB_ROOT_LEVEL (-1LL)
#define ZB_ROOT_BLKID (0ULL)
#define ZB_ZIL_OBJECT (0ULL)
#define ZB_ZIL_LEVEL (-2LL)
#define ZB_DNODE_LEVEL (-3LL)
#define ZB_DNODE_BLKID (0ULL)
#define ZB_IS_ZERO(zb) \
((zb)->zb_objset == 0 && (zb)->zb_object == 0 && \
(zb)->zb_level == 0 && (zb)->zb_blkid == 0)
#define ZB_IS_ROOT(zb) \
((zb)->zb_object == ZB_ROOT_OBJECT && \
(zb)->zb_level == ZB_ROOT_LEVEL && \
(zb)->zb_blkid == ZB_ROOT_BLKID)
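An illustrative use of SET_BOOKMARK() for the ZIL convention described above (objset, object 0, level -2, blkid == ZIL sequence number); the helper is hypothetical and not part of this patch:

static void
zil_bookmark_sketch(uint64_t objset_id, uint64_t zil_seq, zbookmark_phys_t *zb)
{
	SET_BOOKMARK(zb, objset_id, ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, zil_seq);
	ASSERT(!ZB_IS_ROOT(zb));
}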
typedef struct zio_prop {
enum zio_checksum zp_checksum;
enum zio_compress zp_compress;
dmu_object_type_t zp_type;
uint8_t zp_level;
uint8_t zp_copies;
boolean_t zp_dedup;
boolean_t zp_dedup_verify;
boolean_t zp_nopwrite;
boolean_t zp_encrypt;
boolean_t zp_byteorder;
uint8_t zp_salt[ZIO_DATA_SALT_LEN];
uint8_t zp_iv[ZIO_DATA_IV_LEN];
uint8_t zp_mac[ZIO_DATA_MAC_LEN];
} zio_prop_t;
typedef struct zio_cksum_report zio_cksum_report_t;
typedef void zio_cksum_finish_f(zio_cksum_report_t *rep,
const abd_t *good_data);
typedef void zio_cksum_free_f(void *cbdata, size_t size);
struct zio_bad_cksum; /* defined in zio_checksum.h */
struct dnode_phys;
struct abd;
struct zio_cksum_report {
struct zio_cksum_report *zcr_next;
nvlist_t *zcr_ereport;
nvlist_t *zcr_detector;
void *zcr_cbdata;
size_t zcr_cbinfo; /* passed to zcr_free() */
uint64_t zcr_align;
uint64_t zcr_length;
zio_cksum_finish_f *zcr_finish;
zio_cksum_free_f *zcr_free;
/* internal use only */
struct zio_bad_cksum *zcr_ckinfo; /* information from failure */
};
typedef void zio_vsd_cksum_report_f(zio_t *zio, zio_cksum_report_t *zcr,
void *arg);
zio_vsd_cksum_report_f zio_vsd_default_cksum_report;
typedef struct zio_vsd_ops {
zio_done_func_t *vsd_free;
zio_vsd_cksum_report_f *vsd_cksum_report;
} zio_vsd_ops_t;
typedef struct zio_gang_node {
zio_gbh_phys_t *gn_gbh;
struct zio_gang_node *gn_child[SPA_GBH_NBLKPTRS];
} zio_gang_node_t;
typedef zio_t *zio_gang_issue_func_t(zio_t *zio, blkptr_t *bp,
zio_gang_node_t *gn, struct abd *data, uint64_t offset);
typedef void zio_transform_func_t(zio_t *zio, struct abd *data, uint64_t size);
typedef struct zio_transform {
struct abd *zt_orig_abd;
uint64_t zt_orig_size;
uint64_t zt_bufsize;
zio_transform_func_t *zt_transform;
struct zio_transform *zt_next;
} zio_transform_t;
typedef int zio_pipe_stage_t(zio_t *zio);
/*
* The io_reexecute flags are distinct from io_flags because the child must
* be able to propagate them to the parent. The normal io_flags are local
* to the zio, not protected by any lock, and not modifiable by children;
* the reexecute flags are protected by io_lock, modifiable by children,
* and always propagated -- even when ZIO_FLAG_DONT_PROPAGATE is set.
*/
#define ZIO_REEXECUTE_NOW 0x01
#define ZIO_REEXECUTE_SUSPEND 0x02
typedef struct zio_alloc_list {
list_t zal_list;
uint64_t zal_size;
} zio_alloc_list_t;
typedef struct zio_link {
zio_t *zl_parent;
zio_t *zl_child;
list_node_t zl_parent_node;
list_node_t zl_child_node;
} zio_link_t;
struct zio {
/* Core information about this I/O */
zbookmark_phys_t io_bookmark;
zio_prop_t io_prop;
zio_type_t io_type;
enum zio_child io_child_type;
int io_cmd;
zio_priority_t io_priority;
uint8_t io_reexecute;
uint8_t io_state[ZIO_WAIT_TYPES];
uint64_t io_txg;
spa_t *io_spa;
blkptr_t *io_bp;
blkptr_t *io_bp_override;
blkptr_t io_bp_copy;
list_t io_parent_list;
list_t io_child_list;
zio_t *io_logical;
zio_transform_t *io_transform_stack;
/* Callback info */
zio_done_func_t *io_ready;
zio_done_func_t *io_children_ready;
zio_done_func_t *io_physdone;
zio_done_func_t *io_done;
void *io_private;
int64_t io_prev_space_delta; /* DMU private */
blkptr_t io_bp_orig;
/* io_lsize != io_orig_size iff this is a raw write */
uint64_t io_lsize;
/* Data represented by this I/O */
struct abd *io_abd;
struct abd *io_orig_abd;
uint64_t io_size;
uint64_t io_orig_size;
/* Stuff for the vdev stack */
vdev_t *io_vd;
void *io_vsd;
const zio_vsd_ops_t *io_vsd_ops;
uint64_t io_offset;
hrtime_t io_timestamp; /* submitted at */
hrtime_t io_queued_timestamp;
hrtime_t io_target_timestamp;
hrtime_t io_delta; /* vdev queue service delta */
hrtime_t io_delay; /* Device access time (disk or */
/* file). */
avl_node_t io_queue_node;
avl_node_t io_offset_node;
avl_node_t io_alloc_node;
zio_alloc_list_t io_alloc_list;
/* Internal pipeline state */
enum zio_flag io_flags;
enum zio_stage io_stage;
enum zio_stage io_pipeline;
enum zio_flag io_orig_flags;
enum zio_stage io_orig_stage;
enum zio_stage io_orig_pipeline;
enum zio_stage io_pipeline_trace;
int io_error;
int io_child_error[ZIO_CHILD_TYPES];
uint64_t io_children[ZIO_CHILD_TYPES][ZIO_WAIT_TYPES];
uint64_t io_child_count;
uint64_t io_phys_children;
uint64_t io_parent_count;
uint64_t *io_stall;
zio_t *io_gang_leader;
zio_gang_node_t *io_gang_tree;
void *io_executor;
void *io_waiter;
kmutex_t io_lock;
kcondvar_t io_cv;
/* FMA state */
zio_cksum_report_t *io_cksum_report;
uint64_t io_ena;
/* Taskq dispatching state */
taskq_ent_t io_tqent;
};
extern int zio_bookmark_compare(const void *, const void *);
extern zio_t *zio_null(zio_t *pio, spa_t *spa, vdev_t *vd,
zio_done_func_t *done, void *private, enum zio_flag flags);
extern zio_t *zio_root(spa_t *spa,
zio_done_func_t *done, void *private, enum zio_flag flags);
extern zio_t *zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
struct abd *data, uint64_t lsize, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb);
extern zio_t *zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *physdone, zio_done_func_t *done,
void *private, zio_priority_t priority, enum zio_flag flags,
const zbookmark_phys_t *zb);
extern zio_t *zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
struct abd *data, uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb);
extern void zio_write_override(zio_t *zio, blkptr_t *bp, int copies,
boolean_t nopwrite);
extern void zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp);
extern zio_t *zio_claim(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp,
zio_done_func_t *done, void *private, enum zio_flag flags);
extern zio_t *zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *private, enum zio_flag flags);
extern zio_t *zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *private, zio_priority_t priority,
enum zio_flag flags, boolean_t labels);
extern zio_t *zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset,
uint64_t size, struct abd *data, int checksum,
zio_done_func_t *done, void *private, zio_priority_t priority,
enum zio_flag flags, boolean_t labels);
extern zio_t *zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg,
const blkptr_t *bp, enum zio_flag flags);
extern int zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg,
blkptr_t *new_bp, uint64_t size, boolean_t *slog);
extern void zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp);
extern void zio_flush(zio_t *zio, vdev_t *vd);
extern void zio_shrink(zio_t *zio, uint64_t size);
extern int zio_wait(zio_t *zio);
extern void zio_nowait(zio_t *zio);
extern void zio_execute(zio_t *zio);
extern void zio_interrupt(zio_t *zio);
extern void zio_delay_init(zio_t *zio);
extern void zio_delay_interrupt(zio_t *zio);
extern void zio_deadman(zio_t *zio, char *tag);
extern zio_t *zio_walk_parents(zio_t *cio, zio_link_t **);
extern zio_t *zio_walk_children(zio_t *pio, zio_link_t **);
extern zio_t *zio_unique_parent(zio_t *cio);
extern void zio_add_child(zio_t *pio, zio_t *cio);
extern void *zio_buf_alloc(size_t size);
extern void zio_buf_free(void *buf, size_t size);
extern void *zio_data_buf_alloc(size_t size);
extern void zio_data_buf_free(void *buf, size_t size);
extern void zio_push_transform(zio_t *zio, struct abd *abd, uint64_t size,
uint64_t bufsize, zio_transform_func_t *transform);
extern void zio_pop_transforms(zio_t *zio);
extern void zio_resubmit_stage_async(void *);
extern zio_t *zio_vdev_child_io(zio_t *zio, blkptr_t *bp, vdev_t *vd,
uint64_t offset, struct abd *data, uint64_t size, int type,
zio_priority_t priority, enum zio_flag flags,
zio_done_func_t *done, void *private);
extern zio_t *zio_vdev_delegated_io(vdev_t *vd, uint64_t offset,
struct abd *data, uint64_t size, int type, zio_priority_t priority,
enum zio_flag flags, zio_done_func_t *done, void *private);
extern void zio_vdev_io_bypass(zio_t *zio);
extern void zio_vdev_io_reissue(zio_t *zio);
extern void zio_vdev_io_redone(zio_t *zio);
extern void zio_change_priority(zio_t *pio, zio_priority_t priority);
extern void zio_checksum_verified(zio_t *zio);
extern int zio_worst_error(int e1, int e2);
extern enum zio_checksum zio_checksum_select(enum zio_checksum child,
enum zio_checksum parent);
extern enum zio_checksum zio_checksum_dedup_select(spa_t *spa,
enum zio_checksum child, enum zio_checksum parent);
extern enum zio_compress zio_compress_select(spa_t *spa,
enum zio_compress child, enum zio_compress parent);
extern void zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t);
extern int zio_resume(spa_t *spa);
extern void zio_resume_wait(spa_t *spa);
/*
* Initial setup and teardown.
*/
extern void zio_init(void);
extern void zio_fini(void);
/*
* Fault injection
*/
struct zinject_record;
extern uint32_t zio_injection_enabled;
extern int zio_inject_fault(char *name, int flags, int *id,
struct zinject_record *record);
extern int zio_inject_list_next(int *id, char *name, size_t buflen,
struct zinject_record *record);
extern int zio_clear_fault(int id);
extern void zio_handle_panic_injection(spa_t *spa, char *tag, uint64_t type);
extern int zio_handle_fault_injection(zio_t *zio, int error);
extern int zio_handle_device_injection(vdev_t *vd, zio_t *zio, int error);
extern int zio_handle_device_injections(vdev_t *vd, zio_t *zio, int err1,
int err2);
extern int zio_handle_label_injection(zio_t *zio, int error);
extern void zio_handle_ignored_writes(zio_t *zio);
extern hrtime_t zio_handle_io_delay(zio_t *zio);
/*
* Checksum ereport functions
*/
extern void zfs_ereport_start_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, void *arg, struct zio_bad_cksum *info);
extern void zfs_ereport_finish_checksum(zio_cksum_report_t *report,
const abd_t *good_data, const abd_t *bad_data, boolean_t drop_if_identical);
extern void zfs_ereport_free_checksum(zio_cksum_report_t *report);
/* If we have the good data in hand, this function can be used */
extern void zfs_ereport_post_checksum(spa_t *spa, vdev_t *vd,
const zbookmark_phys_t *zb, struct zio *zio, uint64_t offset,
uint64_t length, const abd_t *good_data, const abd_t *bad_data,
struct zio_bad_cksum *info);
/* Called from spa_sync(), but primarily an injection handler */
extern void spa_handle_ignored_writes(spa_t *spa);
/* zbookmark_phys functions */
boolean_t zbookmark_subtree_completed(const struct dnode_phys *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block);
int zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2,
uint8_t ibs2, const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2);
#ifdef __cplusplus
}
#endif
#endif /* _ZIO_H */
diff --git a/include/sys/zio_priority.h b/include/sys/zio_priority.h
index 3fc3589be0c1..c2cc8b2d5975 100644
--- a/include/sys/zio_priority.h
+++ b/include/sys/zio_priority.h
@@ -1,39 +1,40 @@
/*
* CDDL HEADER START
*
* This file and its contents are supplied under the terms of the
* Common Development and Distribution License ("CDDL"), version 1.0.
* You may only use this file in accordance with the terms of version
* 1.0 of the CDDL.
*
* A full copy of the text of the CDDL should have accompanied this
* source. A copy of the CDDL is also available via the Internet at
* http://www.illumos.org/license/CDDL.
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2014 by Delphix. All rights reserved.
*/
#ifndef _ZIO_PRIORITY_H
#define _ZIO_PRIORITY_H
#ifdef __cplusplus
extern "C" {
#endif
typedef enum zio_priority {
ZIO_PRIORITY_SYNC_READ,
ZIO_PRIORITY_SYNC_WRITE, /* ZIL */
ZIO_PRIORITY_ASYNC_READ, /* prefetch */
ZIO_PRIORITY_ASYNC_WRITE, /* spa_sync() */
ZIO_PRIORITY_SCRUB, /* asynchronous scrub/resilver reads */
+ ZIO_PRIORITY_REMOVAL, /* reads/writes for vdev removal */
ZIO_PRIORITY_NUM_QUEUEABLE,
ZIO_PRIORITY_NOW, /* non-queued i/os (e.g. free) */
} zio_priority_t;
#ifdef __cplusplus
}
#endif
#endif /* _ZIO_PRIORITY_H */
diff --git a/include/zfeature_common.h b/include/zfeature_common.h
index 3afa64b11dbc..13670c8e5123 100644
--- a/include/zfeature_common.h
+++ b/include/zfeature_common.h
@@ -1,106 +1,108 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
*/
#ifndef _ZFEATURE_COMMON_H
#define _ZFEATURE_COMMON_H
#include <sys/fs/zfs.h>
#include <sys/inttypes.h>
#include <sys/types.h>
#ifdef __cplusplus
extern "C" {
#endif
struct zfeature_info;
typedef enum spa_feature {
SPA_FEATURE_NONE = -1,
SPA_FEATURE_ASYNC_DESTROY,
SPA_FEATURE_EMPTY_BPOBJ,
SPA_FEATURE_LZ4_COMPRESS,
SPA_FEATURE_MULTI_VDEV_CRASH_DUMP,
SPA_FEATURE_SPACEMAP_HISTOGRAM,
SPA_FEATURE_ENABLED_TXG,
SPA_FEATURE_HOLE_BIRTH,
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_EMBEDDED_DATA,
SPA_FEATURE_BOOKMARKS,
SPA_FEATURE_FS_SS_LIMIT,
SPA_FEATURE_LARGE_BLOCKS,
SPA_FEATURE_LARGE_DNODE,
SPA_FEATURE_SHA512,
SPA_FEATURE_SKEIN,
SPA_FEATURE_EDONR,
SPA_FEATURE_USEROBJ_ACCOUNTING,
SPA_FEATURE_ENCRYPTION,
SPA_FEATURE_PROJECT_QUOTA,
+ SPA_FEATURE_DEVICE_REMOVAL,
+ SPA_FEATURE_OBSOLETE_COUNTS,
SPA_FEATURES
} spa_feature_t;
#define SPA_FEATURE_DISABLED (-1ULL)
typedef enum zfeature_flags {
/* Can open pool readonly even if this feature is not supported. */
ZFEATURE_FLAG_READONLY_COMPAT = (1 << 0),
/* Is this feature necessary to read the MOS? */
ZFEATURE_FLAG_MOS = (1 << 1),
/* Activate this feature at the same time it is enabled. */
ZFEATURE_FLAG_ACTIVATE_ON_ENABLE = (1 << 2),
/* Each dataset has a field set if it has ever used this feature. */
ZFEATURE_FLAG_PER_DATASET = (1 << 3)
} zfeature_flags_t;
typedef struct zfeature_info {
spa_feature_t fi_feature;
const char *fi_uname; /* User-facing feature name */
const char *fi_guid; /* On-disk feature identifier */
const char *fi_desc; /* Feature description */
zfeature_flags_t fi_flags;
/* array of dependencies, terminated by SPA_FEATURE_NONE */
const spa_feature_t *fi_depends;
} zfeature_info_t;
typedef int (zfeature_func_t)(zfeature_info_t *, void *);
#define ZFS_FEATURE_DEBUG
extern zfeature_info_t spa_feature_table[SPA_FEATURES];
extern boolean_t zfeature_is_valid_guid(const char *);
extern boolean_t zfeature_is_supported(const char *);
extern int zfeature_lookup_name(const char *, spa_feature_t *);
extern boolean_t zfeature_depends_on(spa_feature_t, spa_feature_t);
extern void zpool_feature_init(void);
#ifdef __cplusplus
}
#endif
#endif /* _ZFEATURE_COMMON_H */
diff --git a/include/zfs_deleg.h b/include/zfs_deleg.h
index e18849ebbaf8..32d66980e574 100644
--- a/include/zfs_deleg.h
+++ b/include/zfs_deleg.h
@@ -1,99 +1,100 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2010 Nexenta Systems, Inc. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
*/
#ifndef _ZFS_DELEG_H
#define _ZFS_DELEG_H
#include <sys/fs/zfs.h>
#ifdef __cplusplus
extern "C" {
#endif
#define ZFS_DELEG_SET_NAME_CHR '@' /* set name lead char */
#define ZFS_DELEG_FIELD_SEP_CHR '$' /* field separator */
/*
* Max name length for a delegation attribute
*/
#define ZFS_MAX_DELEG_NAME 128
#define ZFS_DELEG_LOCAL 'l'
#define ZFS_DELEG_DESCENDENT 'd'
#define ZFS_DELEG_NA '-'
typedef enum {
ZFS_DELEG_NOTE_CREATE,
ZFS_DELEG_NOTE_DESTROY,
ZFS_DELEG_NOTE_SNAPSHOT,
ZFS_DELEG_NOTE_ROLLBACK,
ZFS_DELEG_NOTE_CLONE,
ZFS_DELEG_NOTE_PROMOTE,
ZFS_DELEG_NOTE_RENAME,
ZFS_DELEG_NOTE_SEND,
ZFS_DELEG_NOTE_RECEIVE,
ZFS_DELEG_NOTE_ALLOW,
ZFS_DELEG_NOTE_USERPROP,
ZFS_DELEG_NOTE_MOUNT,
ZFS_DELEG_NOTE_SHARE,
ZFS_DELEG_NOTE_USERQUOTA,
ZFS_DELEG_NOTE_GROUPQUOTA,
ZFS_DELEG_NOTE_USERUSED,
ZFS_DELEG_NOTE_GROUPUSED,
ZFS_DELEG_NOTE_USEROBJQUOTA,
ZFS_DELEG_NOTE_GROUPOBJQUOTA,
ZFS_DELEG_NOTE_USEROBJUSED,
ZFS_DELEG_NOTE_GROUPOBJUSED,
ZFS_DELEG_NOTE_HOLD,
ZFS_DELEG_NOTE_RELEASE,
ZFS_DELEG_NOTE_DIFF,
ZFS_DELEG_NOTE_BOOKMARK,
ZFS_DELEG_NOTE_LOAD_KEY,
ZFS_DELEG_NOTE_CHANGE_KEY,
ZFS_DELEG_NOTE_PROJECTUSED,
ZFS_DELEG_NOTE_PROJECTQUOTA,
ZFS_DELEG_NOTE_PROJECTOBJUSED,
ZFS_DELEG_NOTE_PROJECTOBJQUOTA,
+ ZFS_DELEG_NOTE_REMAP,
ZFS_DELEG_NOTE_NONE
} zfs_deleg_note_t;
typedef struct zfs_deleg_perm_tab {
char *z_perm;
zfs_deleg_note_t z_note;
} zfs_deleg_perm_tab_t;
extern zfs_deleg_perm_tab_t zfs_deleg_perm_tab[];
int zfs_deleg_verify_nvlist(nvlist_t *nvlist);
void zfs_deleg_whokey(char *attr, zfs_deleg_who_type_t type,
char checkflag, void *data);
const char *zfs_deleg_canonicalize_perm(const char *perm);
#ifdef __cplusplus
}
#endif
#endif /* _ZFS_DELEG_H */
diff --git a/lib/libzfs/libzfs_dataset.c b/lib/libzfs/libzfs_dataset.c
index 65834b6d1710..3eb3aebbc6bc 100644
--- a/lib/libzfs/libzfs_dataset.c
+++ b/lib/libzfs/libzfs_dataset.c
@@ -1,5280 +1,5298 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2012 DEY Storage Systems, Inc. All rights reserved.
* Copyright (c) 2012 Pawel Jakub Dawidek <pawel@dawidek.net>.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright 2017 RackTop Systems.
*/
#include <ctype.h>
#include <errno.h>
#include <libintl.h>
#include <math.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <stddef.h>
#include <zone.h>
#include <fcntl.h>
#include <sys/mntent.h>
#include <sys/mount.h>
#include <pwd.h>
#include <grp.h>
#include <stddef.h>
#include <ucred.h>
#ifdef HAVE_IDMAP
#include <idmap.h>
#include <aclutils.h>
#include <directory.h>
#endif /* HAVE_IDMAP */
#include <sys/dnode.h>
#include <sys/spa.h>
#include <sys/zap.h>
#include <sys/dsl_crypt.h>
#include <libzfs.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "libzfs.h"
#include "zfs_deleg.h"
static int userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp);
/*
* Given a single type (not a mask of types), return the type in a human
* readable form.
*/
const char *
zfs_type_to_name(zfs_type_t type)
{
switch (type) {
case ZFS_TYPE_FILESYSTEM:
return (dgettext(TEXT_DOMAIN, "filesystem"));
case ZFS_TYPE_SNAPSHOT:
return (dgettext(TEXT_DOMAIN, "snapshot"));
case ZFS_TYPE_VOLUME:
return (dgettext(TEXT_DOMAIN, "volume"));
case ZFS_TYPE_POOL:
return (dgettext(TEXT_DOMAIN, "pool"));
case ZFS_TYPE_BOOKMARK:
return (dgettext(TEXT_DOMAIN, "bookmark"));
default:
assert(!"unhandled zfs_type_t");
}
return (NULL);
}
/*
* Validate a ZFS path. This is used even before trying to open the dataset, to
* provide a more meaningful error message. We call zfs_error_aux() to
* explain exactly why the name was not valid.
*/
int
zfs_validate_name(libzfs_handle_t *hdl, const char *path, int type,
boolean_t modifying)
{
namecheck_err_t why;
char what;
if (entity_namecheck(path, &why, &what) != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is too long"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in name"), what);
break;
case NAME_ERR_MULTIPLE_DELIMITERS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' and/or '#' delimiters in "
"name"));
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool doesn't begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"reserved disk name"));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"(%d) not defined"), why);
break;
}
}
return (0);
}
if (!(type & ZFS_TYPE_SNAPSHOT) && strchr(path, '@') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshot delimiter '@' is not expected here"));
return (0);
}
if (type == ZFS_TYPE_SNAPSHOT && strchr(path, '@') == NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing '@' delimiter in snapshot name"));
return (0);
}
if (!(type & ZFS_TYPE_BOOKMARK) && strchr(path, '#') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bookmark delimiter '#' is not expected here"));
return (0);
}
if (type == ZFS_TYPE_BOOKMARK && strchr(path, '#') == NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing '#' delimiter in bookmark name"));
return (0);
}
if (modifying && strchr(path, '%') != NULL) {
if (hdl != NULL)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid character %c in name"), '%');
return (0);
}
return (-1);
}
int
zfs_name_valid(const char *name, zfs_type_t type)
{
if (type == ZFS_TYPE_POOL)
return (zpool_name_valid(NULL, B_FALSE, name));
return (zfs_validate_name(NULL, name, type, B_FALSE));
}
/*
* This function takes the raw DSL properties, and filters out the user-defined
* properties into a separate nvlist.
*/
static nvlist_t *
process_user_props(zfs_handle_t *zhp, nvlist_t *props)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvpair_t *elem;
nvlist_t *propval;
nvlist_t *nvl;
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
if (!zfs_prop_user(nvpair_name(elem)))
continue;
verify(nvpair_value_nvlist(elem, &propval) == 0);
if (nvlist_add_nvlist(nvl, nvpair_name(elem), propval) != 0) {
nvlist_free(nvl);
(void) no_memory(hdl);
return (NULL);
}
}
return (nvl);
}
static zpool_handle_t *
zpool_add_handle(zfs_handle_t *zhp, const char *pool_name)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph;
if ((zph = zpool_open_canfail(hdl, pool_name)) != NULL) {
if (hdl->libzfs_pool_handles != NULL)
zph->zpool_next = hdl->libzfs_pool_handles;
hdl->libzfs_pool_handles = zph;
}
return (zph);
}
static zpool_handle_t *
zpool_find_handle(zfs_handle_t *zhp, const char *pool_name, int len)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zpool_handle_t *zph = hdl->libzfs_pool_handles;
while ((zph != NULL) &&
(strncmp(pool_name, zpool_get_name(zph), len) != 0))
zph = zph->zpool_next;
return (zph);
}
/*
* Returns a handle to the pool that contains the provided dataset.
* If a handle to that pool already exists then that handle is returned.
* Otherwise, a new handle is created and added to the list of handles.
*/
static zpool_handle_t *
zpool_handle(zfs_handle_t *zhp)
{
char *pool_name;
int len;
zpool_handle_t *zph;
len = strcspn(zhp->zfs_name, "/@#") + 1;
pool_name = zfs_alloc(zhp->zfs_hdl, len);
(void) strlcpy(pool_name, zhp->zfs_name, len);
zph = zpool_find_handle(zhp, pool_name, len);
if (zph == NULL)
zph = zpool_add_handle(zhp, pool_name);
free(pool_name);
return (zph);
}
void
zpool_free_handles(libzfs_handle_t *hdl)
{
zpool_handle_t *next, *zph = hdl->libzfs_pool_handles;
while (zph != NULL) {
next = zph->zpool_next;
zpool_close(zph);
zph = next;
}
hdl->libzfs_pool_handles = NULL;
}
/*
* Utility function to gather stats (objset and zpl) for the given object.
*/
static int
get_stats_ioctl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) strlcpy(zc->zc_name, zhp->zfs_name, sizeof (zc->zc_name));
while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, zc) != 0) {
return (-1);
}
} else {
return (-1);
}
}
return (0);
}
/*
* Utility function to get the received properties of the given object.
*/
static int
get_recvd_props_ioctl(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *recvdprops;
zfs_cmd_t zc = {"\0"};
int err;
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
while (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_RECVD_PROPS, &zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
return (-1);
}
} else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
err = zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &recvdprops);
zcmd_free_nvlists(&zc);
if (err != 0)
return (-1);
nvlist_free(zhp->zfs_recvd_props);
zhp->zfs_recvd_props = recvdprops;
return (0);
}
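/*
* Copy the objset stats returned by the ioctl into the handle and install
* the property nvlist, splitting user-defined properties into their own
* list.
*/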
static int
put_stats_zhdl(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
nvlist_t *allprops, *userprops;
zhp->zfs_dmustats = zc->zc_objset_stats; /* structure assignment */
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, zc, &allprops) != 0) {
return (-1);
}
/*
* XXX Why do we store the user props separately, in addition to
* storing them in zfs_props?
*/
if ((userprops = process_user_props(zhp, allprops)) == NULL) {
nvlist_free(allprops);
return (-1);
}
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
zhp->zfs_props = allprops;
zhp->zfs_user_props = userprops;
return (0);
}
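/*
* Fetch fresh objset stats and properties from the kernel and store them
* in the handle.
*/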
static int
get_stats(zfs_handle_t *zhp)
{
int rc = 0;
zfs_cmd_t zc = {"\0"};
if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
return (-1);
if (get_stats_ioctl(zhp, &zc) != 0)
rc = -1;
else if (put_stats_zhdl(zhp, &zc) != 0)
rc = -1;
zcmd_free_nvlists(&zc);
return (rc);
}
/*
* Refresh the properties currently stored in the handle.
*/
void
zfs_refresh_properties(zfs_handle_t *zhp)
{
(void) get_stats(zhp);
}
/*
* Makes a handle from the given dataset name. Used by zfs_open() and
* zfs_iter_* to create child handles on the fly.
*/
static int
make_dataset_handle_common(zfs_handle_t *zhp, zfs_cmd_t *zc)
{
if (put_stats_zhdl(zhp, zc) != 0)
return (-1);
/*
* We've managed to open the dataset and gather statistics. Determine
* the high-level type.
*/
if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_head_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_head_type = ZFS_TYPE_FILESYSTEM;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_OTHER)
return (-1);
else
abort();
if (zhp->zfs_dmustats.dds_is_snapshot)
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZVOL)
zhp->zfs_type = ZFS_TYPE_VOLUME;
else if (zhp->zfs_dmustats.dds_type == DMU_OST_ZFS)
zhp->zfs_type = ZFS_TYPE_FILESYSTEM;
else
abort(); /* we should never see any other types */
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL)
return (-1);
return (0);
}
zfs_handle_t *
make_dataset_handle(libzfs_handle_t *hdl, const char *path)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0) {
free(zhp);
return (NULL);
}
if (get_stats_ioctl(zhp, &zc) == -1) {
zcmd_free_nvlists(&zc);
free(zhp);
return (NULL);
}
if (make_dataset_handle_common(zhp, &zc) == -1) {
free(zhp);
zhp = NULL;
}
zcmd_free_nvlists(&zc);
return (zhp);
}
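/*
* Like make_dataset_handle(), but uses the stats already present in the
* given zfs_cmd_t instead of issuing another objset-stats ioctl.
*/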
zfs_handle_t *
make_dataset_handle_zc(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
if (make_dataset_handle_common(zhp, zc) == -1) {
free(zhp);
return (NULL);
}
return (zhp);
}
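/*
* Create a lightweight snapshot handle from the parent handle and the name
* in the zfs_cmd_t, without fetching any statistics.
*/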
zfs_handle_t *
make_dataset_simple_handle_zc(zfs_handle_t *pzhp, zfs_cmd_t *zc)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = pzhp->zfs_hdl;
(void) strlcpy(zhp->zfs_name, zc->zc_name, sizeof (zhp->zfs_name));
zhp->zfs_head_type = pzhp->zfs_type;
zhp->zfs_type = ZFS_TYPE_SNAPSHOT;
zhp->zpool_hdl = zpool_handle(zhp);
return (zhp);
}
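/*
* Duplicate an existing handle, including its name, types, dmu stats,
* property lists, and mount information.
*/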
zfs_handle_t *
zfs_handle_dup(zfs_handle_t *zhp_orig)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
zhp->zfs_hdl = zhp_orig->zfs_hdl;
zhp->zpool_hdl = zhp_orig->zpool_hdl;
(void) strlcpy(zhp->zfs_name, zhp_orig->zfs_name,
sizeof (zhp->zfs_name));
zhp->zfs_type = zhp_orig->zfs_type;
zhp->zfs_head_type = zhp_orig->zfs_head_type;
zhp->zfs_dmustats = zhp_orig->zfs_dmustats;
if (zhp_orig->zfs_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_props, &zhp->zfs_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_user_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_user_props,
&zhp->zfs_user_props, 0) != 0) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
if (zhp_orig->zfs_recvd_props != NULL) {
if (nvlist_dup(zhp_orig->zfs_recvd_props,
&zhp->zfs_recvd_props, 0)) {
(void) no_memory(zhp->zfs_hdl);
zfs_close(zhp);
return (NULL);
}
}
zhp->zfs_mntcheck = zhp_orig->zfs_mntcheck;
if (zhp_orig->zfs_mntopts != NULL) {
zhp->zfs_mntopts = zfs_strdup(zhp_orig->zfs_hdl,
zhp_orig->zfs_mntopts);
}
zhp->zfs_props_table = zhp_orig->zfs_props_table;
return (zhp);
}
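/*
* Returns B_TRUE if 'path', of the form "fs#bookmark", names an existing
* bookmark.
*/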
boolean_t
zfs_bookmark_exists(const char *path)
{
nvlist_t *bmarks;
nvlist_t *props;
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *bmark_name;
char *pound;
int err;
boolean_t rv;
(void) strlcpy(fsname, path, sizeof (fsname));
pound = strchr(fsname, '#');
if (pound == NULL)
return (B_FALSE);
*pound = '\0';
bmark_name = pound + 1;
props = fnvlist_alloc();
err = lzc_get_bookmarks(fsname, props, &bmarks);
nvlist_free(props);
if (err != 0) {
nvlist_free(bmarks);
return (B_FALSE);
}
rv = nvlist_exists(bmarks, bmark_name);
nvlist_free(bmarks);
return (rv);
}
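/*
* Construct a handle for a bookmark, given its parent dataset handle, the
* full bookmark name, and its property list.
*/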
zfs_handle_t *
make_bookmark_handle(zfs_handle_t *parent, const char *path,
nvlist_t *bmark_props)
{
zfs_handle_t *zhp = calloc(1, sizeof (zfs_handle_t));
if (zhp == NULL)
return (NULL);
/* Fill in the name. */
zhp->zfs_hdl = parent->zfs_hdl;
(void) strlcpy(zhp->zfs_name, path, sizeof (zhp->zfs_name));
/* Set the property lists. */
if (nvlist_dup(bmark_props, &zhp->zfs_props, 0) != 0) {
free(zhp);
return (NULL);
}
/* Set the types. */
zhp->zfs_head_type = parent->zfs_head_type;
zhp->zfs_type = ZFS_TYPE_BOOKMARK;
if ((zhp->zpool_hdl = zpool_handle(zhp)) == NULL) {
nvlist_free(zhp->zfs_props);
free(zhp);
return (NULL);
}
return (zhp);
}
struct zfs_open_bookmarks_cb_data {
const char *path;
zfs_handle_t *zhp;
};
static int
zfs_open_bookmarks_cb(zfs_handle_t *zhp, void *data)
{
struct zfs_open_bookmarks_cb_data *dp = data;
/*
* Is it the one we are looking for?
*/
if (strcmp(dp->path, zfs_get_name(zhp)) == 0) {
/*
* We found it. Save it and let the caller know we are done.
*/
dp->zhp = zhp;
return (EEXIST);
}
/*
* Not found. Close the handle and ask for another one.
*/
zfs_close(zhp);
return (0);
}
/*
* Opens the given snapshot, bookmark, filesystem, or volume. The 'types'
* argument is a mask of acceptable types. The function will print an
* appropriate error message and return NULL if it can't be opened.
*/
zfs_handle_t *
zfs_open(libzfs_handle_t *hdl, const char *path, int types)
{
zfs_handle_t *zhp;
char errbuf[1024];
char *bookp;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot open '%s'"), path);
/*
* Validate the name before we even try to open it.
*/
if (!zfs_validate_name(hdl, path, types, B_FALSE)) {
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
return (NULL);
}
/*
* Bookmarks need to be handled separately.
*/
bookp = strchr(path, '#');
if (bookp == NULL) {
/*
* Try to get stats for the dataset, which will tell us if it
* exists.
*/
errno = 0;
if ((zhp = make_dataset_handle(hdl, path)) == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
return (NULL);
}
} else {
char dsname[ZFS_MAX_DATASET_NAME_LEN];
zfs_handle_t *pzhp;
struct zfs_open_bookmarks_cb_data cb_data = {path, NULL};
/*
* We need to cut out '#' and everything after '#'
* to get the parent dataset name only.
*/
assert(bookp - path < sizeof (dsname));
(void) strncpy(dsname, path, bookp - path);
dsname[bookp - path] = '\0';
/*
* Create handle for the parent dataset.
*/
errno = 0;
if ((pzhp = make_dataset_handle(hdl, dsname)) == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
return (NULL);
}
/*
* Iterate bookmarks to find the right one.
*/
errno = 0;
if ((zfs_iter_bookmarks(pzhp, zfs_open_bookmarks_cb,
&cb_data) == 0) && (cb_data.zhp == NULL)) {
(void) zfs_error(hdl, EZFS_NOENT, errbuf);
zfs_close(pzhp);
return (NULL);
}
if (cb_data.zhp == NULL) {
(void) zfs_standard_error(hdl, errno, errbuf);
zfs_close(pzhp);
return (NULL);
}
zhp = cb_data.zhp;
/*
* Cleanup.
*/
zfs_close(pzhp);
}
if (!(types & zhp->zfs_type)) {
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Release a ZFS handle. Nothing to do but free the associated memory.
*/
void
zfs_close(zfs_handle_t *zhp)
{
if (zhp->zfs_mntopts)
free(zhp->zfs_mntopts);
nvlist_free(zhp->zfs_props);
nvlist_free(zhp->zfs_user_props);
nvlist_free(zhp->zfs_recvd_props);
free(zhp);
}
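/*
* The libzfs mnttab cache keeps the ZFS entries from MNTTAB in an AVL tree
* keyed on mnt_special (the dataset name), so repeated mount lookups do not
* have to rescan the mount table.
*/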
typedef struct mnttab_node {
struct mnttab mtn_mt;
avl_node_t mtn_node;
} mnttab_node_t;
static int
libzfs_mnttab_cache_compare(const void *arg1, const void *arg2)
{
const mnttab_node_t *mtn1 = (const mnttab_node_t *)arg1;
const mnttab_node_t *mtn2 = (const mnttab_node_t *)arg2;
int rv;
rv = strcmp(mtn1->mtn_mt.mnt_special, mtn2->mtn_mt.mnt_special);
return (AVL_ISIGN(rv));
}
void
libzfs_mnttab_init(libzfs_handle_t *hdl)
{
assert(avl_numnodes(&hdl->libzfs_mnttab_cache) == 0);
avl_create(&hdl->libzfs_mnttab_cache, libzfs_mnttab_cache_compare,
sizeof (mnttab_node_t), offsetof(mnttab_node_t, mtn_node));
}
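/*
* Re-read MNTTAB and repopulate the AVL cache with every ZFS entry,
* skipping duplicate mounts.
*/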
int
libzfs_mnttab_update(libzfs_handle_t *hdl)
{
struct mnttab entry;
/* Reopen MNTTAB to prevent reading stale data from open file */
if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL)
return (ENOENT);
while (getmntent(hdl->libzfs_mnttab, &entry) == 0) {
mnttab_node_t *mtn;
avl_index_t where;
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0)
continue;
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, entry.mnt_special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, entry.mnt_mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, entry.mnt_fstype);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, entry.mnt_mntopts);
/* Exclude duplicate mounts */
if (avl_find(&hdl->libzfs_mnttab_cache, mtn, &where) != NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
continue;
}
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
return (0);
}
void
libzfs_mnttab_fini(libzfs_handle_t *hdl)
{
void *cookie = NULL;
mnttab_node_t *mtn;
while ((mtn = avl_destroy_nodes(&hdl->libzfs_mnttab_cache, &cookie))
!= NULL) {
free(mtn->mtn_mt.mnt_special);
free(mtn->mtn_mt.mnt_mountp);
free(mtn->mtn_mt.mnt_fstype);
free(mtn->mtn_mt.mnt_mntopts);
free(mtn);
}
avl_destroy(&hdl->libzfs_mnttab_cache);
}
void
libzfs_mnttab_cache(libzfs_handle_t *hdl, boolean_t enable)
{
hdl->libzfs_mnttab_enable = enable;
}
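/*
* Look up the mount entry for the given dataset. If caching is disabled,
* the mount table is searched directly; otherwise the AVL cache is
* consulted (and populated on first use). Returns 0 on success or ENOENT
* if the dataset is not mounted.
*/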
int
libzfs_mnttab_find(libzfs_handle_t *hdl, const char *fsname,
struct mnttab *entry)
{
mnttab_node_t find;
mnttab_node_t *mtn;
int error;
if (!hdl->libzfs_mnttab_enable) {
struct mnttab srch = { 0 };
if (avl_numnodes(&hdl->libzfs_mnttab_cache))
libzfs_mnttab_fini(hdl);
/* Reopen MNTTAB to prevent reading stale data from open file */
if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL)
return (ENOENT);
srch.mnt_special = (char *)fsname;
srch.mnt_fstype = MNTTYPE_ZFS;
if (getmntany(hdl->libzfs_mnttab, entry, &srch) == 0)
return (0);
else
return (ENOENT);
}
if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0)
if ((error = libzfs_mnttab_update(hdl)) != 0)
return (error);
find.mtn_mt.mnt_special = (char *)fsname;
mtn = avl_find(&hdl->libzfs_mnttab_cache, &find, NULL);
if (mtn) {
*entry = mtn->mtn_mt;
return (0);
}
return (ENOENT);
}
void
libzfs_mnttab_add(libzfs_handle_t *hdl, const char *special,
const char *mountp, const char *mntopts)
{
mnttab_node_t *mtn;
if (avl_numnodes(&hdl->libzfs_mnttab_cache) == 0)
return;
mtn = zfs_alloc(hdl, sizeof (mnttab_node_t));
mtn->mtn_mt.mnt_special = zfs_strdup(hdl, special);
mtn->mtn_mt.mnt_mountp = zfs_strdup(hdl, mountp);
mtn->mtn_mt.mnt_fstype = zfs_strdup(hdl, MNTTYPE_ZFS);
mtn->mtn_mt.mnt_mntopts = zfs_strdup(hdl, mntopts);
avl_add(&hdl->libzfs_mnttab_cache, mtn);
}
void
libzfs_mnttab_remove(libzfs_handle_t *hdl, const char *fsname)
{
mnttab_node_t find;
mnttab_node_t *ret;
find.mtn_mt.mnt_special = (char *)fsname;
if ((ret = avl_find(&hdl->libzfs_mnttab_cache, (void *)&find, NULL))
!= NULL) {
avl_remove(&hdl->libzfs_mnttab_cache, ret);
free(ret->mtn_mt.mnt_special);
free(ret->mtn_mt.mnt_mountp);
free(ret->mtn_mt.mnt_fstype);
free(ret->mtn_mt.mnt_mntopts);
free(ret);
}
}
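/*
* Store the version of the pool containing this dataset in *spa_version.
* Returns -1 if the handle has no associated pool handle.
*/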
int
zfs_spa_version(zfs_handle_t *zhp, int *spa_version)
{
zpool_handle_t *zpool_handle = zhp->zpool_hdl;
if (zpool_handle == NULL)
return (-1);
*spa_version = zpool_get_prop_int(zpool_handle,
ZPOOL_PROP_VERSION, NULL);
return (0);
}
/*
* The choice of reservation property depends on the SPA version.
*/
static int
zfs_which_resv_prop(zfs_handle_t *zhp, zfs_prop_t *resv_prop)
{
int spa_version;
if (zfs_spa_version(zhp, &spa_version) < 0)
return (-1);
if (spa_version >= SPA_VERSION_REFRESERVATION)
*resv_prop = ZFS_PROP_REFRESERVATION;
else
*resv_prop = ZFS_PROP_RESERVATION;
return (0);
}
/*
* Given an nvlist of properties to set, validates that they are correct, and
* parses any numeric properties (index, boolean, etc) if they are specified as
* strings.
*/
nvlist_t *
zfs_valid_proplist(libzfs_handle_t *hdl, zfs_type_t type, nvlist_t *nvl,
uint64_t zoned, zfs_handle_t *zhp, zpool_handle_t *zpool_hdl,
boolean_t key_params_ok, const char *errbuf)
{
nvpair_t *elem;
uint64_t intval;
char *strval;
zfs_prop_t prop;
nvlist_t *ret;
int chosen_normal = -1;
int chosen_utf = -1;
if (nvlist_alloc(&ret, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
/*
* Make sure this property is valid and applies to this type.
*/
elem = NULL;
while ((elem = nvlist_next_nvpair(nvl, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zfs_name_to_prop(propname);
if (prop == ZPROP_INVAL && zfs_prop_user(propname)) {
/*
* This is a user property: make sure its value is a
* string and that its name is shorter than ZAP_MAXNAMELEN.
*/
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strlen(nvpair_name(elem)) >= ZAP_MAXNAMELEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property name '%s' is too long"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (nvlist_add_string(ret, propname, strval) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Currently, only user properties can be modified on
* snapshots.
*/
if (type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"this property can not be modified for snapshots"));
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (prop == ZPROP_INVAL && zfs_prop_userquota(propname)) {
zfs_userquota_prop_t uqtype;
char *newpropname = NULL;
char domain[128];
uint64_t rid;
uint64_t valary[3];
int rc;
if (userquota_propname_decode(propname, zoned,
&uqtype, domain, sizeof (domain), &rid) != 0) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' has an invalid user/group name"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (uqtype != ZFS_PROP_USERQUOTA &&
uqtype != ZFS_PROP_GROUPQUOTA &&
uqtype != ZFS_PROP_USEROBJQUOTA &&
uqtype != ZFS_PROP_GROUPOBJQUOTA &&
uqtype != ZFS_PROP_PROJECTQUOTA &&
uqtype != ZFS_PROP_PROJECTOBJQUOTA) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY,
errbuf);
goto error;
}
if (nvpair_type(elem) == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, "none") == 0) {
intval = 0;
} else if (zfs_nicestrtonum(hdl,
strval, &intval) != 0) {
(void) zfs_error(hdl,
EZFS_BADPROP, errbuf);
goto error;
}
} else if (nvpair_type(elem) ==
DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, &intval);
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable "
"{user|group|project}quota"));
goto error;
}
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
/*
* Encode the prop name as
* userquota@<hex-rid>-domain, to make it easy
* for the kernel to decode.
*/
rc = asprintf(&newpropname, "%s%llx-%s",
zfs_userquota_prop_prefixes[uqtype],
(longlong_t)rid, domain);
if (rc == -1 || newpropname == NULL) {
(void) no_memory(hdl);
goto error;
}
valary[0] = uqtype;
valary[1] = rid;
valary[2] = intval;
if (nvlist_add_uint64_array(ret, newpropname,
valary, 3) != 0) {
free(newpropname);
(void) no_memory(hdl);
goto error;
}
free(newpropname);
continue;
} else if (prop == ZPROP_INVAL && zfs_prop_written(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (prop == ZPROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!zfs_prop_valid_for_type(prop, type, B_FALSE)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' does not "
"apply to datasets of this type"), propname);
(void) zfs_error(hdl, EZFS_PROPTYPE, errbuf);
goto error;
}
if (zfs_prop_readonly(prop) &&
!(zfs_prop_setonce(prop) && zhp == NULL) &&
!(zfs_prop_encryption_key_param(prop) && key_params_ok)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "'%s' is readonly"),
propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, type, ret,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform some additional checks for specific properties.
*/
switch (prop) {
case ZFS_PROP_VERSION:
{
int version;
if (zhp == NULL)
break;
version = zfs_prop_get_int(zhp, ZFS_PROP_VERSION);
if (intval < version) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Can not downgrade; already at version %u"),
version);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_VOLBLOCKSIZE:
case ZFS_PROP_RECORDSIZE:
{
int maxbs = SPA_MAXBLOCKSIZE;
char buf[64];
if (zpool_hdl != NULL) {
maxbs = zpool_get_prop_int(zpool_hdl,
ZPOOL_PROP_MAXBLOCKSIZE, NULL);
}
/*
* The value must be a power of two between
* SPA_MINBLOCKSIZE and maxbs.
*/
if (intval < SPA_MINBLOCKSIZE ||
intval > maxbs || !ISP2(intval)) {
zfs_nicebytes(maxbs, buf, sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be power of 2 from 512B "
"to %s"), propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
}
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
/*
* Verify the mlslabel string and convert to
* internal hex label string.
*/
m_label_t *new_sl;
char *hex = NULL; /* internal label string */
/* Default value is already OK. */
if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
break;
/* Verify the label can be converted to binary form */
if (((new_sl = m_label_alloc(MAC_LABEL)) == NULL) ||
(str_to_label(strval, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1)) {
goto badlabel;
}
/* Now translate to hex internal label string */
if (label_to_str(new_sl, &hex, M_INTERNAL,
DEF_NAMES) != 0) {
if (hex)
free(hex);
goto badlabel;
}
m_label_free(new_sl);
/* If string is already in internal form, we're done. */
if (strcmp(strval, hex) == 0) {
free(hex);
break;
}
/* Replace the label string with the internal form. */
(void) nvlist_remove(ret, zfs_prop_to_name(prop),
DATA_TYPE_STRING);
verify(nvlist_add_string(ret, zfs_prop_to_name(prop),
hex) == 0);
free(hex);
break;
badlabel:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid mlslabel '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
m_label_free(new_sl); /* OK if null */
goto error;
#else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"mlslabels are unsupported"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
#endif /* HAVE_MLSLABEL */
}
case ZFS_PROP_MOUNTPOINT:
{
namecheck_err_t why;
if (strcmp(strval, ZFS_MOUNTPOINT_NONE) == 0 ||
strcmp(strval, ZFS_MOUNTPOINT_LEGACY) == 0)
break;
if (mountpoint_namecheck(strval, &why)) {
switch (why) {
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"'%s' must be an absolute path, "
"'none', or 'legacy'"), propname);
break;
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"component of '%s' is too long"),
propname);
break;
default:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"(%d) not defined"),
why);
break;
}
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
}
/*FALLTHRU*/
case ZFS_PROP_SHARESMB:
case ZFS_PROP_SHARENFS:
/*
* For the mountpoint and sharenfs or sharesmb
* properties, check whether they can be set in a
* global or non-global zone based on
* the zoned property value:
*
* global zone non-global zone
* --------------------------------------------------
* zoned=on mountpoint (no) mountpoint (yes)
* sharenfs (no) sharenfs (no)
* sharesmb (no) sharesmb (no)
*
* zoned=off mountpoint (yes) N/A
* sharenfs (yes)
* sharesmb (yes)
*/
if (zoned) {
if (getzoneid() == GLOBAL_ZONEID) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set on "
"dataset in a non-global zone"),
propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
} else if (prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set in "
"a non-global zone"), propname);
(void) zfs_error(hdl, EZFS_ZONED,
errbuf);
goto error;
}
} else if (getzoneid() != GLOBAL_ZONEID) {
/*
* If the zoned property is 'off', this must be in
* the global zone. If not, something is wrong.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set while dataset "
"'zoned' property is set"), propname);
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
/*
* At this point, it is legitimate to set the
* property. Now we want to make sure that the
* property value is valid if it is sharenfs or sharesmb.
*/
if ((prop == ZFS_PROP_SHARENFS ||
prop == ZFS_PROP_SHARESMB) &&
strcmp(strval, "on") != 0 &&
strcmp(strval, "off") != 0) {
zfs_share_proto_t proto;
if (prop == ZFS_PROP_SHARESMB)
proto = PROTO_SMB;
else
proto = PROTO_NFS;
/*
* Must be a valid sharing protocol
* option string, so initialize libshare
* to enable the parser and then parse
* the options. We use the
* control API since we don't care about
* the current configuration and don't
* want the overhead of loading it
* until we actually do something.
*/
if (zfs_init_libshare(hdl,
SA_INIT_CONTROL_API) != SA_OK) {
/*
* An error occurred so we can't do
* anything
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set: problem "
"in share initialization"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
if (zfs_parse_options(strval, proto) != SA_OK) {
/*
* There was a parsing error, so report it
* and bail out after uninitializing the
* libshare interface.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be set to invalid "
"options"), propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
zfs_uninit_libshare(hdl);
goto error;
}
zfs_uninit_libshare(hdl);
}
break;
case ZFS_PROP_KEYLOCATION:
if (!zfs_prop_valid_keylocation(strval, B_FALSE)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid keylocation"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zhp != NULL) {
uint64_t crypt =
zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION);
if (crypt == ZIO_CRYPT_OFF &&
strcmp(strval, "none") != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation must be 'none' "
"for unencrypted datasets"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
} else if (crypt != ZIO_CRYPT_OFF &&
strcmp(strval, "none") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation must not be 'none' "
"for encrypted datasets"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
break;
case ZFS_PROP_PBKDF2_ITERS:
if (intval < MIN_PBKDF2_ITERATIONS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"minimum pbkdf2 iterations is %u"),
MIN_PBKDF2_ITERATIONS);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZFS_PROP_UTF8ONLY:
chosen_utf = (int)intval;
break;
case ZFS_PROP_NORMALIZE:
chosen_normal = (int)intval;
break;
default:
break;
}
/*
* For changes to existing volumes, we have some additional
* checks to enforce.
*/
if (type == ZFS_TYPE_VOLUME && zhp != NULL) {
uint64_t blocksize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLBLOCKSIZE);
char buf[64];
switch (prop) {
case ZFS_PROP_VOLSIZE:
if (intval % blocksize != 0) {
zfs_nicebytes(blocksize, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a multiple of "
"volume block size (%s)"),
propname, buf);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
if (intval == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' cannot be zero"),
propname);
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
default:
break;
}
}
/* check encryption properties */
if (zhp != NULL) {
int64_t crypt = zfs_prop_get_int(zhp,
ZFS_PROP_ENCRYPTION);
switch (prop) {
case ZFS_PROP_COPIES:
if (crypt != ZIO_CRYPT_OFF && intval > 2) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encrypted datasets cannot have "
"3 copies"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
break;
default:
break;
}
}
}
/*
* If normalization was chosen, but no UTF8 choice was made,
* enforce rejection of non-UTF8 names.
*
* If normalization was chosen, but rejecting non-UTF8 names
* was explicitly not chosen, it is an error.
*/
if (chosen_normal > 0 && chosen_utf < 0) {
if (nvlist_add_uint64(ret,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), 1) != 0) {
(void) no_memory(hdl);
goto error;
}
} else if (chosen_normal > 0 && chosen_utf == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be set 'on' if normalization chosen"),
zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
return (ret);
error:
nvlist_free(ret);
return (NULL);
}
int
zfs_add_synthetic_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t old_volsize;
uint64_t new_volsize;
uint64_t old_reservation;
uint64_t new_reservation;
zfs_prop_t resv_prop;
nvlist_t *props;
/*
* If this is an existing volume, and someone is setting the volsize,
* make sure that it matches the reservation, or add it if necessary.
*/
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_reservation = zfs_prop_get_int(zhp, resv_prop);
props = fnvlist_alloc();
fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));
if ((zvol_volsize_to_reservation(old_volsize, props) !=
old_reservation) || nvlist_exists(nvl,
zfs_prop_to_name(resv_prop))) {
fnvlist_free(props);
return (0);
}
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
&new_volsize) != 0) {
fnvlist_free(props);
return (-1);
}
new_reservation = zvol_volsize_to_reservation(new_volsize, props);
fnvlist_free(props);
if (nvlist_add_uint64(nvl, zfs_prop_to_name(resv_prop),
new_reservation) != 0) {
(void) no_memory(zhp->zfs_hdl);
return (-1);
}
return (1);
}
/*
* Helper for 'zfs {set|clone} refreservation=auto'. Must be called after
* zfs_valid_proplist(), as it is what sets the UINT64_MAX sentinel value.
* Return codes must match zfs_add_synthetic_resv().
*/
static int
zfs_fix_auto_resv(zfs_handle_t *zhp, nvlist_t *nvl)
{
uint64_t volsize;
uint64_t resvsize;
zfs_prop_t prop;
nvlist_t *props;
if (!ZFS_IS_VOLUME(zhp)) {
return (0);
}
if (zfs_which_resv_prop(zhp, &prop) != 0) {
return (-1);
}
if (prop != ZFS_PROP_REFRESERVATION) {
return (0);
}
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(prop), &resvsize) != 0) {
/* No value being set, so it can't be "auto" */
return (0);
}
if (resvsize != UINT64_MAX) {
/* Being set to a value other than "auto" */
return (0);
}
props = fnvlist_alloc();
fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
zfs_prop_get_int(zhp, ZFS_PROP_VOLBLOCKSIZE));
if (nvlist_lookup_uint64(nvl, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
&volsize) != 0) {
volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
}
resvsize = zvol_volsize_to_reservation(volsize, props);
fnvlist_free(props);
(void) nvlist_remove_all(nvl, zfs_prop_to_name(prop));
if (nvlist_add_uint64(nvl, zfs_prop_to_name(prop), resvsize) != 0) {
(void) no_memory(zhp->zfs_hdl);
return (-1);
}
return (1);
}
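/*
* Translate the errno from a failed property-set ioctl into a
* property-specific error message.
*/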
void
zfs_setprop_error(libzfs_handle_t *hdl, zfs_prop_t prop, int err,
char *errbuf)
{
switch (err) {
case ENOSPC:
/*
* For quotas and reservations, ENOSPC indicates
* something different; setting a quota or reservation
* doesn't use any disk space.
*/
switch (prop) {
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is less than current used or "
"reserved space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"size is greater than available space"));
(void) zfs_error(hdl, EZFS_PROPSPACE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, err, errbuf);
break;
}
break;
case EBUSY:
(void) zfs_standard_error(hdl, EBUSY, errbuf);
break;
case EROFS:
(void) zfs_error(hdl, EZFS_DSREADONLY, errbuf);
break;
case E2BIG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property value too long"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool and or dataset must be upgraded to set this "
"property or value"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case ERANGE:
if (prop == ZFS_PROP_COMPRESSION ||
prop == ZFS_PROP_DNODESIZE ||
prop == ZFS_PROP_RECORDSIZE) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"bootable datasets"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else if (prop == ZFS_PROP_CHECKSUM ||
prop == ZFS_PROP_DEDUP) {
(void) zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property setting is not allowed on "
"root pools"));
(void) zfs_error(hdl, EZFS_NOTSUP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EINVAL:
if (prop == ZPROP_INVAL) {
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EACCES:
if (prop == ZFS_PROP_KEYLOCATION) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"keylocation may only be set on encryption roots"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
} else {
(void) zfs_standard_error(hdl, err, errbuf);
}
break;
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
#ifdef _ILP32
if (prop == ZFS_PROP_VOLSIZE) {
(void) zfs_error(hdl, EZFS_VOLTOOBIG, errbuf);
break;
}
#endif
/* FALLTHROUGH */
default:
(void) zfs_standard_error(hdl, err, errbuf);
}
}
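/*
* Returns B_TRUE for properties that correspond to mount options handled by
* the generic namespace layer; changing one of these on a mounted dataset
* requires a remount for the new value to take effect.
*/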
static boolean_t
zfs_is_namespace_prop(zfs_prop_t prop)
{
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_SETUID:
case ZFS_PROP_READONLY:
case ZFS_PROP_XATTR:
case ZFS_PROP_NBMAND:
return (B_TRUE);
default:
return (B_FALSE);
}
}
/*
* Given a property name and value, set the property for the given dataset.
*/
int
zfs_prop_set(zfs_handle_t *zhp, const char *propname, const char *propval)
{
int ret = -1;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl = NULL;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0 ||
nvlist_add_string(nvl, propname, propval) != 0) {
(void) no_memory(hdl);
goto error;
}
ret = zfs_prop_set_list(zhp, nvl);
error:
nvlist_free(nvl);
return (ret);
}
/*
* Given an nvlist of property names and values, set the properties for the
* given dataset.
*/
int
zfs_prop_set_list(zfs_handle_t *zhp, nvlist_t *props)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
prop_changelist_t **cls = NULL;
int cl_idx;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
nvlist_t *nvl;
int nvl_len = 0;
int added_resv = 0;
zfs_prop_t prop = 0;
nvpair_t *elem;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zfs_name);
if ((nvl = zfs_valid_proplist(hdl, zhp->zfs_type, props,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED), zhp, zhp->zpool_hdl,
B_FALSE, errbuf)) == NULL)
goto error;
/*
* We have to check for any extra properties which need to be added
* before computing the length of the nvlist.
*/
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
if (zfs_name_to_prop(nvpair_name(elem)) == ZFS_PROP_VOLSIZE &&
(added_resv = zfs_add_synthetic_resv(zhp, nvl)) == -1) {
goto error;
}
}
if (added_resv != 1 &&
(added_resv = zfs_fix_auto_resv(zhp, nvl)) == -1) {
goto error;
}
/*
* Check how many properties we're setting and allocate an array to
* store changelist pointers for changelist_postfix().
*/
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem))
nvl_len++;
if ((cls = calloc(nvl_len, sizeof (prop_changelist_t *))) == NULL)
goto error;
cl_idx = 0;
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
assert(cl_idx < nvl_len);
/*
* We don't want to unmount & remount the dataset when changing
* its canmount property to 'on' or 'noauto'. We only use
* the changelist logic to unmount when setting canmount=off.
*/
if (prop != ZFS_PROP_CANMOUNT ||
(fnvpair_value_uint64(elem) == ZFS_CANMOUNT_OFF &&
zfs_is_mounted(zhp, NULL))) {
cls[cl_idx] = changelist_gather(zhp, prop, 0, 0);
if (cls[cl_idx] == NULL)
goto error;
}
if (prop == ZFS_PROP_MOUNTPOINT &&
changelist_haszonedchild(cls[cl_idx])) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if (cls[cl_idx] != NULL &&
(ret = changelist_prefix(cls[cl_idx])) != 0)
goto error;
cl_idx++;
}
assert(cl_idx == nvl_len);
/*
* Execute the corresponding ioctl() to set this list of properties.
*/
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if ((ret = zcmd_write_src_nvlist(hdl, &zc, nvl)) != 0 ||
(ret = zcmd_alloc_dst_nvlist(hdl, &zc, 0)) != 0)
goto error;
ret = zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
if (ret != 0) {
/* Get the list of unset properties back and report them. */
nvlist_t *errorprops = NULL;
if (zcmd_read_dst_nvlist(hdl, &zc, &errorprops) != 0)
goto error;
for (elem = nvlist_next_nvpair(nvl, NULL);
elem != NULL;
elem = nvlist_next_nvpair(nvl, elem)) {
prop = zfs_name_to_prop(nvpair_name(elem));
zfs_setprop_error(hdl, prop, errno, errbuf);
}
nvlist_free(errorprops);
if (added_resv && errno == ENOSPC) {
/* clean up the volsize property we tried to set */
uint64_t old_volsize = zfs_prop_get_int(zhp,
ZFS_PROP_VOLSIZE);
nvlist_free(nvl);
nvl = NULL;
zcmd_free_nvlists(&zc);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
goto error;
if (nvlist_add_uint64(nvl,
zfs_prop_to_name(ZFS_PROP_VOLSIZE),
old_volsize) != 0)
goto error;
if (zcmd_write_src_nvlist(hdl, &zc, nvl) != 0)
goto error;
(void) zfs_ioctl(hdl, ZFS_IOC_SET_PROP, &zc);
}
} else {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL) {
int clp_err = changelist_postfix(cls[cl_idx]);
if (clp_err != 0)
ret = clp_err;
}
}
if (ret == 0) {
/*
* Refresh the statistics so the new property
* value is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (zfs_is_namespace_prop(prop) &&
zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
}
error:
nvlist_free(nvl);
zcmd_free_nvlists(&zc);
if (cls != NULL) {
for (cl_idx = 0; cl_idx < nvl_len; cl_idx++) {
if (cls[cl_idx] != NULL)
changelist_free(cls[cl_idx]);
}
free(cls);
}
return (ret);
}
/*
* Given a property, inherit the value from the parent dataset, or if received
* is TRUE, revert to the received value, if any.
*/
int
zfs_prop_inherit(zfs_handle_t *zhp, const char *propname, boolean_t received)
{
zfs_cmd_t zc = {"\0"};
int ret;
prop_changelist_t *cl;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
zfs_prop_t prop;
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot inherit %s for '%s'"), propname, zhp->zfs_name);
zc.zc_cookie = received;
if ((prop = zfs_name_to_prop(propname)) == ZPROP_INVAL) {
/*
* For user properties, the amount of work we have to do is very
* small, so just do it here.
*/
if (!zfs_prop_user(propname)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc) != 0)
return (zfs_standard_error(hdl, errno, errbuf));
return (0);
}
/*
* Verify that this property is inheritable.
*/
if (zfs_prop_readonly(prop))
return (zfs_error(hdl, EZFS_PROPREADONLY, errbuf));
if (!zfs_prop_inheritable(prop) && !received)
return (zfs_error(hdl, EZFS_PROPNONINHERIT, errbuf));
/*
* Check to see if the value applies to this type
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (zfs_error(hdl, EZFS_PROPTYPE, errbuf));
/*
* Normalize the name, to get rid of shorthand abbreviations.
*/
propname = zfs_prop_to_name(prop);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, propname, sizeof (zc.zc_value));
if (prop == ZFS_PROP_MOUNTPOINT && getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
/*
* Determine datasets which will be affected by this change, if any.
*/
if ((cl = changelist_gather(zhp, prop, 0, 0)) == NULL)
return (-1);
if (prop == ZFS_PROP_MOUNTPOINT && changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
ret = zfs_error(hdl, EZFS_ZONED, errbuf);
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_INHERIT_PROP, &zc)) != 0) {
return (zfs_standard_error(hdl, errno, errbuf));
} else {
if ((ret = changelist_postfix(cl)) != 0)
goto error;
/*
* Refresh the statistics so the new property is reflected.
*/
(void) get_stats(zhp);
/*
* Remount the filesystem to propagate the change
* if one of the options handled by the generic
* Linux namespace layer has been modified.
*/
if (zfs_is_namespace_prop(prop) &&
zfs_is_mounted(zhp, NULL))
ret = zfs_mount(zhp, MNTOPT_REMOUNT, 0);
}
error:
changelist_free(cl);
return (ret);
}
/*
* True DSL properties are stored in an nvlist. The following two functions
* extract them appropriately.
*/
uint64_t
getprop_uint64(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
{
nvlist_t *nv;
uint64_t value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
value = zfs_prop_default_numeric(prop);
*source = "";
}
return (value);
}
static const char *
getprop_string(zfs_handle_t *zhp, zfs_prop_t prop, char **source)
{
nvlist_t *nv;
const char *value;
*source = NULL;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(prop), &nv) == 0) {
value = fnvlist_lookup_string(nv, ZPROP_VALUE);
(void) nvlist_lookup_string(nv, ZPROP_SOURCE, source);
} else {
verify(!zhp->zfs_props_table ||
zhp->zfs_props_table[prop] == B_TRUE);
value = zfs_prop_default_string(prop);
*source = "";
}
return (value);
}
static boolean_t
zfs_is_recvd_props_mode(zfs_handle_t *zhp)
{
return (zhp->zfs_props == zhp->zfs_recvd_props);
}
static void
zfs_set_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
{
*cookie = (uint64_t)(uintptr_t)zhp->zfs_props;
zhp->zfs_props = zhp->zfs_recvd_props;
}
static void
zfs_unset_recvd_props_mode(zfs_handle_t *zhp, uint64_t *cookie)
{
zhp->zfs_props = (nvlist_t *)(uintptr_t)*cookie;
*cookie = 0;
}
/*
* Internal function for getting a numeric property. Both zfs_prop_get() and
* zfs_prop_get_int() are built using this interface.
*
* Certain properties can be overridden using 'mount -o'. In this case, scan
* the contents of the /proc/self/mounts entry, searching for the
* appropriate options. If they differ from the on-disk values, report the
* current values and mark the source "temporary".
*/
static int
get_numeric_property(zfs_handle_t *zhp, zfs_prop_t prop, zprop_source_t *src,
char **source, uint64_t *val)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zplprops = NULL;
struct mnttab mnt;
char *mntopt_on = NULL;
char *mntopt_off = NULL;
boolean_t received = zfs_is_recvd_props_mode(zhp);
*source = NULL;
/*
* If the property is being fetched for a snapshot, check whether
* the property is valid for the snapshot's head dataset type.
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT &&
!zfs_prop_valid_for_type(prop, zhp->zfs_head_type, B_TRUE)) {
*val = zfs_prop_default_numeric(prop);
return (-1);
}
switch (prop) {
case ZFS_PROP_ATIME:
mntopt_on = MNTOPT_ATIME;
mntopt_off = MNTOPT_NOATIME;
break;
case ZFS_PROP_RELATIME:
mntopt_on = MNTOPT_RELATIME;
mntopt_off = MNTOPT_NORELATIME;
break;
case ZFS_PROP_DEVICES:
mntopt_on = MNTOPT_DEVICES;
mntopt_off = MNTOPT_NODEVICES;
break;
case ZFS_PROP_EXEC:
mntopt_on = MNTOPT_EXEC;
mntopt_off = MNTOPT_NOEXEC;
break;
case ZFS_PROP_READONLY:
mntopt_on = MNTOPT_RO;
mntopt_off = MNTOPT_RW;
break;
case ZFS_PROP_SETUID:
mntopt_on = MNTOPT_SETUID;
mntopt_off = MNTOPT_NOSETUID;
break;
case ZFS_PROP_XATTR:
mntopt_on = MNTOPT_XATTR;
mntopt_off = MNTOPT_NOXATTR;
break;
case ZFS_PROP_NBMAND:
mntopt_on = MNTOPT_NBMAND;
mntopt_off = MNTOPT_NONBMAND;
break;
default:
break;
}
/*
* Because looking up the mount options is potentially expensive
* (iterating over all of /proc/self/mounts), we defer its
* calculation until we're looking up a property which requires
* its presence.
*/
if (!zhp->zfs_mntcheck &&
(mntopt_on != NULL || prop == ZFS_PROP_MOUNTED)) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
struct mnttab entry;
if (libzfs_mnttab_find(hdl, zhp->zfs_name, &entry) == 0) {
zhp->zfs_mntopts = zfs_strdup(hdl,
entry.mnt_mntopts);
if (zhp->zfs_mntopts == NULL)
return (-1);
}
zhp->zfs_mntcheck = B_TRUE;
}
if (zhp->zfs_mntopts == NULL)
mnt.mnt_mntopts = "";
else
mnt.mnt_mntopts = zhp->zfs_mntopts;
switch (prop) {
case ZFS_PROP_ATIME:
case ZFS_PROP_RELATIME:
case ZFS_PROP_DEVICES:
case ZFS_PROP_EXEC:
case ZFS_PROP_READONLY:
case ZFS_PROP_SETUID:
case ZFS_PROP_XATTR:
case ZFS_PROP_NBMAND:
*val = getprop_uint64(zhp, prop, source);
if (received)
break;
if (hasmntopt(&mnt, mntopt_on) && !*val) {
*val = B_TRUE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
} else if (hasmntopt(&mnt, mntopt_off) && *val) {
*val = B_FALSE;
if (src)
*src = ZPROP_SRC_TEMPORARY;
}
break;
case ZFS_PROP_CANMOUNT:
case ZFS_PROP_VOLSIZE:
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
*val = getprop_uint64(zhp, prop, source);
if (*source == NULL) {
/* not default, must be local */
*source = zhp->zfs_name;
}
break;
case ZFS_PROP_MOUNTED:
*val = (zhp->zfs_mntopts != NULL);
break;
case ZFS_PROP_NUMCLONES:
*val = zhp->zfs_dmustats.dds_num_clones;
break;
case ZFS_PROP_VERSION:
case ZFS_PROP_NORMALIZE:
case ZFS_PROP_UTF8ONLY:
case ZFS_PROP_CASE:
if (zcmd_alloc_dst_nvlist(zhp->zfs_hdl, &zc, 0) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_OBJSET_ZPLPROPS, &zc)) {
zcmd_free_nvlists(&zc);
if (prop == ZFS_PROP_VERSION &&
zhp->zfs_type == ZFS_TYPE_VOLUME)
*val = zfs_prop_default_numeric(prop);
return (-1);
}
if (zcmd_read_dst_nvlist(zhp->zfs_hdl, &zc, &zplprops) != 0 ||
nvlist_lookup_uint64(zplprops, zfs_prop_to_name(prop),
val) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
nvlist_free(zplprops);
zcmd_free_nvlists(&zc);
break;
case ZFS_PROP_INCONSISTENT:
*val = zhp->zfs_dmustats.dds_inconsistent;
break;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
case PROP_TYPE_INDEX:
*val = getprop_uint64(zhp, prop, source);
/*
* If we tried to use a default value for a
* readonly property, it means that it was not
* present. Note this only applies to "truly"
* readonly properties, not set-once properties
* like volblocksize.
*/
if (zfs_prop_readonly(prop) &&
!zfs_prop_setonce(prop) &&
*source != NULL && (*source)[0] == '\0') {
*source = NULL;
return (-1);
}
break;
case PROP_TYPE_STRING:
default:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"cannot get non-numeric property"));
return (zfs_error(zhp->zfs_hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "internal error")));
}
}
return (0);
}
/*
* Calculate the source type, given the raw source string.
*/
static void
get_source(zfs_handle_t *zhp, zprop_source_t *srctype, char *source,
char *statbuf, size_t statlen)
{
if (statbuf == NULL ||
srctype == NULL || *srctype == ZPROP_SRC_TEMPORARY) {
return;
}
if (source == NULL) {
*srctype = ZPROP_SRC_NONE;
} else if (source[0] == '\0') {
*srctype = ZPROP_SRC_DEFAULT;
} else if (strstr(source, ZPROP_SOURCE_VAL_RECVD) != NULL) {
*srctype = ZPROP_SRC_RECEIVED;
} else {
if (strcmp(source, zhp->zfs_name) == 0) {
*srctype = ZPROP_SRC_LOCAL;
} else {
(void) strlcpy(statbuf, source, statlen);
*srctype = ZPROP_SRC_INHERITED;
}
}
}
int
zfs_prop_get_recvd(zfs_handle_t *zhp, const char *propname, char *propbuf,
size_t proplen, boolean_t literal)
{
zfs_prop_t prop;
int err = 0;
if (zhp->zfs_recvd_props == NULL)
if (get_recvd_props_ioctl(zhp) != 0)
return (-1);
prop = zfs_name_to_prop(propname);
if (prop != ZPROP_INVAL) {
uint64_t cookie;
if (!nvlist_exists(zhp->zfs_recvd_props, propname))
return (-1);
zfs_set_recvd_props_mode(zhp, &cookie);
err = zfs_prop_get(zhp, prop, propbuf, proplen,
NULL, NULL, 0, literal);
zfs_unset_recvd_props_mode(zhp, &cookie);
} else {
nvlist_t *propval;
char *recvdval;
if (nvlist_lookup_nvlist(zhp->zfs_recvd_props,
propname, &propval) != 0)
return (-1);
verify(nvlist_lookup_string(propval, ZPROP_VALUE,
&recvdval) == 0);
(void) strlcpy(propbuf, recvdval, proplen);
}
return (err == 0 ? 0 : -1);
}
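/*
* Format the 'clones' property as a comma-separated list of clone names in
* propbuf.
*/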
static int
get_clones_string(zfs_handle_t *zhp, char *propbuf, size_t proplen)
{
nvlist_t *value;
nvpair_t *pair;
value = zfs_get_clones_nvl(zhp);
if (value == NULL)
return (-1);
propbuf[0] = '\0';
for (pair = nvlist_next_nvpair(value, NULL); pair != NULL;
pair = nvlist_next_nvpair(value, pair)) {
if (propbuf[0] != '\0')
(void) strlcat(propbuf, ",", proplen);
(void) strlcat(propbuf, nvpair_name(pair), proplen);
}
return (0);
}
struct get_clones_arg {
uint64_t numclones;
nvlist_t *value;
const char *origin;
char buf[ZFS_MAX_DATASET_NAME_LEN];
};
int
get_clones_cb(zfs_handle_t *zhp, void *arg)
{
struct get_clones_arg *gca = arg;
if (gca->numclones == 0) {
zfs_close(zhp);
return (0);
}
if (zfs_prop_get(zhp, ZFS_PROP_ORIGIN, gca->buf, sizeof (gca->buf),
NULL, NULL, 0, B_TRUE) != 0)
goto out;
if (strcmp(gca->buf, gca->origin) == 0) {
fnvlist_add_boolean(gca->value, zfs_get_name(zhp));
gca->numclones--;
}
out:
(void) zfs_iter_children(zhp, get_clones_cb, gca);
zfs_close(zhp);
return (0);
}
nvlist_t *
zfs_get_clones_nvl(zfs_handle_t *zhp)
{
nvlist_t *nv, *value;
if (nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), &nv) != 0) {
struct get_clones_arg gca;
/*
* If this is a snapshot, the kernel wasn't able
* to get the clones. Do it by slowly iterating.
*/
if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT)
return (NULL);
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, 0) != 0)
return (NULL);
if (nvlist_alloc(&value, NV_UNIQUE_NAME, 0) != 0) {
nvlist_free(nv);
return (NULL);
}
gca.numclones = zfs_prop_get_int(zhp, ZFS_PROP_NUMCLONES);
gca.value = value;
gca.origin = zhp->zfs_name;
if (gca.numclones != 0) {
zfs_handle_t *root;
char pool[ZFS_MAX_DATASET_NAME_LEN];
char *cp = pool;
/* get the pool name */
(void) strlcpy(pool, zhp->zfs_name, sizeof (pool));
(void) strsep(&cp, "/@");
root = zfs_open(zhp->zfs_hdl, pool,
ZFS_TYPE_FILESYSTEM);
if (root == NULL) {
nvlist_free(nv);
nvlist_free(value);
return (NULL);
}
(void) get_clones_cb(root, &gca);
}
if (gca.numclones != 0 ||
nvlist_add_nvlist(nv, ZPROP_VALUE, value) != 0 ||
nvlist_add_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), nv) != 0) {
nvlist_free(nv);
nvlist_free(value);
return (NULL);
}
nvlist_free(nv);
nvlist_free(value);
verify(0 == nvlist_lookup_nvlist(zhp->zfs_props,
zfs_prop_to_name(ZFS_PROP_CLONES), &nv));
}
verify(nvlist_lookup_nvlist(nv, ZPROP_VALUE, &value) == 0);
return (value);
}
/*
* Accepts a property and value and checks that the value
* matches the one found by the channel program. If they are
* not equal, print both of them.
*/
static void
zcp_check(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t intval,
const char *strval)
{
if (!zhp->zfs_hdl->libzfs_prop_debug)
return;
int error;
char *poolname = zhp->zpool_hdl->zpool_name;
const char *prop_name = zfs_prop_to_name(prop);
const char *program =
"args = ...\n"
"ds = args['dataset']\n"
"prop = args['property']\n"
"value, setpoint = zfs.get_prop(ds, prop)\n"
"return {value=value, setpoint=setpoint}\n";
nvlist_t *outnvl;
nvlist_t *retnvl;
nvlist_t *argnvl = fnvlist_alloc();
fnvlist_add_string(argnvl, "dataset", zhp->zfs_name);
fnvlist_add_string(argnvl, "property", zfs_prop_to_name(prop));
error = lzc_channel_program_nosync(poolname, program,
10 * 1000 * 1000, 10 * 1024 * 1024, argnvl, &outnvl);
if (error == 0) {
retnvl = fnvlist_lookup_nvlist(outnvl, "return");
if (zfs_prop_get_type(prop) == PROP_TYPE_NUMBER) {
int64_t ans;
error = nvlist_lookup_int64(retnvl, "value", &ans);
if (error != 0) {
(void) fprintf(stderr, "%s: zcp check error: "
"%u\n", prop_name, error);
return;
}
if (ans != intval) {
(void) fprintf(stderr, "%s: zfs found %llu, "
"but zcp found %llu\n", prop_name,
(u_longlong_t)intval, (u_longlong_t)ans);
}
} else {
char *str_ans;
error = nvlist_lookup_string(retnvl, "value", &str_ans);
if (error != 0) {
(void) fprintf(stderr, "%s: zcp check error: "
"%u\n", prop_name, error);
return;
}
if (strcmp(strval, str_ans) != 0) {
(void) fprintf(stderr,
"%s: zfs found '%s', but zcp found '%s'\n",
prop_name, strval, str_ans);
}
}
} else {
(void) fprintf(stderr, "%s: zcp check failed, channel program "
"error: %u\n", prop_name, error);
}
nvlist_free(argnvl);
nvlist_free(outnvl);
}
/*
* Retrieve a property from the given object. If 'literal' is specified, then
* numbers are left as exact values. Otherwise, numbers are converted to a
* human-readable form.
*
* Returns 0 on success, or -1 on error.
*/
int
zfs_prop_get(zfs_handle_t *zhp, zfs_prop_t prop, char *propbuf, size_t proplen,
zprop_source_t *src, char *statbuf, size_t statlen, boolean_t literal)
{
char *source = NULL;
uint64_t val;
const char *str;
const char *strval;
boolean_t received = zfs_is_recvd_props_mode(zhp);
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE))
return (-1);
if (received && zfs_prop_readonly(prop))
return (-1);
if (src)
*src = ZPROP_SRC_NONE;
switch (prop) {
case ZFS_PROP_CREATION:
/*
* 'creation' is a time_t stored in the statistics. We convert
* this into a string unless 'literal' is specified.
*/
{
val = getprop_uint64(zhp, prop, &source);
time_t time = (time_t)val;
struct tm t;
if (literal ||
localtime_r(&time, &t) == NULL ||
strftime(propbuf, proplen, "%a %b %e %k:%M %Y",
&t) == 0)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_MOUNTPOINT:
/*
* Getting the precise mountpoint can be tricky.
*
* - for 'none' or 'legacy', return those values.
* - for inherited mountpoints, we want to take everything
* after our ancestor and append it to the inherited value.
*
* If the pool has an alternate root, we want to prepend that
* root to any values we return.
*/
str = getprop_string(zhp, prop, &source);
if (str[0] == '/') {
char buf[MAXPATHLEN];
char *root = buf;
const char *relpath;
/*
* If we inherit the mountpoint, even from a dataset
* with a received value, the source will be the path of
* the dataset we inherit from. If source is
* ZPROP_SOURCE_VAL_RECVD, the received value is not
* inherited.
*/
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
relpath = "";
} else {
relpath = zhp->zfs_name + strlen(source);
if (relpath[0] == '/')
relpath++;
}
if ((zpool_get_prop(zhp->zpool_hdl,
ZPOOL_PROP_ALTROOT, buf, MAXPATHLEN, NULL,
B_FALSE)) || (strcmp(root, "-") == 0))
root[0] = '\0';
/*
* Special case an alternate root of '/'. This will
* avoid having multiple leading slashes in the
* mountpoint path.
*/
if (strcmp(root, "/") == 0)
root++;
/*
* If the mountpoint is '/' then skip over this
* if we are obtaining either an alternate root or
* an inherited mountpoint.
*/
if (str[1] == '\0' && (root[0] != '\0' ||
relpath[0] != '\0'))
str++;
if (relpath[0] == '\0')
(void) snprintf(propbuf, proplen, "%s%s",
root, str);
else
(void) snprintf(propbuf, proplen, "%s%s%s%s",
root, str, relpath[0] == '@' ? "" : "/",
relpath);
} else {
/* 'legacy' or 'none' */
(void) strlcpy(propbuf, str, proplen);
}
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_ORIGIN:
str = getprop_string(zhp, prop, &source);
if (str == NULL)
return (-1);
(void) strlcpy(propbuf, str, proplen);
zcp_check(zhp, prop, 0, str);
break;
case ZFS_PROP_CLONES:
if (get_clones_string(zhp, propbuf, proplen) != 0)
return (-1);
break;
case ZFS_PROP_QUOTA:
case ZFS_PROP_REFQUOTA:
case ZFS_PROP_RESERVATION:
case ZFS_PROP_REFRESERVATION:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If quota or reservation is 0, we translate this into 'none'
* (unless literal is set), and indicate that it's the default
* value. Otherwise, we print the number nicely and indicate
* that it's set locally.
*/
if (val == 0) {
if (literal)
(void) strlcpy(propbuf, "0", proplen);
else
(void) strlcpy(propbuf, "none", proplen);
} else {
if (literal)
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
else
zfs_nicebytes(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
case ZFS_PROP_FILESYSTEM_COUNT:
case ZFS_PROP_SNAPSHOT_COUNT:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
/*
* If limit is UINT64_MAX, we translate this into 'none' (unless
* literal is set), and indicate that it's the default value.
* Otherwise, we print the number nicely and indicate that it's
* set locally.
*/
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else if (val == UINT64_MAX) {
(void) strlcpy(propbuf, "none", proplen);
} else {
zfs_nicenum(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_REFRATIO:
case ZFS_PROP_COMPRESSRATIO:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
if (literal)
(void) snprintf(propbuf, proplen, "%llu.%02llu",
(u_longlong_t)(val / 100),
(u_longlong_t)(val % 100));
else
(void) snprintf(propbuf, proplen, "%llu.%02llux",
(u_longlong_t)(val / 100),
(u_longlong_t)(val % 100));
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_TYPE:
switch (zhp->zfs_type) {
case ZFS_TYPE_FILESYSTEM:
str = "filesystem";
break;
case ZFS_TYPE_VOLUME:
str = "volume";
break;
case ZFS_TYPE_SNAPSHOT:
str = "snapshot";
break;
case ZFS_TYPE_BOOKMARK:
str = "bookmark";
break;
default:
abort();
}
(void) snprintf(propbuf, proplen, "%s", str);
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_MOUNTED:
/*
* The 'mounted' property is a pseudo-property that describes
* whether the filesystem is currently mounted. Even though
* it's a boolean value, the typical values of "on" and "off"
* don't make sense, so we translate to "yes" and "no".
*/
if (get_numeric_property(zhp, ZFS_PROP_MOUNTED,
src, &source, &val) != 0)
return (-1);
if (val)
(void) strlcpy(propbuf, "yes", proplen);
else
(void) strlcpy(propbuf, "no", proplen);
break;
case ZFS_PROP_NAME:
/*
* The 'name' property is a pseudo-property derived from the
* dataset name. It is presented as a real property to simplify
* consumers.
*/
(void) strlcpy(propbuf, zhp->zfs_name, proplen);
zcp_check(zhp, prop, 0, propbuf);
break;
case ZFS_PROP_MLSLABEL:
{
#ifdef HAVE_MLSLABEL
m_label_t *new_sl = NULL;
char *ascii = NULL; /* human readable label */
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
if (literal || (strcasecmp(propbuf,
ZFS_MLSLABEL_DEFAULT) == 0))
break;
/*
* Try to translate the internal hex string to
* human-readable output. If there are any
* problems just use the hex string.
*/
if (str_to_label(propbuf, &new_sl, MAC_LABEL,
L_NO_CORRECTION, NULL) == -1) {
m_label_free(new_sl);
break;
}
if (label_to_str(new_sl, &ascii, M_LABEL,
DEF_NAMES) != 0) {
if (ascii)
free(ascii);
m_label_free(new_sl);
break;
}
m_label_free(new_sl);
(void) strlcpy(propbuf, ascii, proplen);
free(ascii);
#else
(void) strlcpy(propbuf,
getprop_string(zhp, prop, &source), proplen);
#endif /* HAVE_MLSLABEL */
}
break;
case ZFS_PROP_GUID:
case ZFS_PROP_CREATETXG:
/*
* GUIDs are stored as numbers, but they are identifiers.
* We don't want them to be pretty printed, because pretty
* printing mangles the ID into a truncated and useless value.
*/
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
(void) snprintf(propbuf, proplen, "%llu", (u_longlong_t)val);
zcp_check(zhp, prop, val, NULL);
break;
case ZFS_PROP_REFERENCED:
case ZFS_PROP_AVAILABLE:
case ZFS_PROP_USED:
case ZFS_PROP_USEDSNAP:
case ZFS_PROP_USEDDS:
case ZFS_PROP_USEDREFRESERV:
case ZFS_PROP_USEDCHILD:
if (get_numeric_property(zhp, prop, src, &source, &val) != 0)
return (-1);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else {
zfs_nicebytes(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
default:
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0) {
return (-1);
}
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)val);
} else {
zfs_nicenum(val, propbuf, proplen);
}
zcp_check(zhp, prop, val, NULL);
break;
case PROP_TYPE_STRING:
str = getprop_string(zhp, prop, &source);
if (str == NULL)
return (-1);
(void) strlcpy(propbuf, str, proplen);
zcp_check(zhp, prop, 0, str);
break;
case PROP_TYPE_INDEX:
if (get_numeric_property(zhp, prop, src,
&source, &val) != 0)
return (-1);
if (zfs_prop_index_to_string(prop, val, &strval) != 0)
return (-1);
(void) strlcpy(propbuf, strval, proplen);
zcp_check(zhp, prop, 0, strval);
break;
default:
abort();
}
}
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
/*
* Utility function to get the given numeric property. It does not validate
* that the given property is of the appropriate type; it should only be used
* with hard-coded property types.
*/
uint64_t
zfs_prop_get_int(zfs_handle_t *zhp, zfs_prop_t prop)
{
char *source;
uint64_t val = 0;
(void) get_numeric_property(zhp, prop, NULL, &source, &val);
return (val);
}
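/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * a minimal consumer of zfs_prop_get_int(), assuming a libzfs handle from
 * libzfs_init() and a hypothetical dataset named "tank/home". Guarded with
 * #if 0 so it serves as documentation only.
 */
#if 0
static void
example_prop_get_int(libzfs_handle_t *hdl)
{
	zfs_handle_t *zhp;

	/* Open the dataset and read the hard-coded numeric 'used' property. */
	zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
	if (zhp != NULL) {
		uint64_t used = zfs_prop_get_int(zhp, ZFS_PROP_USED);
		(void) printf("used: %llu bytes\n", (u_longlong_t)used);
		zfs_close(zhp);
	}
}
#endif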
int
zfs_prop_set_int(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t val)
{
char buf[64];
(void) snprintf(buf, sizeof (buf), "%llu", (longlong_t)val);
return (zfs_prop_set(zhp, zfs_prop_to_name(prop), buf));
}
/*
* Similar to zfs_prop_get(), but returns the value as an integer.
*/
int
zfs_prop_get_numeric(zfs_handle_t *zhp, zfs_prop_t prop, uint64_t *value,
zprop_source_t *src, char *statbuf, size_t statlen)
{
char *source;
/*
* Check to see if this property applies to our object
*/
if (!zfs_prop_valid_for_type(prop, zhp->zfs_type, B_FALSE)) {
return (zfs_error_fmt(zhp->zfs_hdl, EZFS_PROPTYPE,
dgettext(TEXT_DOMAIN, "cannot get property '%s'"),
zfs_prop_to_name(prop)));
}
if (src)
*src = ZPROP_SRC_NONE;
if (get_numeric_property(zhp, prop, src, &source, value) != 0)
return (-1);
get_source(zhp, src, source, statbuf, statlen);
return (0);
}
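/*
 * Illustrative sketch (editor's addition): reading a numeric property
 * together with its source via zfs_prop_get_numeric(). "tank/home" is a
 * hypothetical dataset name; the snippet is guarded with #if 0 and serves
 * as documentation only.
 */
#if 0
static void
example_prop_get_numeric(libzfs_handle_t *hdl)
{
	zfs_handle_t *zhp = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
	uint64_t quota;
	zprop_source_t src;
	char statbuf[ZFS_MAXPROPLEN];

	if (zhp == NULL)
		return;
	/* On success, 'src' tells whether the value is local, inherited, etc. */
	if (zfs_prop_get_numeric(zhp, ZFS_PROP_QUOTA, &quota, &src,
	    statbuf, sizeof (statbuf)) == 0) {
		(void) printf("quota=%llu src=%d\n", (u_longlong_t)quota,
		    (int)src);
	}
	zfs_close(zhp);
}
#endif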
#ifdef HAVE_IDMAP
static int
idmap_id_to_numeric_domain_rid(uid_t id, boolean_t isuser,
char **domainp, idmap_rid_t *ridp)
{
idmap_get_handle_t *get_hdl = NULL;
idmap_stat status;
int err = EINVAL;
if (idmap_get_create(&get_hdl) != IDMAP_SUCCESS)
goto out;
if (isuser) {
err = idmap_get_sidbyuid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
} else {
err = idmap_get_sidbygid(get_hdl, id,
IDMAP_REQ_FLG_USE_CACHE, domainp, ridp, &status);
}
if (err == IDMAP_SUCCESS &&
idmap_get_mappings(get_hdl) == IDMAP_SUCCESS &&
status == IDMAP_SUCCESS)
err = 0;
else
err = EINVAL;
out:
if (get_hdl)
idmap_get_destroy(get_hdl);
return (err);
}
#endif /* HAVE_IDMAP */
/*
* Convert the propname into the parameters needed by the kernel.
* Eg: userquota@ahrens -> ZFS_PROP_USERQUOTA, "", 126829
* Eg: userused@matt@domain -> ZFS_PROP_USERUSED, "S-1-123-456", 789
* Eg: groupquota@staff -> ZFS_PROP_GROUPQUOTA, "", 1234
* Eg: groupused@staff -> ZFS_PROP_GROUPUSED, "", 1234
* Eg: projectquota@123 -> ZFS_PROP_PROJECTQUOTA, "", 123
* Eg: projectused@789 -> ZFS_PROP_PROJECTUSED, "", 789
*/
static int
userquota_propname_decode(const char *propname, boolean_t zoned,
zfs_userquota_prop_t *typep, char *domain, int domainlen, uint64_t *ridp)
{
zfs_userquota_prop_t type;
char *cp;
boolean_t isuser;
boolean_t isgroup;
boolean_t isproject;
struct passwd *pw;
struct group *gr;
domain[0] = '\0';
/* Figure out the property type ({user|group|project}{quota|space}) */
for (type = 0; type < ZFS_NUM_USERQUOTA_PROPS; type++) {
if (strncmp(propname, zfs_userquota_prop_prefixes[type],
strlen(zfs_userquota_prop_prefixes[type])) == 0)
break;
}
if (type == ZFS_NUM_USERQUOTA_PROPS)
return (EINVAL);
*typep = type;
isuser = (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_USERUSED ||
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_USEROBJUSED);
isgroup = (type == ZFS_PROP_GROUPQUOTA || type == ZFS_PROP_GROUPUSED ||
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_GROUPOBJUSED);
isproject = (type == ZFS_PROP_PROJECTQUOTA ||
type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTOBJQUOTA ||
type == ZFS_PROP_PROJECTOBJUSED);
cp = strchr(propname, '@') + 1;
if (isuser && (pw = getpwnam(cp)) != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = pw->pw_uid;
} else if (isgroup && (gr = getgrnam(cp)) != NULL) {
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
*ridp = gr->gr_gid;
} else if (!isproject && strchr(cp, '@')) {
#ifdef HAVE_IDMAP
/*
* It's a SID name (eg "user@domain") that needs to be
* turned into S-1-domainID-RID.
*/
directory_error_t e;
char *numericsid = NULL;
char *end;
if (zoned && getzoneid() == GLOBAL_ZONEID)
return (ENOENT);
if (isuser) {
e = directory_sid_from_user_name(NULL,
cp, &numericsid);
} else {
e = directory_sid_from_group_name(NULL,
cp, &numericsid);
}
if (e != NULL) {
directory_error_free(e);
return (ENOENT);
}
if (numericsid == NULL)
return (ENOENT);
cp = numericsid;
(void) strlcpy(domain, cp, domainlen);
cp = strrchr(domain, '-');
*cp = '\0';
cp++;
errno = 0;
*ridp = strtoull(cp, &end, 10);
free(numericsid);
if (errno != 0 || *end != '\0')
return (EINVAL);
#else
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
/* It's a user/group/project ID (eg "12345"). */
uid_t id;
char *end;
id = strtoul(cp, &end, 10);
if (*end != '\0')
return (EINVAL);
if (id > MAXUID && !isproject) {
#ifdef HAVE_IDMAP
/* It's an ephemeral ID. */
idmap_rid_t rid;
char *mapdomain;
if (idmap_id_to_numeric_domain_rid(id, isuser,
&mapdomain, &rid) != 0)
return (ENOENT);
(void) strlcpy(domain, mapdomain, domainlen);
*ridp = rid;
#else
return (ENOSYS);
#endif /* HAVE_IDMAP */
} else {
*ridp = id;
}
}
return (0);
}
static int
zfs_prop_get_userquota_common(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue, zfs_userquota_prop_t *typep)
{
int err;
zfs_cmd_t zc = {"\0"};
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
err = userquota_propname_decode(propname,
zfs_prop_get_int(zhp, ZFS_PROP_ZONED),
typep, zc.zc_value, sizeof (zc.zc_value), &zc.zc_guid);
zc.zc_objset_type = *typep;
if (err)
return (err);
err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_USERSPACE_ONE, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_userquota_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
zfs_userquota_prop_t type;
return (zfs_prop_get_userquota_common(zhp, propname, propvalue,
&type));
}
int
zfs_prop_get_userquota(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
zfs_userquota_prop_t type;
err = zfs_prop_get_userquota_common(zhp, propname, &propvalue,
&type);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else if (propvalue == 0 &&
(type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
type == ZFS_PROP_USEROBJQUOTA || type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_PROJECTQUOTA ||
type == ZFS_PROP_PROJECTOBJQUOTA)) {
(void) strlcpy(propbuf, "none", proplen);
} else if (type == ZFS_PROP_USERQUOTA || type == ZFS_PROP_GROUPQUOTA ||
type == ZFS_PROP_USERUSED || type == ZFS_PROP_GROUPUSED ||
type == ZFS_PROP_PROJECTUSED || type == ZFS_PROP_PROJECTQUOTA) {
zfs_nicebytes(propvalue, propbuf, proplen);
} else {
zfs_nicenum(propvalue, propbuf, proplen);
}
return (0);
}
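/*
 * Illustrative sketch (editor's addition): querying a {user|group|project}
 * accounting property by name. The property string is decoded by
 * userquota_propname_decode() above; "userused@1000" and the caller's
 * dataset handle are hypothetical. Guarded with #if 0.
 */
#if 0
static void
example_userquota(zfs_handle_t *zhp)
{
	char buf[ZFS_MAXPROPLEN];
	uint64_t raw;

	/* Human-readable (nicenum) form ... */
	if (zfs_prop_get_userquota(zhp, "userused@1000", buf,
	    sizeof (buf), B_FALSE) == 0)
		(void) printf("userused@1000 = %s\n", buf);
	/* ... and the raw byte count. */
	if (zfs_prop_get_userquota_int(zhp, "userused@1000", &raw) == 0)
		(void) printf("userused@1000 = %llu bytes\n",
		    (u_longlong_t)raw);
}
#endif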
int
zfs_prop_get_written_int(zfs_handle_t *zhp, const char *propname,
uint64_t *propvalue)
{
int err;
zfs_cmd_t zc = {"\0"};
const char *snapname;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
snapname = strchr(propname, '@') + 1;
if (strchr(snapname, '@')) {
(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
} else {
/* snapname is the short name, append it to zhp's fsname */
char *cp;
(void) strlcpy(zc.zc_value, zhp->zfs_name,
sizeof (zc.zc_value));
cp = strchr(zc.zc_value, '@');
if (cp != NULL)
*cp = '\0';
(void) strlcat(zc.zc_value, "@", sizeof (zc.zc_value));
(void) strlcat(zc.zc_value, snapname, sizeof (zc.zc_value));
}
err = ioctl(zhp->zfs_hdl->libzfs_fd, ZFS_IOC_SPACE_WRITTEN, &zc);
if (err)
return (err);
*propvalue = zc.zc_cookie;
return (0);
}
int
zfs_prop_get_written(zfs_handle_t *zhp, const char *propname,
char *propbuf, int proplen, boolean_t literal)
{
int err;
uint64_t propvalue;
err = zfs_prop_get_written_int(zhp, propname, &propvalue);
if (err)
return (err);
if (literal) {
(void) snprintf(propbuf, proplen, "%llu",
(u_longlong_t)propvalue);
} else {
zfs_nicebytes(propvalue, propbuf, proplen);
}
return (0);
}
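/*
 * Illustrative sketch (editor's addition): the "written@<snap>" pseudo-property
 * accepts either a short snapshot name or a fully qualified one, as handled
 * above. "yesterday" is a hypothetical snapshot name; guarded with #if 0.
 */
#if 0
static void
example_written(zfs_handle_t *zhp)
{
	uint64_t written;

	/* Space written to 'zhp' since the snapshot "yesterday". */
	if (zfs_prop_get_written_int(zhp, "written@yesterday", &written) == 0)
		(void) printf("written since yesterday: %llu bytes\n",
		    (u_longlong_t)written);
}
#endif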
/*
* Returns the name of the given zfs handle.
*/
const char *
zfs_get_name(const zfs_handle_t *zhp)
{
return (zhp->zfs_name);
}
/*
* Returns the name of the parent pool for the given zfs handle.
*/
const char *
zfs_get_pool_name(const zfs_handle_t *zhp)
{
return (zhp->zpool_hdl->zpool_name);
}
/*
* Returns the type of the given zfs handle.
*/
zfs_type_t
zfs_get_type(const zfs_handle_t *zhp)
{
return (zhp->zfs_type);
}
/*
* Is one dataset name a child dataset of another?
*
* Needs to handle these cases:
* Dataset 1 "a/foo" "a/foo" "a/foo" "a/foo"
* Dataset 2 "a/fo" "a/foobar" "a/bar/baz" "a/foo/bar"
* Descendant? No. No. No. Yes.
*/
static boolean_t
is_descendant(const char *ds1, const char *ds2)
{
size_t d1len = strlen(ds1);
/* ds2 can't be a descendant if it's smaller */
if (strlen(ds2) < d1len)
return (B_FALSE);
/* otherwise, compare strings and verify that there's a '/' char */
return (ds2[d1len] == '/' && (strncmp(ds1, ds2, d1len) == 0));
}
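/*
 * Editor's sketch restating the table above as checks. is_descendant() is
 * static to this file, so this is documentation only (guarded with #if 0).
 */
#if 0
static void
example_is_descendant(void)
{
	assert(is_descendant("a/foo", "a/fo") == B_FALSE);
	assert(is_descendant("a/foo", "a/foobar") == B_FALSE);
	assert(is_descendant("a/foo", "a/bar/baz") == B_FALSE);
	assert(is_descendant("a/foo", "a/foo/bar") == B_TRUE);
}
#endif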
/*
* Given a complete name, return just the portion that refers to the parent.
* Will return -1 if there is no parent (path is just the name of the
* pool).
*/
static int
parent_name(const char *path, char *buf, size_t buflen)
{
char *slashp;
(void) strlcpy(buf, path, buflen);
if ((slashp = strrchr(buf, '/')) == NULL)
return (-1);
*slashp = '\0';
return (0);
}
int
zfs_parent_name(zfs_handle_t *zhp, char *buf, size_t buflen)
{
return (parent_name(zfs_get_name(zhp), buf, buflen));
}
/*
* If accept_ancestor is false, then check to make sure that the given path has
* a parent, and that it exists. If accept_ancestor is true, then find the
* closest existing ancestor for the given path. In prefixlen return the
* length of already existing prefix of the given path. We also fetch the
* 'zoned' property, which is used to validate property settings when creating
* new datasets.
*/
static int
check_parents(libzfs_handle_t *hdl, const char *path, uint64_t *zoned,
boolean_t accept_ancestor, int *prefixlen)
{
zfs_cmd_t zc = {"\0"};
char parent[ZFS_MAX_DATASET_NAME_LEN];
char *slash;
zfs_handle_t *zhp;
char errbuf[1024];
uint64_t is_zoned;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot create '%s'"), path);
/* get parent, and check to see if this is just a pool */
if (parent_name(path, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
/* check to see if the pool exists */
if ((slash = strchr(parent, '/')) == NULL)
slash = parent + strlen(parent);
(void) strncpy(zc.zc_name, parent, slash - parent);
zc.zc_name[slash - parent] = '\0';
if (ioctl(hdl->libzfs_fd, ZFS_IOC_OBJSET_STATS, &zc) != 0 &&
errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
/* check to see if the parent dataset exists */
while ((zhp = make_dataset_handle(hdl, parent)) == NULL) {
if (errno == ENOENT && accept_ancestor) {
/*
* Go deeper to find an ancestor, give up on top level.
*/
if (parent_name(parent, parent, sizeof (parent)) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such pool '%s'"), zc.zc_name);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
}
} else if (errno == ENOENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent does not exist"));
return (zfs_error(hdl, EZFS_NOENT, errbuf));
} else
return (zfs_standard_error(hdl, errno, errbuf));
}
is_zoned = zfs_prop_get_int(zhp, ZFS_PROP_ZONED);
if (zoned != NULL)
*zoned = is_zoned;
/* we are in a non-global zone, but parent is in the global zone */
if (getzoneid() != GLOBAL_ZONEID && !is_zoned) {
(void) zfs_standard_error(hdl, EPERM, errbuf);
zfs_close(zhp);
return (-1);
}
/* make sure parent is a filesystem */
if (zfs_get_type(zhp) != ZFS_TYPE_FILESYSTEM) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent is not a filesystem"));
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
zfs_close(zhp);
return (-1);
}
zfs_close(zhp);
if (prefixlen != NULL)
*prefixlen = strlen(parent);
return (0);
}
/*
* Determines whether a dataset of the given type(s) exists.
*/
boolean_t
zfs_dataset_exists(libzfs_handle_t *hdl, const char *path, zfs_type_t types)
{
zfs_handle_t *zhp;
if (!zfs_validate_name(hdl, path, types, B_FALSE))
return (B_FALSE);
/*
* Try to get stats for the dataset, which will tell us if it exists.
*/
if ((zhp = make_dataset_handle(hdl, path)) != NULL) {
int ds_type = zhp->zfs_type;
zfs_close(zhp);
if (types & ds_type)
return (B_TRUE);
}
return (B_FALSE);
}
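/*
 * Illustrative sketch (editor's addition): probing for a dataset before
 * acting on it. "tank/home" is a hypothetical name; guarded with #if 0.
 */
#if 0
static void
example_dataset_exists(libzfs_handle_t *hdl)
{
	/* Any dataset type (filesystem, volume, or snapshot) qualifies. */
	if (zfs_dataset_exists(hdl, "tank/home", ZFS_TYPE_DATASET))
		(void) printf("tank/home exists\n");
}
#endif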
/*
* Given a path to 'target', create all the ancestors between
* the prefixlen portion of the path, and the target itself.
* Fail if the initial prefixlen-ancestor does not already exist.
*/
int
create_parents(libzfs_handle_t *hdl, char *target, int prefixlen)
{
zfs_handle_t *h;
char *cp;
const char *opname;
/* make sure prefix exists */
cp = target + prefixlen;
if (*cp != '/') {
assert(strchr(cp, '/') == NULL);
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
} else {
*cp = '\0';
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
*cp = '/';
}
if (h == NULL)
return (-1);
zfs_close(h);
/*
* Attempt to create, mount, and share any ancestor filesystems,
* up to the prefixlen-long one.
*/
for (cp = target + prefixlen + 1;
(cp = strchr(cp, '/')) != NULL; *cp = '/', cp++) {
*cp = '\0';
h = make_dataset_handle(hdl, target);
if (h) {
/* it already exists, nothing to do here */
zfs_close(h);
continue;
}
if (zfs_create(hdl, target, ZFS_TYPE_FILESYSTEM,
NULL) != 0) {
opname = dgettext(TEXT_DOMAIN, "create");
goto ancestorerr;
}
h = zfs_open(hdl, target, ZFS_TYPE_FILESYSTEM);
if (h == NULL) {
opname = dgettext(TEXT_DOMAIN, "open");
goto ancestorerr;
}
if (zfs_mount(h, NULL, 0) != 0) {
opname = dgettext(TEXT_DOMAIN, "mount");
goto ancestorerr;
}
if (zfs_share(h) != 0) {
opname = dgettext(TEXT_DOMAIN, "share");
goto ancestorerr;
}
zfs_close(h);
}
return (0);
ancestorerr:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"failed to %s ancestor '%s'"), opname, target);
return (-1);
}
/*
* Creates non-existing ancestors of the given path.
*/
int
zfs_create_ancestors(libzfs_handle_t *hdl, const char *path)
{
int prefix;
char *path_copy;
int rc = 0;
if (check_parents(hdl, path, NULL, B_TRUE, &prefix) != 0)
return (-1);
if ((path_copy = strdup(path)) != NULL) {
rc = create_parents(hdl, path_copy, prefix);
free(path_copy);
}
if (path_copy == NULL || rc != 0)
return (-1);
return (0);
}
/*
* Create a new filesystem or volume.
*/
int
zfs_create(libzfs_handle_t *hdl, const char *path, zfs_type_t type,
nvlist_t *props)
{
int ret;
uint64_t size = 0;
uint64_t blocksize = zfs_prop_default_numeric(ZFS_PROP_VOLBLOCKSIZE);
uint64_t zoned;
enum lzc_dataset_type ost;
zpool_handle_t *zpool_handle;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
char errbuf[1024];
char parent[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), path);
/* validate the path, taking care to note the extended error message */
if (!zfs_validate_name(hdl, path, type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents exist */
if (check_parents(hdl, path, &zoned, B_FALSE, NULL) != 0)
return (-1);
/*
* The failure modes when creating a dataset of a different type over
* one that already exists are a little strange. In particular, if you
* try to create a dataset on top of an existing dataset, the ioctl()
* will return ENOENT, not EEXIST. To prevent this from happening, we
* first try to see if the dataset exists.
*/
if (zfs_dataset_exists(hdl, path, ZFS_TYPE_DATASET)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
}
if (type == ZFS_TYPE_VOLUME)
ost = LZC_DATSET_TYPE_ZVOL;
else
ost = LZC_DATSET_TYPE_ZFS;
/* open zpool handle for prop validation */
char pool_path[ZFS_MAX_DATASET_NAME_LEN];
(void) strlcpy(pool_path, path, sizeof (pool_path));
/* truncate pool_path at first slash */
char *p = strchr(pool_path, '/');
if (p != NULL)
*p = '\0';
if ((zpool_handle = zpool_open(hdl, pool_path)) == NULL)
return (-1);
if (props && (props = zfs_valid_proplist(hdl, type, props,
zoned, NULL, zpool_handle, B_TRUE, errbuf)) == 0) {
zpool_close(zpool_handle);
return (-1);
}
zpool_close(zpool_handle);
if (type == ZFS_TYPE_VOLUME) {
/*
* If we are creating a volume, the size and block size must
* satisfy a few constraints. First, the blocksize must be a
* valid block size between SPA_{MIN,MAX}BLOCKSIZE. Second, the
* volsize must be a multiple of the block size, and cannot be
* zero.
*/
if (props == NULL || nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &size) != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if ((ret = nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&blocksize)) != 0) {
if (ret == ENOENT) {
blocksize = zfs_prop_default_numeric(
ZFS_PROP_VOLBLOCKSIZE);
} else {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"missing volume block size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
if (size == 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size cannot be zero"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
if (size % blocksize != 0) {
nvlist_free(props);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"volume size must be a multiple of volume block "
"size"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
}
}
(void) parent_name(path, parent, sizeof (parent));
if (zfs_crypto_create(hdl, parent, props, NULL, &wkeydata,
&wkeylen) != 0) {
nvlist_free(props);
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
}
/* create the dataset */
ret = lzc_create(path, ost, props, wkeydata, wkeylen);
nvlist_free(props);
if (wkeydata != NULL)
free(wkeydata);
/* check for failure */
if (ret != 0) {
switch (errno) {
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(hdl, EZFS_NOENT, errbuf));
case EINVAL:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"parent '%s' is not a filesystem"), parent);
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to set this "
"property or value"));
return (zfs_error(hdl, EZFS_BADVERSION, errbuf));
case EACCES:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"encryption root's key is not loaded "
"or provided"));
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
case ERANGE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property value(s) specified"));
return (zfs_error(hdl, EZFS_BADPROP, errbuf));
#ifdef _ILP32
case EOVERFLOW:
/*
* This platform can't address a volume this big.
*/
if (type == ZFS_TYPE_VOLUME)
return (zfs_error(hdl, EZFS_VOLTOOBIG,
errbuf));
#endif
/* FALLTHROUGH */
default:
return (zfs_standard_error(hdl, errno, errbuf));
}
}
return (0);
}
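/*
 * Illustrative sketch (editor's addition): creating a volume. As described
 * above, a volume needs a non-zero 'volsize' that is a multiple of the block
 * size; the block size defaults if unspecified. "tank/vol0" and the 1 GiB
 * size are hypothetical. Guarded with #if 0.
 */
#if 0
static int
example_create_volume(libzfs_handle_t *hdl)
{
	nvlist_t *props = fnvlist_alloc();
	int err;

	/* 1 GiB volume; volblocksize is left at its default. */
	fnvlist_add_uint64(props, zfs_prop_to_name(ZFS_PROP_VOLSIZE),
	    1ULL << 30);
	err = zfs_create(hdl, "tank/vol0", ZFS_TYPE_VOLUME, props);
	fnvlist_free(props);
	return (err);
}
#endif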
/*
* Destroys the given dataset. The caller must make sure that the filesystem
* isn't mounted, and that there are no active dependents. If the file system
* does not exist this function does nothing.
*/
int
zfs_destroy(zfs_handle_t *zhp, boolean_t defer)
{
zfs_cmd_t zc = {"\0"};
if (zhp->zfs_type == ZFS_TYPE_BOOKMARK) {
nvlist_t *nv = fnvlist_alloc();
fnvlist_add_boolean(nv, zhp->zfs_name);
int error = lzc_destroy_bookmarks(nv, NULL);
fnvlist_free(nv);
if (error != 0) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
return (0);
}
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (ZFS_IS_VOLUME(zhp)) {
zc.zc_objset_type = DMU_OST_ZVOL;
} else {
zc.zc_objset_type = DMU_OST_ZFS;
}
zc.zc_defer_destroy = defer;
if (zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_DESTROY, &zc) != 0 &&
errno != ENOENT) {
return (zfs_standard_error_fmt(zhp->zfs_hdl, errno,
dgettext(TEXT_DOMAIN, "cannot destroy '%s'"),
zhp->zfs_name));
}
remove_mountpoint(zhp);
return (0);
}
struct destroydata {
nvlist_t *nvl;
const char *snapname;
};
static int
zfs_check_snap_cb(zfs_handle_t *zhp, void *arg)
{
struct destroydata *dd = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
dd->snapname) >= sizeof (name))
return (EINVAL);
if (lzc_exists(name))
verify(nvlist_add_boolean(dd->nvl, name) == 0);
rv = zfs_iter_filesystems(zhp, zfs_check_snap_cb, dd);
zfs_close(zhp);
return (rv);
}
/*
* Destroys all snapshots with the given name in zhp & descendants.
*/
int
zfs_destroy_snaps(zfs_handle_t *zhp, char *snapname, boolean_t defer)
{
int ret;
struct destroydata dd = { 0 };
dd.snapname = snapname;
verify(nvlist_alloc(&dd.nvl, NV_UNIQUE_NAME, 0) == 0);
(void) zfs_check_snap_cb(zfs_handle_dup(zhp), &dd);
if (nvlist_empty(dd.nvl)) {
ret = zfs_standard_error_fmt(zhp->zfs_hdl, ENOENT,
dgettext(TEXT_DOMAIN, "cannot destroy '%s@%s'"),
zhp->zfs_name, snapname);
} else {
ret = zfs_destroy_snaps_nvl(zhp->zfs_hdl, dd.nvl, defer);
}
nvlist_free(dd.nvl);
return (ret);
}
/*
* Destroys all the snapshots named in the nvlist.
*/
int
zfs_destroy_snaps_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, boolean_t defer)
{
int ret;
nvlist_t *errlist = NULL;
nvpair_t *pair;
ret = lzc_destroy_snaps(snaps, defer, &errlist);
if (ret == 0) {
nvlist_free(errlist);
return (0);
}
if (nvlist_empty(errlist)) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshots"));
ret = zfs_standard_error(hdl, ret, errbuf);
}
for (pair = nvlist_next_nvpair(errlist, NULL);
pair != NULL; pair = nvlist_next_nvpair(errlist, pair)) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot destroy snapshot %s"),
nvpair_name(pair));
switch (fnvpair_value_int32(pair)) {
case EEXIST:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "snapshot is cloned"));
ret = zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
ret = zfs_standard_error(hdl, errno, errbuf);
break;
}
}
nvlist_free(errlist);
return (ret);
}
/*
* Clones the given dataset. The target must be of the same type as the source.
*/
int
zfs_clone(zfs_handle_t *zhp, const char *target, nvlist_t *props)
{
char parent[ZFS_MAX_DATASET_NAME_LEN];
int ret;
char errbuf[1024];
libzfs_handle_t *hdl = zhp->zfs_hdl;
uint64_t zoned;
assert(zhp->zfs_type == ZFS_TYPE_SNAPSHOT);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), target);
/* validate the target/clone name */
if (!zfs_validate_name(hdl, target, ZFS_TYPE_FILESYSTEM, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents exist */
if (check_parents(hdl, target, &zoned, B_FALSE, NULL) != 0)
return (-1);
(void) parent_name(target, parent, sizeof (parent));
/* do the clone */
if (props) {
zfs_type_t type;
if (ZFS_IS_VOLUME(zhp)) {
type = ZFS_TYPE_VOLUME;
} else {
type = ZFS_TYPE_FILESYSTEM;
}
if ((props = zfs_valid_proplist(hdl, type, props, zoned,
zhp, zhp->zpool_hdl, B_TRUE, errbuf)) == NULL)
return (-1);
if (zfs_fix_auto_resv(zhp, props) == -1) {
nvlist_free(props);
return (-1);
}
}
if (zfs_crypto_clone_check(hdl, zhp, parent, props) != 0) {
nvlist_free(props);
return (zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf));
}
ret = lzc_clone(target, zhp->zfs_name, props);
nvlist_free(props);
if (ret != 0) {
switch (errno) {
case ENOENT:
/*
* The parent doesn't exist. We should have caught this
* above, but there may be a race condition that has since
* destroyed the parent.
*
* At this point, we don't know whether it's the source
* that doesn't exist anymore, or whether the target
* dataset doesn't exist.
*/
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"no such parent '%s'"), parent);
return (zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf));
case EXDEV:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"source and target pools differ"));
return (zfs_error(zhp->zfs_hdl, EZFS_CROSSTARGET,
errbuf));
default:
return (zfs_standard_error(zhp->zfs_hdl, errno,
errbuf));
}
}
return (ret);
}
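/*
 * Illustrative sketch (editor's addition): cloning a snapshot. zfs_clone()
 * requires a snapshot handle, so the caller opens one first; "tank/home@snap"
 * and "tank/clone" are hypothetical names. Guarded with #if 0.
 */
#if 0
static int
example_clone(libzfs_handle_t *hdl)
{
	zfs_handle_t *snap;
	int err;

	snap = zfs_open(hdl, "tank/home@snap", ZFS_TYPE_SNAPSHOT);
	if (snap == NULL)
		return (-1);
	/* NULL props: the clone inherits everything from its parent. */
	err = zfs_clone(snap, "tank/clone", NULL);
	zfs_close(snap);
	return (err);
}
#endif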
/*
* Promotes the given clone fs to be the clone parent.
*/
int
zfs_promote(zfs_handle_t *zhp)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
char snapname[ZFS_MAX_DATASET_NAME_LEN];
int ret;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot promote '%s'"), zhp->zfs_name);
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots can not be promoted"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (zhp->zfs_dmustats.dds_origin[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"not a cloned filesystem"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
ret = lzc_promote(zhp->zfs_name, snapname, sizeof (snapname));
if (ret != 0) {
switch (ret) {
case EEXIST:
/* There is a conflicting snapshot name. */
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"conflicting snapshot '%s' from parent '%s'"),
snapname, zhp->zfs_dmustats.dds_origin);
return (zfs_error(hdl, EZFS_EXISTS, errbuf));
default:
return (zfs_standard_error(hdl, ret, errbuf));
}
}
return (ret);
}
typedef struct snapdata {
nvlist_t *sd_nvl;
const char *sd_snapname;
} snapdata_t;
static int
zfs_snapshot_cb(zfs_handle_t *zhp, void *arg)
{
snapdata_t *sd = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (zfs_prop_get_int(zhp, ZFS_PROP_INCONSISTENT) == 0) {
if (snprintf(name, sizeof (name), "%s@%s", zfs_get_name(zhp),
sd->sd_snapname) >= sizeof (name))
return (EINVAL);
fnvlist_add_boolean(sd->sd_nvl, name);
rv = zfs_iter_filesystems(zhp, zfs_snapshot_cb, sd);
}
zfs_close(zhp);
return (rv);
}
+int
+zfs_remap_indirects(libzfs_handle_t *hdl, const char *fs)
+{
+ int err;
+ char errbuf[1024];
+
+ (void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
+ "cannot remap filesystem '%s' "), fs);
+
+ err = lzc_remap(fs);
+
+ if (err != 0) {
+ (void) zfs_standard_error(hdl, err, errbuf);
+ }
+
+ return (err);
+}
+
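/*
 * Illustrative sketch (editor's addition): invoking the remap helper added
 * above. Roughly, it asks the kernel (via lzc_remap()) to update blocks in
 * the filesystem that still go through a removed device's indirect mappings.
 * "tank/home" is a hypothetical name. Guarded with #if 0.
 */
#if 0
static int
example_remap(libzfs_handle_t *hdl)
{
	return (zfs_remap_indirects(hdl, "tank/home"));
}
#endif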
/*
* Creates snapshots. The keys in the snaps nvlist are the snapshots to be
* created.
*/
int
zfs_snapshot_nvl(libzfs_handle_t *hdl, nvlist_t *snaps, nvlist_t *props)
{
int ret;
char errbuf[1024];
nvpair_t *elem;
nvlist_t *errors;
zpool_handle_t *zpool_hdl;
char pool[ZFS_MAX_DATASET_NAME_LEN];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot create snapshots "));
elem = NULL;
while ((elem = nvlist_next_nvpair(snaps, elem)) != NULL) {
const char *snapname = nvpair_name(elem);
/* validate the target name */
if (!zfs_validate_name(hdl, snapname, ZFS_TYPE_SNAPSHOT,
B_TRUE)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), snapname);
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
/*
* Get pool handle for prop validation. Assumes all snaps are in the
* same pool, as does lzc_snapshot (below).
*/
elem = nvlist_next_nvpair(snaps, NULL);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/@")] = '\0';
zpool_hdl = zpool_open(hdl, pool);
if (zpool_hdl == NULL)
return (-1);
if (props != NULL &&
(props = zfs_valid_proplist(hdl, ZFS_TYPE_SNAPSHOT,
props, B_FALSE, NULL, zpool_hdl, B_FALSE, errbuf)) == NULL) {
zpool_close(zpool_hdl);
return (-1);
}
zpool_close(zpool_hdl);
ret = lzc_snapshot(snaps, props, &errors);
if (ret != 0) {
boolean_t printed = B_FALSE;
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot create snapshot '%s'"), nvpair_name(elem));
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
printed = B_TRUE;
}
if (!printed) {
switch (ret) {
case EXDEV:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple snapshots of same "
"fs not allowed"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
}
nvlist_free(props);
nvlist_free(errors);
return (ret);
}
int
zfs_snapshot(libzfs_handle_t *hdl, const char *path, boolean_t recursive,
nvlist_t *props)
{
int ret;
snapdata_t sd = { 0 };
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *cp;
zfs_handle_t *zhp;
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot snapshot %s"), path);
if (!zfs_validate_name(hdl, path, ZFS_TYPE_SNAPSHOT, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
(void) strlcpy(fsname, path, sizeof (fsname));
cp = strchr(fsname, '@');
*cp = '\0';
sd.sd_snapname = cp + 1;
if ((zhp = zfs_open(hdl, fsname, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME)) == NULL) {
return (-1);
}
verify(nvlist_alloc(&sd.sd_nvl, NV_UNIQUE_NAME, 0) == 0);
if (recursive) {
(void) zfs_snapshot_cb(zfs_handle_dup(zhp), &sd);
} else {
fnvlist_add_boolean(sd.sd_nvl, path);
}
ret = zfs_snapshot_nvl(hdl, sd.sd_nvl, props);
nvlist_free(sd.sd_nvl);
zfs_close(zhp);
return (ret);
}
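/*
 * Illustrative sketch (editor's addition): taking a recursive snapshot by
 * full snapshot name, which internally builds the nvlist consumed by
 * zfs_snapshot_nvl(). "tank/home@backup" is a hypothetical name; guarded
 * with #if 0.
 */
#if 0
static int
example_snapshot(libzfs_handle_t *hdl)
{
	/* B_TRUE: also snapshot every descendant filesystem/volume. */
	return (zfs_snapshot(hdl, "tank/home@backup", B_TRUE, NULL));
}
#endif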
/*
* Destroy any snapshots and bookmarks more recent than the rollback target.
* For each such snapshot we first invoke rollback_destroy_dependent() on its
* dependents (clones), which unmounts and destroys them; the snapshot itself
* is then destroyed without further transaction-group checks.
*/
typedef struct rollback_data {
const char *cb_target; /* the snapshot */
uint64_t cb_create; /* creation time reference */
boolean_t cb_error;
boolean_t cb_force;
} rollback_data_t;
static int
rollback_destroy_dependent(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
prop_changelist_t *clp;
/* We must destroy this clone; first unmount it */
clp = changelist_gather(zhp, ZFS_PROP_NAME, 0,
cbp->cb_force ? MS_FORCE: 0);
if (clp == NULL || changelist_prefix(clp) != 0) {
cbp->cb_error = B_TRUE;
zfs_close(zhp);
return (0);
}
if (zfs_destroy(zhp, B_FALSE) != 0)
cbp->cb_error = B_TRUE;
else
changelist_remove(clp, zhp->zfs_name);
(void) changelist_postfix(clp);
changelist_free(clp);
zfs_close(zhp);
return (0);
}
static int
rollback_destroy(zfs_handle_t *zhp, void *data)
{
rollback_data_t *cbp = data;
if (zfs_prop_get_int(zhp, ZFS_PROP_CREATETXG) > cbp->cb_create) {
cbp->cb_error |= zfs_iter_dependents(zhp, B_FALSE,
rollback_destroy_dependent, cbp);
cbp->cb_error |= zfs_destroy(zhp, B_FALSE);
}
zfs_close(zhp);
return (0);
}
/*
* Given a dataset, rollback to a specific snapshot, discarding any
* data changes since then and making it the active dataset.
*
* Any snapshots and bookmarks more recent than the target are
* destroyed, along with their dependents (i.e. clones).
*/
int
zfs_rollback(zfs_handle_t *zhp, zfs_handle_t *snap, boolean_t force)
{
rollback_data_t cb = { 0 };
int err;
boolean_t restore_resv = 0;
uint64_t old_volsize = 0, new_volsize;
zfs_prop_t resv_prop = { 0 };
assert(zhp->zfs_type == ZFS_TYPE_FILESYSTEM ||
zhp->zfs_type == ZFS_TYPE_VOLUME);
/*
* Destroy all recent snapshots and their dependents.
*/
cb.cb_force = force;
cb.cb_target = snap->zfs_name;
cb.cb_create = zfs_prop_get_int(snap, ZFS_PROP_CREATETXG);
(void) zfs_iter_snapshots(zhp, B_FALSE, rollback_destroy, &cb);
(void) zfs_iter_bookmarks(zhp, rollback_destroy, &cb);
if (cb.cb_error)
return (-1);
/*
* Now that we have verified that the snapshot is the latest,
* rollback to the given snapshot.
*/
if (zhp->zfs_type == ZFS_TYPE_VOLUME) {
if (zfs_which_resv_prop(zhp, &resv_prop) < 0)
return (-1);
old_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
restore_resv =
(old_volsize == zfs_prop_get_int(zhp, resv_prop));
}
/*
* Pass both the filesystem and the wanted snapshot names, so that
* we get an error back if the snapshot is destroyed or
* a new snapshot is created before this request is processed.
*/
err = lzc_rollback_to(zhp->zfs_name, snap->zfs_name);
if (err != 0) {
char errbuf[1024];
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot rollback '%s'"),
zhp->zfs_name);
switch (err) {
case EEXIST:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"there is a snapshot or bookmark more recent "
"than '%s'"), snap->zfs_name);
(void) zfs_error(zhp->zfs_hdl, EZFS_EXISTS, errbuf);
break;
case ESRCH:
zfs_error_aux(zhp->zfs_hdl, dgettext(TEXT_DOMAIN,
"'%s' is not found among snapshots of '%s'"),
snap->zfs_name, zhp->zfs_name);
(void) zfs_error(zhp->zfs_hdl, EZFS_NOENT, errbuf);
break;
case EINVAL:
(void) zfs_error(zhp->zfs_hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(zhp->zfs_hdl, err, errbuf);
}
return (err);
}
/*
* For volumes, if the pre-rollback volsize matched the pre-
* rollback reservation and the volsize has changed then set
* the reservation property to the post-rollback volsize.
* Make a new handle since the rollback closed the dataset.
*/
if ((zhp->zfs_type == ZFS_TYPE_VOLUME) &&
(zhp = make_dataset_handle(zhp->zfs_hdl, zhp->zfs_name))) {
if (restore_resv) {
new_volsize = zfs_prop_get_int(zhp, ZFS_PROP_VOLSIZE);
if (old_volsize != new_volsize)
err = zfs_prop_set_int(zhp, resv_prop,
new_volsize);
}
zfs_close(zhp);
}
return (err);
}
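/*
 * Illustrative sketch (editor's addition): rolling a filesystem back to a
 * snapshot. Both handles must be open, and everything newer than the target
 * snapshot is destroyed, as documented above. Hypothetical names; guarded
 * with #if 0.
 */
#if 0
static int
example_rollback(libzfs_handle_t *hdl)
{
	zfs_handle_t *fs, *snap;
	int err = -1;

	fs = zfs_open(hdl, "tank/home", ZFS_TYPE_FILESYSTEM);
	snap = zfs_open(hdl, "tank/home@backup", ZFS_TYPE_SNAPSHOT);
	if (fs != NULL && snap != NULL)
		err = zfs_rollback(fs, snap, B_FALSE);	/* don't force unmounts */
	if (snap != NULL)
		zfs_close(snap);
	if (fs != NULL)
		zfs_close(fs);
	return (err);
}
#endif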
/*
* Renames the given dataset.
*/
int
zfs_rename(zfs_handle_t *zhp, const char *target, boolean_t recursive,
boolean_t force_unmount)
{
int ret = 0;
zfs_cmd_t zc = {"\0"};
char *delim;
prop_changelist_t *cl = NULL;
zfs_handle_t *zhrp = NULL;
char *parentname = NULL;
char parent[ZFS_MAX_DATASET_NAME_LEN];
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
/* if we have the same exact name, just return success */
if (strcmp(zhp->zfs_name, target) == 0)
return (0);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename to '%s'"), target);
/* make sure source name is valid */
if (!zfs_validate_name(hdl, zhp->zfs_name, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/*
* Make sure the target name is valid
*/
if (zhp->zfs_type == ZFS_TYPE_SNAPSHOT) {
if ((strchr(target, '@') == NULL) ||
*target == '@') {
/*
* Snapshot target name is abbreviated,
* reconstruct full dataset name
*/
(void) strlcpy(parent, zhp->zfs_name,
sizeof (parent));
delim = strchr(parent, '@');
if (strchr(target, '@') == NULL)
*(++delim) = '\0';
else
*delim = '\0';
(void) strlcat(parent, target, sizeof (parent));
target = parent;
} else {
/*
* Make sure we're renaming within the same dataset.
*/
delim = strchr(target, '@');
if (strncmp(zhp->zfs_name, target, delim - target)
!= 0 || zhp->zfs_name[delim - target] != '@') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"snapshots must be part of same "
"dataset"));
return (zfs_error(hdl, EZFS_CROSSTARGET,
errbuf));
}
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
} else {
if (recursive) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"recursive rename must be a snapshot"));
return (zfs_error(hdl, EZFS_BADTYPE, errbuf));
}
if (!zfs_validate_name(hdl, target, zhp->zfs_type, B_TRUE))
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
/* validate parents */
if (check_parents(hdl, target, NULL, B_FALSE, NULL) != 0)
return (-1);
/* make sure we're in the same pool */
verify((delim = strchr(target, '/')) != NULL);
if (strncmp(zhp->zfs_name, target, delim - target) != 0 ||
zhp->zfs_name[delim - target] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"datasets must be within same pool"));
return (zfs_error(hdl, EZFS_CROSSTARGET, errbuf));
}
/* new name cannot be a child of the current dataset name */
if (is_descendant(zhp->zfs_name, target)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"New dataset name cannot be a descendant of "
"current dataset name"));
return (zfs_error(hdl, EZFS_INVALIDNAME, errbuf));
}
}
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot rename '%s'"), zhp->zfs_name);
if (getzoneid() == GLOBAL_ZONEID &&
zfs_prop_get_int(zhp, ZFS_PROP_ZONED)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is used in a non-global zone"));
return (zfs_error(hdl, EZFS_ZONED, errbuf));
}
if (recursive) {
parentname = zfs_strdup(zhp->zfs_hdl, zhp->zfs_name);
if (parentname == NULL) {
ret = -1;
goto error;
}
delim = strchr(parentname, '@');
*delim = '\0';
zhrp = zfs_open(zhp->zfs_hdl, parentname, ZFS_TYPE_DATASET);
if (zhrp == NULL) {
ret = -1;
goto error;
}
} else if (zhp->zfs_type != ZFS_TYPE_SNAPSHOT) {
if ((cl = changelist_gather(zhp, ZFS_PROP_NAME, 0,
force_unmount ? MS_FORCE : 0)) == NULL)
return (-1);
if (changelist_haszonedchild(cl)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"child dataset with inherited mountpoint is used "
"in a non-global zone"));
(void) zfs_error(hdl, EZFS_ZONED, errbuf);
ret = -1;
goto error;
}
if ((ret = changelist_prefix(cl)) != 0)
goto error;
}
if (ZFS_IS_VOLUME(zhp))
zc.zc_objset_type = DMU_OST_ZVOL;
else
zc.zc_objset_type = DMU_OST_ZFS;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, target, sizeof (zc.zc_value));
zc.zc_cookie = recursive;
if ((ret = zfs_ioctl(zhp->zfs_hdl, ZFS_IOC_RENAME, &zc)) != 0) {
/*
* if it was recursive, the one that actually failed will
* be in zc.zc_name
*/
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot rename '%s'"), zc.zc_name);
if (recursive && errno == EEXIST) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"a child dataset already has a snapshot "
"with the new name"));
(void) zfs_error(hdl, EZFS_EXISTS, errbuf);
} else if (errno == EACCES) {
if (zfs_prop_get_int(zhp, ZFS_PROP_ENCRYPTION) ==
ZIO_CRYPT_OFF) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot rename an unencrypted dataset to "
"be a decendent of an encrypted one"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot move encryption child outside of "
"its encryption root"));
}
(void) zfs_error(hdl, EZFS_CRYPTOFAILED, errbuf);
} else {
(void) zfs_standard_error(zhp->zfs_hdl, errno, errbuf);
}
/*
* On failure, we still want to remount any filesystems that
* were previously mounted, so we don't alter the system state.
*/
if (cl != NULL)
(void) changelist_postfix(cl);
} else {
if (cl != NULL) {
changelist_rename(cl, zfs_get_name(zhp), target);
ret = changelist_postfix(cl);
}
}
error:
if (parentname != NULL) {
free(parentname);
}
if (zhrp != NULL) {
zfs_close(zhrp);
}
if (cl != NULL) {
changelist_free(cl);
}
return (ret);
}
nvlist_t *
zfs_get_all_props(zfs_handle_t *zhp)
{
return (zhp->zfs_props);
}
nvlist_t *
zfs_get_recvd_props(zfs_handle_t *zhp)
{
if (zhp->zfs_recvd_props == NULL)
if (get_recvd_props_ioctl(zhp) != 0)
return (NULL);
return (zhp->zfs_recvd_props);
}
nvlist_t *
zfs_get_user_props(zfs_handle_t *zhp)
{
return (zhp->zfs_user_props);
}
/*
* This function is used by 'zfs list' to determine the exact set of columns to
* display, and their maximum widths. This does two main things:
*
* - If this is a list of all properties, then expand the list to include
* all native properties, and set a flag so that for each dataset we look
* for new unique user properties and add them to the list.
*
* - For non-fixed-width properties, keep track of the maximum width seen
* so that we can size the column appropriately. If the user has
* requested received property values, we also need to compute the width
* of the RECEIVED column.
*/
int
zfs_expand_proplist(zfs_handle_t *zhp, zprop_list_t **plp, boolean_t received,
boolean_t literal)
{
libzfs_handle_t *hdl = zhp->zfs_hdl;
zprop_list_t *entry;
zprop_list_t **last, **start;
nvlist_t *userprops, *propval;
nvpair_t *elem;
char *strval;
char buf[ZFS_MAXPROPLEN];
if (zprop_expand_list(hdl, plp, ZFS_TYPE_DATASET) != 0)
return (-1);
userprops = zfs_get_user_props(zhp);
entry = *plp;
if (entry->pl_all && nvlist_next_nvpair(userprops, NULL) != NULL) {
/*
* Go through and add any user properties as necessary. We
* start by incrementing our list pointer to the first
* non-native property.
*/
start = plp;
while (*start != NULL) {
if ((*start)->pl_prop == ZPROP_INVAL)
break;
start = &(*start)->pl_next;
}
elem = NULL;
while ((elem = nvlist_next_nvpair(userprops, elem)) != NULL) {
/*
* See if we've already found this property in our list.
*/
for (last = start; *last != NULL;
last = &(*last)->pl_next) {
if (strcmp((*last)->pl_user_prop,
nvpair_name(elem)) == 0)
break;
}
if (*last == NULL) {
if ((entry = zfs_alloc(hdl,
sizeof (zprop_list_t))) == NULL ||
((entry->pl_user_prop = zfs_strdup(hdl,
nvpair_name(elem)))) == NULL) {
free(entry);
return (-1);
}
entry->pl_prop = ZPROP_INVAL;
entry->pl_width = strlen(nvpair_name(elem));
entry->pl_all = B_TRUE;
*last = entry;
}
}
}
/*
* Now go through and check the width of any non-fixed columns
*/
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed && !literal)
continue;
if (entry->pl_prop != ZPROP_INVAL) {
if (zfs_prop_get(zhp, entry->pl_prop,
buf, sizeof (buf), NULL, NULL, 0, literal) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
if (received && zfs_prop_get_recvd(zhp,
zfs_prop_to_name(entry->pl_prop),
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
} else {
if (nvlist_lookup_nvlist(userprops, entry->pl_user_prop,
&propval) == 0) {
verify(nvlist_lookup_string(propval,
ZPROP_VALUE, &strval) == 0);
if (strlen(strval) > entry->pl_width)
entry->pl_width = strlen(strval);
}
if (received && zfs_prop_get_recvd(zhp,
entry->pl_user_prop,
buf, sizeof (buf), literal) == 0)
if (strlen(buf) > entry->pl_recvd_width)
entry->pl_recvd_width = strlen(buf);
}
}
return (0);
}
void
zfs_prune_proplist(zfs_handle_t *zhp, uint8_t *props)
{
nvpair_t *curr;
nvpair_t *next;
/*
* Keep a reference to the props-table against which we prune the
* properties.
*/
zhp->zfs_props_table = props;
curr = nvlist_next_nvpair(zhp->zfs_props, NULL);
while (curr) {
zfs_prop_t zfs_prop = zfs_name_to_prop(nvpair_name(curr));
next = nvlist_next_nvpair(zhp->zfs_props, curr);
/*
* User properties will result in ZPROP_INVAL, and since we
* only know how to prune standard ZFS properties, we always
* leave these in the list. This can also happen if we
* encounter an unknown DSL property (when running older
* software, for example).
*/
if (zfs_prop != ZPROP_INVAL && props[zfs_prop] == B_FALSE)
(void) nvlist_remove(zhp->zfs_props,
nvpair_name(curr), nvpair_type(curr));
curr = next;
}
}
static int
zfs_smb_acl_mgmt(libzfs_handle_t *hdl, char *dataset, char *path,
zfs_smb_acl_op_t cmd, char *resource1, char *resource2)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *nvlist = NULL;
int error;
(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, path, sizeof (zc.zc_value));
zc.zc_cookie = (uint64_t)cmd;
if (cmd == ZFS_SMB_ACL_RENAME) {
if (nvlist_alloc(&nvlist, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (0);
}
}
switch (cmd) {
case ZFS_SMB_ACL_ADD:
case ZFS_SMB_ACL_REMOVE:
(void) strlcpy(zc.zc_string, resource1, sizeof (zc.zc_string));
break;
case ZFS_SMB_ACL_RENAME:
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_SRC,
resource1) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (nvlist_add_string(nvlist, ZFS_SMB_ACL_TARGET,
resource2) != 0) {
(void) no_memory(hdl);
return (-1);
}
if (zcmd_write_src_nvlist(hdl, &zc, nvlist) != 0) {
nvlist_free(nvlist);
return (-1);
}
break;
case ZFS_SMB_ACL_PURGE:
break;
default:
return (-1);
}
error = ioctl(hdl->libzfs_fd, ZFS_IOC_SMB_ACL, &zc);
nvlist_free(nvlist);
return (error);
}
int
zfs_smb_acl_add(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_ADD,
resource, NULL));
}
int
zfs_smb_acl_remove(libzfs_handle_t *hdl, char *dataset,
char *path, char *resource)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_REMOVE,
resource, NULL));
}
int
zfs_smb_acl_purge(libzfs_handle_t *hdl, char *dataset, char *path)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_PURGE,
NULL, NULL));
}
int
zfs_smb_acl_rename(libzfs_handle_t *hdl, char *dataset, char *path,
char *oldname, char *newname)
{
return (zfs_smb_acl_mgmt(hdl, dataset, path, ZFS_SMB_ACL_RENAME,
oldname, newname));
}
int
zfs_userspace(zfs_handle_t *zhp, zfs_userquota_prop_t type,
zfs_userspace_cb_t func, void *arg)
{
zfs_cmd_t zc = {"\0"};
zfs_useracct_t buf[100];
libzfs_handle_t *hdl = zhp->zfs_hdl;
int ret;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
zc.zc_objset_type = type;
zc.zc_nvlist_dst = (uintptr_t)buf;
for (;;) {
zfs_useracct_t *zua = buf;
zc.zc_nvlist_dst_size = sizeof (buf);
if (zfs_ioctl(hdl, ZFS_IOC_USERSPACE_MANY, &zc) != 0) {
char errbuf[1024];
if ((errno == ENOTSUP &&
(type == ZFS_PROP_USEROBJUSED ||
type == ZFS_PROP_GROUPOBJUSED ||
type == ZFS_PROP_USEROBJQUOTA ||
type == ZFS_PROP_GROUPOBJQUOTA ||
type == ZFS_PROP_PROJECTOBJUSED ||
type == ZFS_PROP_PROJECTOBJQUOTA ||
type == ZFS_PROP_PROJECTUSED ||
type == ZFS_PROP_PROJECTQUOTA)))
break;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot get used/quota for %s"), zc.zc_name);
return (zfs_standard_error_fmt(hdl, errno, errbuf));
}
if (zc.zc_nvlist_dst_size == 0)
break;
while (zc.zc_nvlist_dst_size > 0) {
if ((ret = func(arg, zua->zu_domain, zua->zu_rid,
zua->zu_space)) != 0)
return (ret);
zua++;
zc.zc_nvlist_dst_size -= sizeof (zfs_useracct_t);
}
}
return (0);
}
struct holdarg {
nvlist_t *nvl;
const char *snapname;
const char *tag;
boolean_t recursive;
int error;
};
static int
zfs_hold_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
ha->snapname) >= sizeof (name))
return (EINVAL);
if (lzc_exists(name))
fnvlist_add_string(ha->nvl, name, ha->tag);
if (ha->recursive)
rv = zfs_iter_filesystems(zhp, zfs_hold_one, ha);
zfs_close(zhp);
return (rv);
}
int
zfs_hold(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive, int cleanup_fd)
{
int ret;
struct holdarg ha;
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
(void) zfs_hold_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
char errbuf[1024];
fnvlist_free(ha.nvl);
ret = ENOENT;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s@%s'"),
zhp->zfs_name, snapname);
(void) zfs_standard_error(zhp->zfs_hdl, ret, errbuf);
return (ret);
}
ret = zfs_hold_nvl(zhp, cleanup_fd, ha.nvl);
fnvlist_free(ha.nvl);
return (ret);
}
int
zfs_hold_nvl(zfs_handle_t *zhp, int cleanup_fd, nvlist_t *holds)
{
int ret;
nvlist_t *errors;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
nvpair_t *elem;
errors = NULL;
ret = lzc_hold(holds, cleanup_fd, &errors);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot hold"));
switch (ret) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error(hdl, ret, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot hold snapshot '%s'"), nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case E2BIG:
/*
* Temporary tags wind up having the ds object id
* prepended. So even if we passed the length check
* above, it's still possible for the tag to wind
* up being slightly too long.
*/
(void) zfs_error(hdl, EZFS_TAGTOOLONG, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case EEXIST:
(void) zfs_error(hdl, EZFS_REFTAG_HOLD, errbuf);
break;
default:
(void) zfs_standard_error(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
static int
zfs_release_one(zfs_handle_t *zhp, void *arg)
{
struct holdarg *ha = arg;
char name[ZFS_MAX_DATASET_NAME_LEN];
int rv = 0;
nvlist_t *existing_holds;
if (snprintf(name, sizeof (name), "%s@%s", zhp->zfs_name,
ha->snapname) >= sizeof (name)) {
ha->error = EINVAL;
rv = EINVAL;
}
if (lzc_get_holds(name, &existing_holds) != 0) {
ha->error = ENOENT;
} else if (!nvlist_exists(existing_holds, ha->tag)) {
ha->error = ESRCH;
} else {
nvlist_t *torelease = fnvlist_alloc();
fnvlist_add_boolean(torelease, ha->tag);
fnvlist_add_nvlist(ha->nvl, name, torelease);
fnvlist_free(torelease);
}
if (ha->recursive)
rv = zfs_iter_filesystems(zhp, zfs_release_one, ha);
zfs_close(zhp);
return (rv);
}
int
zfs_release(zfs_handle_t *zhp, const char *snapname, const char *tag,
boolean_t recursive)
{
int ret;
struct holdarg ha;
nvlist_t *errors = NULL;
nvpair_t *elem;
libzfs_handle_t *hdl = zhp->zfs_hdl;
char errbuf[1024];
ha.nvl = fnvlist_alloc();
ha.snapname = snapname;
ha.tag = tag;
ha.recursive = recursive;
ha.error = 0;
(void) zfs_release_one(zfs_handle_dup(zhp), &ha);
if (nvlist_empty(ha.nvl)) {
fnvlist_free(ha.nvl);
ret = ha.error;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s@%s'"),
zhp->zfs_name, snapname);
if (ret == ESRCH) {
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
} else {
(void) zfs_standard_error(hdl, ret, errbuf);
}
return (ret);
}
ret = lzc_release(ha.nvl, &errors);
fnvlist_free(ha.nvl);
if (ret == 0) {
/* There may be errors even in the success case. */
fnvlist_free(errors);
return (0);
}
if (nvlist_empty(errors)) {
/* no hold-specific errors */
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot release"));
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
default:
(void) zfs_standard_error_fmt(hdl, errno, errbuf);
}
}
for (elem = nvlist_next_nvpair(errors, NULL);
elem != NULL;
elem = nvlist_next_nvpair(errors, elem)) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN,
"cannot release hold from snapshot '%s'"),
nvpair_name(elem));
switch (fnvpair_value_int32(elem)) {
case ESRCH:
(void) zfs_error(hdl, EZFS_REFTAG_RELE, errbuf);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
default:
(void) zfs_standard_error_fmt(hdl,
fnvpair_value_int32(elem), errbuf);
}
}
fnvlist_free(errors);
return (ret);
}
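/*
 * Illustrative sketch (editor's addition): placing and then releasing a user
 * hold on a snapshot. A cleanup_fd of -1 requests an ordinary hold that is
 * not released automatically when a cleanup descriptor is closed. Snapshot
 * and tag names are hypothetical. Guarded with #if 0.
 */
#if 0
static void
example_hold_release(zfs_handle_t *zhp)
{
	/* Hold <zhp's name>@backup with the tag "keepme", non-recursively. */
	if (zfs_hold(zhp, "backup", "keepme", B_FALSE, -1) == 0)
		(void) zfs_release(zhp, "backup", "keepme", B_FALSE);
}
#endif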
int
zfs_get_fsacl(zfs_handle_t *zhp, nvlist_t **nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
int nvsz = 2048;
void *nvbuf;
int err = 0;
char errbuf[1024];
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
tryagain:
nvbuf = malloc(nvsz);
if (nvbuf == NULL) {
err = (zfs_error(hdl, EZFS_NOMEM, strerror(errno)));
goto out;
}
zc.zc_nvlist_dst_size = nvsz;
zc.zc_nvlist_dst = (uintptr_t)nvbuf;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (ioctl(hdl->libzfs_fd, ZFS_IOC_GET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOMEM:
free(nvbuf);
nvsz = zc.zc_nvlist_dst_size;
goto tryagain;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error_fmt(hdl, errno, errbuf);
break;
}
} else {
/* success */
int rc = nvlist_unpack(nvbuf, zc.zc_nvlist_dst_size, nvl, 0);
if (rc) {
(void) snprintf(errbuf, sizeof (errbuf), dgettext(
TEXT_DOMAIN, "cannot get permissions on '%s'"),
zc.zc_name);
err = zfs_standard_error_fmt(hdl, rc, errbuf);
}
}
free(nvbuf);
out:
return (err);
}
int
zfs_set_fsacl(zfs_handle_t *zhp, boolean_t un, nvlist_t *nvl)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zfs_hdl;
char *nvbuf;
char errbuf[1024];
size_t nvsz;
int err;
assert(zhp->zfs_type == ZFS_TYPE_VOLUME ||
zhp->zfs_type == ZFS_TYPE_FILESYSTEM);
err = nvlist_size(nvl, &nvsz, NV_ENCODE_NATIVE);
assert(err == 0);
nvbuf = malloc(nvsz);
err = nvlist_pack(nvl, &nvbuf, &nvsz, NV_ENCODE_NATIVE, 0);
assert(err == 0);
zc.zc_nvlist_src_size = nvsz;
zc.zc_nvlist_src = (uintptr_t)nvbuf;
zc.zc_perm_action = un;
(void) strlcpy(zc.zc_name, zhp->zfs_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_SET_FSACL, &zc) != 0) {
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set permissions on '%s'"),
zc.zc_name);
switch (errno) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error_fmt(hdl, errno, errbuf);
break;
}
}
free(nvbuf);
return (err);
}
int
zfs_get_holds(zfs_handle_t *zhp, nvlist_t **nvl)
{
int err;
char errbuf[1024];
err = lzc_get_holds(zhp->zfs_name, nvl);
if (err != 0) {
libzfs_handle_t *hdl = zhp->zfs_hdl;
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot get holds for '%s'"),
zhp->zfs_name);
switch (err) {
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded"));
err = zfs_error(hdl, EZFS_BADVERSION, errbuf);
break;
case EINVAL:
err = zfs_error(hdl, EZFS_BADTYPE, errbuf);
break;
case ENOENT:
err = zfs_error(hdl, EZFS_NOENT, errbuf);
break;
default:
err = zfs_standard_error_fmt(hdl, errno, errbuf);
break;
}
}
return (err);
}
/*
* Convert the zvol's volume size to an appropriate reservation.
* Note: If this routine is updated, it is necessary to update the ZFS test
* suite's shell version in reservation.kshlib.
*/
uint64_t
zvol_volsize_to_reservation(uint64_t volsize, nvlist_t *props)
{
uint64_t numdb;
uint64_t nblocks, volblocksize;
int ncopies;
char *strval;
if (nvlist_lookup_string(props,
zfs_prop_to_name(ZFS_PROP_COPIES), &strval) == 0)
ncopies = atoi(strval);
else
ncopies = 1;
if (nvlist_lookup_uint64(props,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&volblocksize) != 0)
volblocksize = ZVOL_DEFAULT_BLOCKSIZE;
nblocks = volsize/volblocksize;
/* start with metadnode L0-L6 */
numdb = 7;
/* calculate number of indirects */
while (nblocks > 1) {
nblocks += DNODES_PER_LEVEL - 1;
nblocks /= DNODES_PER_LEVEL;
numdb += nblocks;
}
numdb *= MIN(SPA_DVAS_PER_BP, ncopies + 1);
volsize *= ncopies;
/*
* Each metadata block is 1 << DN_MAX_INDBLKSHIFT bytes when it isn't
* compressed, but in practice such blocks compress down to about
* 1100 bytes
*/
numdb *= 1ULL << DN_MAX_INDBLKSHIFT;
volsize += numdb;
return (volsize);
}
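/*
 * Editor's worked example (not part of the original source), assuming the
 * usual constants DNODES_PER_LEVEL == 1024, SPA_DVAS_PER_BP == 3,
 * DN_MAX_INDBLKSHIFT == 17, and ZVOL_DEFAULT_BLOCKSIZE == 8192: for a 1 GiB
 * volume with copies=1, nblocks = 2^30 / 8192 = 131072; the indirect loop
 * adds 128 + 1 blocks to the initial 7, giving numdb = 136; multiplying by
 * MIN(3, copies + 1) = 2 yields 272 metadata blocks of 128 KiB each, so the
 * reservation is 2^30 + 272 * 131072 = 1,109,393,408 bytes (~1.03 GiB).
 */
#if 0
static uint64_t
example_volsize_to_reservation(void)
{
	nvlist_t *props = fnvlist_alloc();
	uint64_t resv;

	/* Defaults: copies=1, volblocksize=8K (nothing added to 'props'). */
	resv = zvol_volsize_to_reservation(1ULL << 30, props);
	fnvlist_free(props);
	return (resv);	/* expected: 1109393408 */
}
#endif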
diff --git a/lib/libzfs/libzfs_pool.c b/lib/libzfs/libzfs_pool.c
index 3fe0b9e0ee44..99b4ea29af86 100644
--- a/lib/libzfs/libzfs_pool.c
+++ b/lib/libzfs/libzfs_pool.c
@@ -1,4761 +1,4834 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2017 Datto Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
#include <ctype.h>
#include <errno.h>
#include <devid.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <libgen.h>
#include <zone.h>
#include <sys/stat.h>
#include <sys/efi_partition.h>
#include <sys/systeminfo.h>
#include <sys/vtoc.h>
#include <sys/zfs_ioctl.h>
#include <dlfcn.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "libzfs_impl.h"
#include "zfs_comutil.h"
#include "zfeature_common.h"
static int read_efi_label(nvlist_t *config, diskaddr_t *sb);
static boolean_t zpool_vdev_is_interior(const char *name);
typedef struct prop_flags {
int create:1; /* Validate property on creation */
int import:1; /* Validate property on import */
} prop_flags_t;
/*
* ====================================================================
* zpool property functions
* ====================================================================
*/
static int
zpool_get_all_props(zpool_handle_t *zhp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
return (-1);
while (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_PROPS, &zc) != 0) {
if (errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
} else {
zcmd_free_nvlists(&zc);
return (-1);
}
}
if (zcmd_read_dst_nvlist(hdl, &zc, &zhp->zpool_props) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
zcmd_free_nvlists(&zc);
return (0);
}
static int
zpool_props_refresh(zpool_handle_t *zhp)
{
nvlist_t *old_props;
old_props = zhp->zpool_props;
if (zpool_get_all_props(zhp) != 0)
return (-1);
nvlist_free(old_props);
return (0);
}
static const char *
zpool_get_prop_string(zpool_handle_t *zhp, zpool_prop_t prop,
zprop_source_t *src)
{
nvlist_t *nv, *nvl;
uint64_t ival;
char *value;
zprop_source_t source;
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &ival) == 0);
source = ival;
verify(nvlist_lookup_string(nv, ZPROP_VALUE, &value) == 0);
} else {
source = ZPROP_SRC_DEFAULT;
if ((value = (char *)zpool_prop_default_string(prop)) == NULL)
value = "-";
}
if (src)
*src = source;
return (value);
}
uint64_t
zpool_get_prop_int(zpool_handle_t *zhp, zpool_prop_t prop, zprop_source_t *src)
{
nvlist_t *nv, *nvl;
uint64_t value;
zprop_source_t source;
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp)) {
/*
* zpool_get_all_props() has most likely failed because
* the pool is faulted, but if all we need is the top level
* vdev's guid then get it from the zhp config nvlist.
*/
if ((prop == ZPOOL_PROP_GUID) &&
(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nv) == 0) &&
(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value)
== 0)) {
return (value);
}
return (zpool_prop_default_numeric(prop));
}
nvl = zhp->zpool_props;
if (nvlist_lookup_nvlist(nvl, zpool_prop_to_name(prop), &nv) == 0) {
verify(nvlist_lookup_uint64(nv, ZPROP_SOURCE, &value) == 0);
source = value;
verify(nvlist_lookup_uint64(nv, ZPROP_VALUE, &value) == 0);
} else {
source = ZPROP_SRC_DEFAULT;
value = zpool_prop_default_numeric(prop);
}
if (src)
*src = source;
return (value);
}
/*
* Map VDEV STATE to printed strings.
*/
const char *
zpool_state_to_name(vdev_state_t state, vdev_aux_t aux)
{
switch (state) {
case VDEV_STATE_CLOSED:
case VDEV_STATE_OFFLINE:
return (gettext("OFFLINE"));
case VDEV_STATE_REMOVED:
return (gettext("REMOVED"));
case VDEV_STATE_CANT_OPEN:
if (aux == VDEV_AUX_CORRUPT_DATA || aux == VDEV_AUX_BAD_LOG)
return (gettext("FAULTED"));
else if (aux == VDEV_AUX_SPLIT_POOL)
return (gettext("SPLIT"));
else
return (gettext("UNAVAIL"));
case VDEV_STATE_FAULTED:
return (gettext("FAULTED"));
case VDEV_STATE_DEGRADED:
return (gettext("DEGRADED"));
case VDEV_STATE_HEALTHY:
return (gettext("ONLINE"));
default:
break;
}
return (gettext("UNKNOWN"));
}
/*
* Map POOL STATE to printed strings.
*/
const char *
zpool_pool_state_to_name(pool_state_t state)
{
switch (state) {
default:
break;
case POOL_STATE_ACTIVE:
return (gettext("ACTIVE"));
case POOL_STATE_EXPORTED:
return (gettext("EXPORTED"));
case POOL_STATE_DESTROYED:
return (gettext("DESTROYED"));
case POOL_STATE_SPARE:
return (gettext("SPARE"));
case POOL_STATE_L2CACHE:
return (gettext("L2CACHE"));
case POOL_STATE_UNINITIALIZED:
return (gettext("UNINITIALIZED"));
case POOL_STATE_UNAVAIL:
return (gettext("UNAVAIL"));
case POOL_STATE_POTENTIALLY_ACTIVE:
return (gettext("POTENTIALLY_ACTIVE"));
}
return (gettext("UNKNOWN"));
}
/*
* Get a zpool property value for 'prop' and return the value in
* a pre-allocated buffer.
*/
int
zpool_get_prop(zpool_handle_t *zhp, zpool_prop_t prop, char *buf,
size_t len, zprop_source_t *srctype, boolean_t literal)
{
uint64_t intval;
const char *strval;
zprop_source_t src = ZPROP_SRC_NONE;
nvlist_t *nvroot;
vdev_stat_t *vs;
uint_t vsc;
if (zpool_get_state(zhp) == POOL_STATE_UNAVAIL) {
switch (prop) {
case ZPOOL_PROP_NAME:
(void) strlcpy(buf, zpool_get_name(zhp), len);
break;
case ZPOOL_PROP_HEALTH:
(void) strlcpy(buf, "FAULTED", len);
break;
case ZPOOL_PROP_GUID:
intval = zpool_get_prop_int(zhp, prop, &src);
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
break;
case ZPOOL_PROP_ALTROOT:
case ZPOOL_PROP_CACHEFILE:
case ZPOOL_PROP_COMMENT:
if (zhp->zpool_props != NULL ||
zpool_get_all_props(zhp) == 0) {
(void) strlcpy(buf,
zpool_get_prop_string(zhp, prop, &src),
len);
break;
}
/* FALLTHROUGH */
default:
(void) strlcpy(buf, "-", len);
break;
}
if (srctype != NULL)
*srctype = src;
return (0);
}
if (zhp->zpool_props == NULL && zpool_get_all_props(zhp) &&
prop != ZPOOL_PROP_NAME)
return (-1);
switch (zpool_prop_get_type(prop)) {
case PROP_TYPE_STRING:
(void) strlcpy(buf, zpool_get_prop_string(zhp, prop, &src),
len);
break;
case PROP_TYPE_NUMBER:
intval = zpool_get_prop_int(zhp, prop, &src);
switch (prop) {
case ZPOOL_PROP_SIZE:
case ZPOOL_PROP_ALLOCATED:
case ZPOOL_PROP_FREE:
case ZPOOL_PROP_FREEING:
case ZPOOL_PROP_LEAKED:
case ZPOOL_PROP_ASHIFT:
if (literal)
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
else
(void) zfs_nicenum(intval, buf, len);
break;
case ZPOOL_PROP_EXPANDSZ:
if (intval == 0) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) zfs_nicebytes(intval, buf, len);
}
break;
case ZPOOL_PROP_CAPACITY:
if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_FRAGMENTATION:
if (intval == UINT64_MAX) {
(void) strlcpy(buf, "-", len);
} else if (literal) {
(void) snprintf(buf, len, "%llu",
(u_longlong_t)intval);
} else {
(void) snprintf(buf, len, "%llu%%",
(u_longlong_t)intval);
}
break;
case ZPOOL_PROP_DEDUPRATIO:
if (literal)
(void) snprintf(buf, len, "%llu.%02llu",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
else
(void) snprintf(buf, len, "%llu.%02llux",
(u_longlong_t)(intval / 100),
(u_longlong_t)(intval % 100));
break;
case ZPOOL_PROP_HEALTH:
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
verify(nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
== 0);
(void) strlcpy(buf, zpool_state_to_name(intval,
vs->vs_aux), len);
break;
case ZPOOL_PROP_VERSION:
if (intval >= SPA_VERSION_FEATURES) {
(void) snprintf(buf, len, "-");
break;
}
/* FALLTHROUGH */
default:
(void) snprintf(buf, len, "%llu", (u_longlong_t)intval);
}
break;
case PROP_TYPE_INDEX:
intval = zpool_get_prop_int(zhp, prop, &src);
if (zpool_prop_index_to_string(prop, intval, &strval)
!= 0)
return (-1);
(void) strlcpy(buf, strval, len);
break;
default:
abort();
}
if (srctype)
*srctype = src;
return (0);
}
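/*
 * Minimal usage sketch for zpool_get_prop(): reads two numeric pool
 * properties, once human-readable and once literal. The pool name "tank"
 * and the libzfs_init()/libzfs_fini() bracketing are illustrative.
 */
#include <stdio.h>
#include <libzfs.h>

static void
example_get_pool_props(void)
{
	libzfs_handle_t *hdl;
	zpool_handle_t *zhp;
	char buf[ZFS_MAXPROPLEN];
	zprop_source_t src;

	if ((hdl = libzfs_init()) == NULL)
		return;
	if ((zhp = zpool_open(hdl, "tank")) == NULL) {
		libzfs_fini(hdl);
		return;
	}

	/* "75%"-style output: literal == B_FALSE takes the formatted path */
	if (zpool_get_prop(zhp, ZPOOL_PROP_CAPACITY, buf, sizeof (buf),
	    &src, B_FALSE) == 0)
		(void) printf("capacity: %s\n", buf);

	/* raw byte count: literal == B_TRUE prints the plain integer */
	if (zpool_get_prop(zhp, ZPOOL_PROP_SIZE, buf, sizeof (buf),
	    NULL, B_TRUE) == 0)
		(void) printf("size: %s bytes\n", buf);

	zpool_close(zhp);
	libzfs_fini(hdl);
}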
/*
* Check that the bootfs name begins with the name of the pool it is being
* set on. Assumes bootfs is otherwise a valid dataset name.
*/
static boolean_t
bootfs_name_valid(const char *pool, char *bootfs)
{
int len = strlen(pool);
if (bootfs[0] == '\0')
return (B_TRUE);
if (!zfs_name_valid(bootfs, ZFS_TYPE_FILESYSTEM|ZFS_TYPE_SNAPSHOT))
return (B_FALSE);
if (strncmp(pool, bootfs, len) == 0 &&
(bootfs[len] == '/' || bootfs[len] == '\0'))
return (B_TRUE);
return (B_FALSE);
}
boolean_t
zpool_is_bootable(zpool_handle_t *zhp)
{
char bootfs[ZFS_MAX_DATASET_NAME_LEN];
return (zpool_get_prop(zhp, ZPOOL_PROP_BOOTFS, bootfs,
sizeof (bootfs), NULL, B_FALSE) == 0 && strncmp(bootfs, "-",
sizeof (bootfs)) != 0);
}
/*
* Given an nvlist of zpool properties to be set, validate that they are
* correct, and parse any numeric properties (index, boolean, etc) if they are
* specified as strings.
*/
static nvlist_t *
zpool_valid_proplist(libzfs_handle_t *hdl, const char *poolname,
nvlist_t *props, uint64_t version, prop_flags_t flags, char *errbuf)
{
nvpair_t *elem;
nvlist_t *retprops;
zpool_prop_t prop;
char *strval;
uint64_t intval;
char *slash, *check;
struct stat64 statbuf;
zpool_handle_t *zhp;
if (nvlist_alloc(&retprops, NV_UNIQUE_NAME, 0) != 0) {
(void) no_memory(hdl);
return (NULL);
}
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
const char *propname = nvpair_name(elem);
prop = zpool_name_to_prop(propname);
if (prop == ZPOOL_PROP_INVAL && zpool_prop_feature(propname)) {
int err;
char *fname = strchr(propname, '@') + 1;
err = zfeature_lookup_name(fname, NULL);
if (err != 0) {
ASSERT3U(err, ==, ENOENT);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid feature '%s'"), fname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvpair_type(elem) != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
(void) nvpair_value_string(elem, &strval);
if (strcmp(strval, ZFS_FEATURE_ENABLED) != 0 &&
strcmp(strval, ZFS_FEATURE_DISABLED) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'enabled' or 'disabled'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (!flags.create &&
strcmp(strval, ZFS_FEATURE_DISABLED) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set to "
"'disabled' at creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (nvlist_add_uint64(retprops, propname, 0) != 0) {
(void) no_memory(hdl);
goto error;
}
continue;
}
/*
* Make sure this property is valid and applies to this type.
*/
if (prop == ZPOOL_PROP_INVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (zpool_prop_readonly(prop)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is readonly"), propname);
(void) zfs_error(hdl, EZFS_PROPREADONLY, errbuf);
goto error;
}
if (zprop_parse_value(hdl, elem, prop, ZFS_TYPE_POOL, retprops,
&strval, &intval, errbuf) != 0)
goto error;
/*
* Perform additional checking for specific properties.
*/
switch (prop) {
case ZPOOL_PROP_VERSION:
if (intval < version ||
!SPA_VERSION_IS_SUPPORTED(intval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' number %d is invalid."),
propname, intval);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
break;
case ZPOOL_PROP_ASHIFT:
if (intval != 0 &&
(intval < ASHIFT_MIN || intval > ASHIFT_MAX)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid '%s=%d' property: only values "
"between %" PRId32 " and %" PRId32 " "
"are allowed.\n"),
propname, intval, ASHIFT_MIN, ASHIFT_MAX);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_BOOTFS:
if (flags.create || flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' cannot be set at creation "
"or import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (version < SPA_VERSION_BOOTFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to support "
"'%s' property"), propname);
(void) zfs_error(hdl, EZFS_BADVERSION, errbuf);
goto error;
}
/*
* the bootfs property value has to be a dataset name, and
* the dataset has to be in the same pool it is being set on.
*/
if (!bootfs_name_valid(poolname, strval)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "'%s' "
"is an invalid name"), strval);
(void) zfs_error(hdl, EZFS_INVALIDNAME, errbuf);
goto error;
}
if ((zhp = zpool_open_canfail(hdl, poolname)) == NULL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"could not open pool '%s'"), poolname);
(void) zfs_error(hdl, EZFS_OPENFAILED, errbuf);
goto error;
}
zpool_close(zhp);
break;
case ZPOOL_PROP_ALTROOT:
if (!flags.create && !flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set during pool "
"creation or import"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad alternate root '%s'"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
break;
case ZPOOL_PROP_CACHEFILE:
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' must be empty, an "
"absolute path, or 'none'"), propname);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
slash = strrchr(strval, '/');
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid file"), strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*slash = '\0';
if (strval[0] != '\0' &&
(stat64(strval, &statbuf) != 0 ||
!S_ISDIR(statbuf.st_mode))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is not a valid directory"),
strval);
(void) zfs_error(hdl, EZFS_BADPATH, errbuf);
goto error;
}
*slash = '/';
break;
case ZPOOL_PROP_COMMENT:
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN,
"comment may only have printable "
"characters"));
(void) zfs_error(hdl, EZFS_BADPROP,
errbuf);
goto error;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"comment must not exceed %d characters"),
ZPROP_MAX_COMMENT);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_READONLY:
if (!flags.import) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"import time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_TNAME:
if (!flags.create) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s' can only be set at "
"creation time"), propname);
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
case ZPOOL_PROP_MULTIHOST:
if (get_system_hostid() == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"requires a non-zero system hostid"));
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
goto error;
}
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property '%s'(%d) not defined"), propname, prop);
break;
}
}
return (retprops);
error:
nvlist_free(retprops);
return (NULL);
}
/*
* Set zpool property : propname=propval.
*/
int
zpool_set_prop(zpool_handle_t *zhp, const char *propname, const char *propval)
{
zfs_cmd_t zc = {"\0"};
int ret = -1;
char errbuf[1024];
nvlist_t *nvl = NULL;
nvlist_t *realprops;
uint64_t version;
prop_flags_t flags = { 0 };
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot set property for '%s'"),
zhp->zpool_name);
if (nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0) != 0)
return (no_memory(zhp->zpool_hdl));
if (nvlist_add_string(nvl, propname, propval) != 0) {
nvlist_free(nvl);
return (no_memory(zhp->zpool_hdl));
}
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if ((realprops = zpool_valid_proplist(zhp->zpool_hdl,
zhp->zpool_name, nvl, version, flags, errbuf)) == NULL) {
nvlist_free(nvl);
return (-1);
}
nvlist_free(nvl);
nvl = realprops;
/*
* Execute the corresponding ioctl() to set this property.
*/
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zcmd_write_src_nvlist(zhp->zpool_hdl, &zc, nvl) != 0) {
nvlist_free(nvl);
return (-1);
}
ret = zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_SET_PROPS, &zc);
zcmd_free_nvlists(&zc);
nvlist_free(nvl);
if (ret)
(void) zpool_standard_error(zhp->zpool_hdl, errno, errbuf);
else
(void) zpool_props_refresh(zhp);
return (ret);
}
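/*
 * Minimal usage sketch for zpool_set_prop(), which funnels the single
 * name/value pair through zpool_valid_proplist() above before issuing the
 * ioctl. The pool name and comment text are placeholders.
 */
#include <stdio.h>
#include <libzfs.h>

static int
example_set_pool_comment(libzfs_handle_t *hdl)
{
	zpool_handle_t *zhp;
	int ret;

	if ((zhp = zpool_open(hdl, "tank")) == NULL)
		return (-1);

	/* fails with EZFS_BADPROP if the comment has unprintable characters */
	ret = zpool_set_prop(zhp, "comment", "scratch pool for build hosts");
	if (ret != 0)
		(void) fprintf(stderr, "%s\n", libzfs_error_description(hdl));

	zpool_close(zhp);
	return (ret);
}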
int
zpool_expand_proplist(zpool_handle_t *zhp, zprop_list_t **plp)
{
libzfs_handle_t *hdl = zhp->zpool_hdl;
zprop_list_t *entry;
char buf[ZFS_MAXPROPLEN];
nvlist_t *features = NULL;
nvpair_t *nvp;
zprop_list_t **last;
boolean_t firstexpand = (NULL == *plp);
int i;
if (zprop_expand_list(hdl, plp, ZFS_TYPE_POOL) != 0)
return (-1);
last = plp;
while (*last != NULL)
last = &(*last)->pl_next;
if ((*plp)->pl_all)
features = zpool_get_features(zhp);
if ((*plp)->pl_all && firstexpand) {
for (i = 0; i < SPA_FEATURES; i++) {
zprop_list_t *entry = zfs_alloc(hdl,
sizeof (zprop_list_t));
entry->pl_prop = ZPROP_INVAL;
entry->pl_user_prop = zfs_asprintf(hdl, "feature@%s",
spa_feature_table[i].fi_uname);
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
}
/* add any unsupported features */
for (nvp = nvlist_next_nvpair(features, NULL);
nvp != NULL; nvp = nvlist_next_nvpair(features, nvp)) {
char *propname;
boolean_t found;
zprop_list_t *entry;
if (zfeature_is_supported(nvpair_name(nvp)))
continue;
propname = zfs_asprintf(hdl, "unsupported@%s",
nvpair_name(nvp));
/*
* Before adding the property to the list make sure that no
* other pool already added the same property.
*/
found = B_FALSE;
entry = *plp;
while (entry != NULL) {
if (entry->pl_user_prop != NULL &&
strcmp(propname, entry->pl_user_prop) == 0) {
found = B_TRUE;
break;
}
entry = entry->pl_next;
}
if (found) {
free(propname);
continue;
}
entry = zfs_alloc(hdl, sizeof (zprop_list_t));
entry->pl_prop = ZPROP_INVAL;
entry->pl_user_prop = propname;
entry->pl_width = strlen(entry->pl_user_prop);
entry->pl_all = B_TRUE;
*last = entry;
last = &entry->pl_next;
}
for (entry = *plp; entry != NULL; entry = entry->pl_next) {
if (entry->pl_fixed)
continue;
if (entry->pl_prop != ZPROP_INVAL &&
zpool_get_prop(zhp, entry->pl_prop, buf, sizeof (buf),
NULL, B_FALSE) == 0) {
if (strlen(buf) > entry->pl_width)
entry->pl_width = strlen(buf);
}
}
return (0);
}
/*
* Get the state for the given feature on the given ZFS pool.
*/
int
zpool_prop_get_feature(zpool_handle_t *zhp, const char *propname, char *buf,
size_t len)
{
uint64_t refcount;
boolean_t found = B_FALSE;
nvlist_t *features = zpool_get_features(zhp);
boolean_t supported;
const char *feature = strchr(propname, '@') + 1;
supported = zpool_prop_feature(propname);
ASSERT(supported || zpool_prop_unsupported(propname));
/*
* Convert from feature name to feature guid. This conversion is
* unnecessary for unsupported@... properties because they already
* use guids.
*/
if (supported) {
int ret;
spa_feature_t fid;
ret = zfeature_lookup_name(feature, &fid);
if (ret != 0) {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
feature = spa_feature_table[fid].fi_guid;
}
if (nvlist_lookup_uint64(features, feature, &refcount) == 0)
found = B_TRUE;
if (supported) {
if (!found) {
(void) strlcpy(buf, ZFS_FEATURE_DISABLED, len);
} else {
if (refcount == 0)
(void) strlcpy(buf, ZFS_FEATURE_ENABLED, len);
else
(void) strlcpy(buf, ZFS_FEATURE_ACTIVE, len);
}
} else {
if (found) {
if (refcount == 0) {
(void) strcpy(buf, ZFS_UNSUPPORTED_INACTIVE);
} else {
(void) strcpy(buf, ZFS_UNSUPPORTED_READONLY);
}
} else {
(void) strlcpy(buf, "-", len);
return (ENOTSUP);
}
}
return (0);
}
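/*
 * Minimal usage sketch for zpool_prop_get_feature(). "feature@async_destroy"
 * is just an example property name; any feature@<name> or
 * unsupported@<guid> property works.
 */
#include <stdio.h>
#include <libzfs.h>

static void
example_feature_state(zpool_handle_t *zhp)
{
	char state[64];

	if (zpool_prop_get_feature(zhp, "feature@async_destroy",
	    state, sizeof (state)) == 0)
		/* prints "disabled", "enabled", or "active" */
		(void) printf("async_destroy: %s\n", state);
}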
/*
* Don't start the slice at the default block of 34; many storage
* devices will use a stripe width of 128k, while other vendors prefer a 1m
* alignment. It is best to play it safe and ensure a 1m alignment
* given 512B blocks. When the block size is larger by a power of 2,
* we will still be 1m aligned. Some devices are sensitive to the
* partition ending alignment as well.
*/
#define NEW_START_BLOCK 2048
#define PARTITION_END_ALIGNMENT 2048
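/*
 * Worked example for the constants above (illustrative): with 512-byte
 * sectors, 2048 * 512 B = 1,048,576 B = 1 MiB, so both the partition start
 * and its end alignment land on 1 MiB boundaries. With 4096-byte sectors the
 * same block counts give 8 MiB, which is still 1 MiB aligned.
 */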
/*
* Validate the given pool name, optionally putting an extended error message in
* 'buf'.
*/
boolean_t
zpool_name_valid(libzfs_handle_t *hdl, boolean_t isopen, const char *pool)
{
namecheck_err_t why;
char what;
int ret;
ret = pool_namecheck(pool, &why, &what);
/*
* The rules for reserved pool names were extended at a later point.
* But we need to support users with existing pools that may now be
* invalid. So we only check for this expanded set of names during a
* create (or import), and only in userland.
*/
if (ret == 0 && !isopen &&
(strncmp(pool, "mirror", 6) == 0 ||
strncmp(pool, "raidz", 5) == 0 ||
strncmp(pool, "spare", 5) == 0 ||
strcmp(pool, "log") == 0)) {
if (hdl != NULL)
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is reserved"));
return (B_FALSE);
}
if (ret != 0) {
if (hdl != NULL) {
switch (why) {
case NAME_ERR_TOOLONG:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "name is too long"));
break;
case NAME_ERR_INVALCHAR:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "invalid character "
"'%c' in pool name"), what);
break;
case NAME_ERR_NOLETTER:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name must begin with a letter"));
break;
case NAME_ERR_RESERVED:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"name is reserved"));
break;
case NAME_ERR_DISKLIKE:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool name is reserved"));
break;
case NAME_ERR_LEADING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"leading slash in name"));
break;
case NAME_ERR_EMPTY_COMPONENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty component in name"));
break;
case NAME_ERR_TRAILING_SLASH:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"trailing slash in name"));
break;
case NAME_ERR_MULTIPLE_DELIMITERS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"multiple '@' and/or '#' delimiters in "
"name"));
break;
case NAME_ERR_NO_AT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"permission set is missing '@'"));
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"(%d) not defined"), why);
break;
}
}
return (B_FALSE);
}
return (B_TRUE);
}
/*
* Open a handle to the given pool, even if the pool is currently in the FAULTED
* state.
*/
zpool_handle_t *
zpool_open_canfail(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
boolean_t missing;
/*
* Make sure the pool name is valid.
*/
if (!zpool_name_valid(hdl, B_TRUE, pool)) {
(void) zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot open '%s'"),
pool);
return (NULL);
}
if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
return (NULL);
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (NULL);
}
if (missing) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "no such pool"));
(void) zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), pool);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Like the above, but silent on error. Used when iterating over pools (because
* the configuration cache may be out of date).
*/
int
zpool_open_silent(libzfs_handle_t *hdl, const char *pool, zpool_handle_t **ret)
{
zpool_handle_t *zhp;
boolean_t missing;
if ((zhp = zfs_alloc(hdl, sizeof (zpool_handle_t))) == NULL)
return (-1);
zhp->zpool_hdl = hdl;
(void) strlcpy(zhp->zpool_name, pool, sizeof (zhp->zpool_name));
if (zpool_refresh_stats(zhp, &missing) != 0) {
zpool_close(zhp);
return (-1);
}
if (missing) {
zpool_close(zhp);
*ret = NULL;
return (0);
}
*ret = zhp;
return (0);
}
/*
* Similar to zpool_open_canfail(), but refuses to open pools in the faulted
* state.
*/
zpool_handle_t *
zpool_open(libzfs_handle_t *hdl, const char *pool)
{
zpool_handle_t *zhp;
if ((zhp = zpool_open_canfail(hdl, pool)) == NULL)
return (NULL);
if (zhp->zpool_state == POOL_STATE_UNAVAIL) {
(void) zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "cannot open '%s'"), zhp->zpool_name);
zpool_close(zhp);
return (NULL);
}
return (zhp);
}
/*
* Close the handle. Simply frees the memory associated with the handle.
*/
void
zpool_close(zpool_handle_t *zhp)
{
nvlist_free(zhp->zpool_config);
nvlist_free(zhp->zpool_old_config);
nvlist_free(zhp->zpool_props);
free(zhp);
}
/*
* Return the name of the pool.
*/
const char *
zpool_get_name(zpool_handle_t *zhp)
{
return (zhp->zpool_name);
}
/*
* Return the state of the pool (ACTIVE or UNAVAILABLE)
*/
int
zpool_get_state(zpool_handle_t *zhp)
{
return (zhp->zpool_state);
}
/*
* Create the named pool, using the provided vdev list. It is assumed
* that the consumer has already validated the contents of the nvlist, so we
* don't have to worry about error semantics.
*/
int
zpool_create(libzfs_handle_t *hdl, const char *pool, nvlist_t *nvroot,
nvlist_t *props, nvlist_t *fsprops)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *zc_fsprops = NULL;
nvlist_t *zc_props = NULL;
nvlist_t *hidden_args = NULL;
uint8_t *wkeydata = NULL;
uint_t wkeylen = 0;
char msg[1024];
int ret = -1;
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot create '%s'"), pool);
if (!zpool_name_valid(hdl, B_FALSE, pool))
return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
return (-1);
if (props) {
prop_flags_t flags = { .create = B_TRUE, .import = B_FALSE };
if ((zc_props = zpool_valid_proplist(hdl, pool, props,
SPA_VERSION_1, flags, msg)) == NULL) {
goto create_failed;
}
}
if (fsprops) {
uint64_t zoned;
char *zonestr;
zoned = ((nvlist_lookup_string(fsprops,
zfs_prop_to_name(ZFS_PROP_ZONED), &zonestr) == 0) &&
strcmp(zonestr, "on") == 0);
if ((zc_fsprops = zfs_valid_proplist(hdl, ZFS_TYPE_FILESYSTEM,
fsprops, zoned, NULL, NULL, B_TRUE, msg)) == NULL) {
goto create_failed;
}
if (!zc_props &&
(nvlist_alloc(&zc_props, NV_UNIQUE_NAME, 0) != 0)) {
goto create_failed;
}
if (zfs_crypto_create(hdl, NULL, zc_fsprops, props,
&wkeydata, &wkeylen) != 0) {
zfs_error(hdl, EZFS_CRYPTOFAILED, msg);
goto create_failed;
}
if (nvlist_add_nvlist(zc_props,
ZPOOL_ROOTFS_PROPS, zc_fsprops) != 0) {
goto create_failed;
}
if (wkeydata != NULL) {
if (nvlist_alloc(&hidden_args, NV_UNIQUE_NAME, 0) != 0)
goto create_failed;
if (nvlist_add_uint8_array(hidden_args, "wkeydata",
wkeydata, wkeylen) != 0)
goto create_failed;
if (nvlist_add_nvlist(zc_props, ZPOOL_HIDDEN_ARGS,
hidden_args) != 0)
goto create_failed;
}
}
if (zc_props && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
goto create_failed;
(void) strlcpy(zc.zc_name, pool, sizeof (zc.zc_name));
if ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_CREATE, &zc)) != 0) {
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label. This can also happen if the device is
* part of an active md or lvm device.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device, or "
"one of\nthe devices is part of an active md or "
"lvm device"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
case ERANGE:
/*
* This happens if the record size is smaller or larger
* than the allowed size range, or not a power of 2.
*
* NOTE: although zfs_valid_proplist is called earlier,
* this case may have slipped through since the
* pool does not exist yet and it is therefore
* impossible to read properties e.g. max blocksize
* from the pool.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"record size invalid"));
return (zfs_error(hdl, EZFS_BADPROP, msg));
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is less than the "
"minimum size (%s)"), buf);
}
return (zfs_error(hdl, EZFS_BADDEV, msg));
case ENOSPC:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is out of space"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
case ENOTBLK:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cache device must be a disk or disk slice"));
return (zfs_error(hdl, EZFS_BADDEV, msg));
default:
return (zpool_standard_error(hdl, errno, msg));
}
}
create_failed:
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(zc_fsprops);
nvlist_free(hidden_args);
if (wkeydata != NULL)
free(wkeydata);
return (ret);
}
/*
* Destroy the given pool. It is up to the caller to ensure that there are no
* datasets left in the pool.
*/
int
zpool_destroy(zpool_handle_t *zhp, const char *log_str)
{
zfs_cmd_t zc = {"\0"};
zfs_handle_t *zfp = NULL;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char msg[1024];
if (zhp->zpool_state == POOL_STATE_ACTIVE &&
(zfp = zfs_open(hdl, zhp->zpool_name, ZFS_TYPE_FILESYSTEM)) == NULL)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_DESTROY, &zc) != 0) {
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot destroy '%s'"), zhp->zpool_name);
if (errno == EROFS) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
} else {
(void) zpool_standard_error(hdl, errno, msg);
}
if (zfp)
zfs_close(zfp);
return (-1);
}
if (zfp) {
remove_mountpoint(zfp);
zfs_close(zfp);
}
return (0);
}
/*
* Add the given vdevs to the pool. The caller must have already performed the
* necessary verification to ensure that the vdev specification is well-formed.
*/
int
zpool_add(zpool_handle_t *zhp, nvlist_t *nvroot)
{
zfs_cmd_t zc = {"\0"};
int ret;
libzfs_handle_t *hdl = zhp->zpool_hdl;
char msg[1024];
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot add to '%s'"), zhp->zpool_name);
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_SPARES &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add hot spares"));
return (zfs_error(hdl, EZFS_BADVERSION, msg));
}
if (zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL) <
SPA_VERSION_L2CACHE &&
nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool must be "
"upgraded to add cache devices"));
return (zfs_error(hdl, EZFS_BADVERSION, msg));
}
if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
return (-1);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_ADD, &zc) != 0) {
switch (errno) {
case EBUSY:
/*
* This can happen if the user has specified the same
* device multiple times. We can't reliably detect this
* until we try to add it and see we already have a
* label.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more vdevs refer to the same device"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
+ case EINVAL:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid config; a pool with removing/removed "
+ "vdevs does not support adding raidz vdevs"));
+ (void) zfs_error(hdl, EZFS_BADDEV, msg);
+ break;
+
case EOVERFLOW:
/*
* This occurs when one of the devices is below
* SPA_MINDEVSIZE. Unfortunately, we can't detect which
* device was the problem device since there's no
* reliable way to determine device size from userland.
*/
{
char buf[64];
zfs_nicebytes(SPA_MINDEVSIZE, buf,
sizeof (buf));
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is less than the minimum "
"size (%s)"), buf);
}
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case ENOTSUP:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool must be upgraded to add these vdevs"));
(void) zfs_error(hdl, EZFS_BADVERSION, msg);
break;
case ENOTBLK:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cache device must be a disk or disk slice"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
default:
(void) zpool_standard_error(hdl, errno, msg);
}
ret = -1;
} else {
ret = 0;
}
zcmd_free_nvlists(&zc);
return (ret);
}
/*
* Exports the pool from the system. The caller must ensure that there are no
* mounted datasets in the pool.
*/
static int
zpool_export_common(zpool_handle_t *zhp, boolean_t force, boolean_t hardforce,
const char *log_str)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot export '%s'"), zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = force;
zc.zc_guid = hardforce;
zc.zc_history = (uint64_t)(uintptr_t)log_str;
if (zfs_ioctl(zhp->zpool_hdl, ZFS_IOC_POOL_EXPORT, &zc) != 0) {
switch (errno) {
case EXDEV:
zfs_error_aux(zhp->zpool_hdl, dgettext(TEXT_DOMAIN,
"use '-f' to override the following errors:\n"
"'%s' has an active shared spare which could be"
" used by other pools once '%s' is exported."),
zhp->zpool_name, zhp->zpool_name);
return (zfs_error(zhp->zpool_hdl, EZFS_ACTIVE_SPARE,
msg));
default:
return (zpool_standard_error_fmt(zhp->zpool_hdl, errno,
msg));
}
}
return (0);
}
int
zpool_export(zpool_handle_t *zhp, boolean_t force, const char *log_str)
{
return (zpool_export_common(zhp, force, B_FALSE, log_str));
}
int
zpool_export_force(zpool_handle_t *zhp, const char *log_str)
{
return (zpool_export_common(zhp, B_TRUE, B_TRUE, log_str));
}
static void
zpool_rewind_exclaim(libzfs_handle_t *hdl, const char *name, boolean_t dryrun,
nvlist_t *config)
{
nvlist_t *nv = NULL;
uint64_t rewindto;
int64_t loss = -1;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr || config == NULL)
return;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0) {
return;
}
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
return;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
if (dryrun) {
(void) printf(dgettext(TEXT_DOMAIN,
"Would be able to return %s "
"to its state as of %s.\n"),
name, timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"Pool %s returned to its state as of %s.\n"),
name, timestr);
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
((longlong_t)loss + 30) / 60);
(void) printf(dgettext(TEXT_DOMAIN,
"minutes of transactions.\n"));
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"%s approximately %lld "),
dryrun ? "Would discard" : "Discarded",
(longlong_t)loss);
(void) printf(dgettext(TEXT_DOMAIN,
"seconds of transactions.\n"));
}
}
}
void
zpool_explain_recover(libzfs_handle_t *hdl, const char *name, int reason,
nvlist_t *config)
{
nvlist_t *nv = NULL;
int64_t loss = -1;
uint64_t edata = UINT64_MAX;
uint64_t rewindto;
struct tm t;
char timestr[128];
if (!hdl->libzfs_printerr)
return;
if (reason >= 0)
(void) printf(dgettext(TEXT_DOMAIN, "action: "));
else
(void) printf(dgettext(TEXT_DOMAIN, "\t"));
/* All attempted rewinds failed if ZPOOL_CONFIG_LOAD_TIME missing */
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nv) != 0 ||
nvlist_lookup_nvlist(nv, ZPOOL_CONFIG_REWIND_INFO, &nv) != 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_TIME, &rewindto) != 0)
goto no_info;
(void) nvlist_lookup_int64(nv, ZPOOL_CONFIG_REWIND_TIME, &loss);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_LOAD_DATA_ERRORS,
&edata);
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery is possible, but will result in some data loss.\n"));
if (localtime_r((time_t *)&rewindto, &t) != NULL &&
strftime(timestr, 128, "%c", &t) != 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReturning the pool to its state as of %s\n"
"\tshould correct the problem. "),
timestr);
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"\tReverting the pool to an earlier state "
"should correct the problem.\n\t"));
}
if (loss > 120) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld minutes of data\n"
"\tmust be discarded, irreversibly. "),
((longlong_t)loss + 30) / 60);
} else if (loss > 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"Approximately %lld seconds of data\n"
"\tmust be discarded, irreversibly. "),
(longlong_t)loss);
}
if (edata != 0 && edata != UINT64_MAX) {
if (edata == 1) {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, at least\n"
"\tone persistent user-data error will remain. "));
} else {
(void) printf(dgettext(TEXT_DOMAIN,
"After rewind, several\n"
"\tpersistent user-data errors will remain. "));
}
}
(void) printf(dgettext(TEXT_DOMAIN,
"Recovery can be attempted\n\tby executing 'zpool %s -F %s'. "),
reason >= 0 ? "clear" : "import", name);
(void) printf(dgettext(TEXT_DOMAIN,
"A scrub of the pool\n"
"\tis strongly recommended after recovery.\n"));
return;
no_info:
(void) printf(dgettext(TEXT_DOMAIN,
"Destroy and re-create the pool from\n\ta backup source.\n"));
}
/*
* zpool_import() is a contracted interface. Should be kept the same
* if possible.
*
* Applications should use zpool_import_props() to import a pool with
* new property values to be set.
*/
int
zpool_import(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
char *altroot)
{
nvlist_t *props = NULL;
int ret;
if (altroot != NULL) {
if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0) {
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
if (nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), altroot) != 0 ||
nvlist_add_string(props,
zpool_prop_to_name(ZPOOL_PROP_CACHEFILE), "none") != 0) {
nvlist_free(props);
return (zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
}
}
ret = zpool_import_props(hdl, config, newname, props,
ZFS_IMPORT_NORMAL);
nvlist_free(props);
return (ret);
}
static void
print_vdev_tree(libzfs_handle_t *hdl, const char *name, nvlist_t *nv,
int indent)
{
nvlist_t **child;
uint_t c, children;
char *vname;
uint64_t is_log = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG,
&is_log);
if (name != NULL)
(void) printf("\t%*s%s%s\n", indent, "", name,
is_log ? " [log]" : "");
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return;
for (c = 0; c < children; c++) {
vname = zpool_vdev_name(hdl, NULL, child[c], VDEV_NAME_TYPE_ID);
print_vdev_tree(hdl, vname, child[c], indent + 2);
free(vname);
}
}
void
zpool_print_unsup_feat(nvlist_t *config)
{
nvlist_t *nvinfo, *unsup_feat;
nvpair_t *nvp;
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO, &nvinfo) ==
0);
verify(nvlist_lookup_nvlist(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT,
&unsup_feat) == 0);
for (nvp = nvlist_next_nvpair(unsup_feat, NULL); nvp != NULL;
nvp = nvlist_next_nvpair(unsup_feat, nvp)) {
char *desc;
verify(nvpair_type(nvp) == DATA_TYPE_STRING);
verify(nvpair_value_string(nvp, &desc) == 0);
if (strlen(desc) > 0)
(void) printf("\t%s (%s)\n", nvpair_name(nvp), desc);
else
(void) printf("\t%s\n", nvpair_name(nvp));
}
}
/*
* Import the given pool using the known configuration and a list of
* properties to be set. The configuration should have come from
* zpool_find_import(). The 'newname' parameter controls whether the pool
* is imported under a different name.
*/
int
zpool_import_props(libzfs_handle_t *hdl, nvlist_t *config, const char *newname,
nvlist_t *props, int flags)
{
zfs_cmd_t zc = {"\0"};
zpool_rewind_policy_t policy;
nvlist_t *nv = NULL;
nvlist_t *nvinfo = NULL;
nvlist_t *missing = NULL;
char *thename;
char *origname;
int ret;
int error = 0;
char errbuf[1024];
verify(nvlist_lookup_string(config, ZPOOL_CONFIG_POOL_NAME,
&origname) == 0);
(void) snprintf(errbuf, sizeof (errbuf), dgettext(TEXT_DOMAIN,
"cannot import pool '%s'"), origname);
if (newname != NULL) {
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error_fmt(hdl, EZFS_INVALIDNAME,
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
newname));
thename = (char *)newname;
} else {
thename = origname;
}
if (props != NULL) {
uint64_t version;
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&version) == 0);
if ((props = zpool_valid_proplist(hdl, origname,
props, version, flags, errbuf)) == NULL)
return (-1);
if (zcmd_write_src_nvlist(hdl, &zc, props) != 0) {
nvlist_free(props);
return (-1);
}
nvlist_free(props);
}
(void) strlcpy(zc.zc_name, thename, sizeof (zc.zc_name));
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID,
&zc.zc_guid) == 0);
if (zcmd_write_conf_nvlist(hdl, &zc, config) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
if (zcmd_alloc_dst_nvlist(hdl, &zc, zc.zc_nvlist_conf_size * 2) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
zc.zc_cookie = flags;
while ((ret = zfs_ioctl(hdl, ZFS_IOC_POOL_IMPORT, &zc)) != 0 &&
errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
}
if (ret != 0)
error = errno;
(void) zcmd_read_dst_nvlist(hdl, &zc, &nv);
zcmd_free_nvlists(&zc);
zpool_get_rewind_policy(config, &policy);
if (error) {
char desc[1024];
char aux[256];
/*
* Dry-run failed, but we print out what success
* looks like if we found a best txg
*/
if (policy.zrp_request & ZPOOL_TRY_REWIND) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
B_TRUE, nv);
nvlist_free(nv);
return (-1);
}
if (newname == NULL)
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s'"),
thename);
else
(void) snprintf(desc, sizeof (desc),
dgettext(TEXT_DOMAIN, "cannot import '%s' as '%s'"),
origname, thename);
switch (error) {
case ENOTSUP:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_exists(nvinfo, ZPOOL_CONFIG_UNSUP_FEAT)) {
(void) printf(dgettext(TEXT_DOMAIN, "This "
"pool uses the following feature(s) not "
"supported by this system:\n"));
zpool_print_unsup_feat(nv);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_CAN_RDONLY)) {
(void) printf(dgettext(TEXT_DOMAIN,
"All unsupported features are only "
"required for writing to the pool."
"\nThe pool can be imported using "
"'-o readonly=on'.\n"));
}
}
/*
* Unsupported version.
*/
(void) zfs_error(hdl, EZFS_BADVERSION, desc);
break;
case EREMOTEIO:
if (nv != NULL && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0) {
char *hostname = "<unknown>";
uint64_t hostid = 0;
mmp_state_t mmp_state;
mmp_state = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_STATE);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME))
hostname = fnvlist_lookup_string(nvinfo,
ZPOOL_CONFIG_MMP_HOSTNAME);
if (nvlist_exists(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID))
hostid = fnvlist_lookup_uint64(nvinfo,
ZPOOL_CONFIG_MMP_HOSTID);
if (mmp_state == MMP_STATE_ACTIVE) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool is imp"
"orted on host '%s' (hostid=%lx).\n"
"Export the pool on the other "
"system, then run 'zpool import'."),
hostname, (unsigned long) hostid);
} else if (mmp_state == MMP_STATE_NO_HOSTID) {
(void) snprintf(aux, sizeof (aux),
dgettext(TEXT_DOMAIN, "pool has "
"the multihost property on and "
"the\nsystem's hostid is not set. "
"Set a unique system hostid with "
"the zgenhostid(8) command.\n"));
}
(void) zfs_error_aux(hdl, aux);
}
(void) zfs_error(hdl, EZFS_ACTIVE_POOL, desc);
break;
case EINVAL:
(void) zfs_error(hdl, EZFS_INVALCONFIG, desc);
break;
case EROFS:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is read only"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENXIO:
if (nv && nvlist_lookup_nvlist(nv,
ZPOOL_CONFIG_LOAD_INFO, &nvinfo) == 0 &&
nvlist_lookup_nvlist(nvinfo,
ZPOOL_CONFIG_MISSING_DEVICES, &missing) == 0) {
(void) printf(dgettext(TEXT_DOMAIN,
"The devices below are missing, use "
"'-m' to import the pool anyway:\n"));
print_vdev_tree(hdl, NULL, missing, 2);
(void) printf("\n");
}
(void) zpool_standard_error(hdl, error, desc);
break;
case EEXIST:
(void) zpool_standard_error(hdl, error, desc);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices are already in use\n"));
(void) zfs_error(hdl, EZFS_BADDEV, desc);
break;
case ENAMETOOLONG:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new name of at least one dataset is longer than "
"the maximum allowable length"));
(void) zfs_error(hdl, EZFS_NAMETOOLONG, desc);
break;
default:
(void) zpool_standard_error(hdl, error, desc);
zpool_explain_recover(hdl,
newname ? origname : thename, -error, nv);
break;
}
nvlist_free(nv);
ret = -1;
} else {
zpool_handle_t *zhp;
/*
* This should never fail, but play it safe anyway.
*/
if (zpool_open_silent(hdl, thename, &zhp) != 0)
ret = -1;
else if (zhp != NULL)
zpool_close(zhp);
if (policy.zrp_request &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
zpool_rewind_exclaim(hdl, newname ? origname : thename,
((policy.zrp_request & ZPOOL_TRY_REWIND) != 0), nv);
}
nvlist_free(nv);
return (0);
}
return (ret);
}
/*
* Scan the pool.
*/
int
zpool_scan(zpool_handle_t *zhp, pool_scan_func_t func, pool_scrub_cmd_t cmd)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
int err;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_cookie = func;
zc.zc_flags = cmd;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_SCAN, &zc) == 0)
return (0);
err = errno;
/* ECANCELED on a scrub means we resumed a paused scrub */
if (err == ECANCELED && func == POOL_SCAN_SCRUB &&
cmd == POOL_SCRUB_NORMAL)
return (0);
if (err == ENOENT && func != POOL_SCAN_NONE && cmd == POOL_SCRUB_NORMAL)
return (0);
if (func == POOL_SCAN_SCRUB) {
if (cmd == POOL_SCRUB_PAUSE) {
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot pause scrubbing %s"), zc.zc_name);
} else {
assert(cmd == POOL_SCRUB_NORMAL);
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot scrub %s"), zc.zc_name);
}
} else if (func == POOL_SCAN_NONE) {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot cancel scrubbing %s"),
zc.zc_name);
} else {
assert(!"unexpected result");
}
if (err == EBUSY) {
nvlist_t *nvroot;
pool_scan_stat_t *ps = NULL;
uint_t psc;
verify(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
(void) nvlist_lookup_uint64_array(nvroot,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t **)&ps, &psc);
if (ps && ps->pss_func == POOL_SCAN_SCRUB) {
if (cmd == POOL_SCRUB_PAUSE)
return (zfs_error(hdl, EZFS_SCRUB_PAUSED, msg));
else
return (zfs_error(hdl, EZFS_SCRUBBING, msg));
} else {
return (zfs_error(hdl, EZFS_RESILVERING, msg));
}
} else if (err == ENOENT) {
return (zfs_error(hdl, EZFS_NO_SCRUB, msg));
} else {
return (zpool_standard_error(hdl, err, msg));
}
}
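/*
 * Minimal usage sketch for zpool_scan(), mirroring what 'zpool scrub' does:
 * start, pause, and resume all go through the same entry point. Resuming a
 * paused scrub succeeds because the ECANCELED case above is treated as
 * success.
 */
#include <libzfs.h>

static int
example_scrub_pause_resume(zpool_handle_t *zhp)
{
	int err;

	/* start (or resume) a scrub */
	if ((err = zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL)) != 0)
		return (err);

	/* pause the running scrub */
	if ((err = zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_PAUSE)) != 0)
		return (err);

	/* resume: ECANCELED from the kernel is mapped to success above */
	return (zpool_scan(zhp, POOL_SCAN_SCRUB, POOL_SCRUB_NORMAL));
}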
/*
* Find a vdev that matches the search criteria specified. We use the
* nvpair name to determine how we should look for the device.
* 'avail_spare' is set to TRUE if the provided guid refers to an AVAIL
* spare, but FALSE if it is an INUSE spare.
*/
static nvlist_t *
vdev_to_nvlist_iter(nvlist_t *nv, nvlist_t *search, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
uint_t c, children;
nvlist_t **child;
nvlist_t *ret;
uint64_t is_log;
char *srchkey;
nvpair_t *pair = nvlist_next_nvpair(search, NULL);
/* Nothing to look for */
if (search == NULL || pair == NULL)
return (NULL);
/* Obtain the key we will use to search */
srchkey = nvpair_name(pair);
switch (nvpair_type(pair)) {
case DATA_TYPE_UINT64:
if (strcmp(srchkey, ZPOOL_CONFIG_GUID) == 0) {
uint64_t srchval, theguid;
verify(nvpair_value_uint64(pair, &srchval) == 0);
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&theguid) == 0);
if (theguid == srchval)
return (nv);
}
break;
case DATA_TYPE_STRING: {
char *srchval, *val;
verify(nvpair_value_string(pair, &srchval) == 0);
if (nvlist_lookup_string(nv, srchkey, &val) != 0)
break;
/*
* Search for the requested value. Special cases:
*
* - ZPOOL_CONFIG_PATH for whole disk entries. These end in
* "-part1", or "p1". The suffix is hidden from the user,
* but included in the string, so this matches around it.
* - ZPOOL_CONFIG_PATH for short names: zfs_strcmp_shortname()
* is used to check all possible expanded paths.
* - looking for a top-level vdev name (i.e. ZPOOL_CONFIG_TYPE).
*
* Otherwise, all other searches are simple string compares.
*/
if (strcmp(srchkey, ZPOOL_CONFIG_PATH) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
if (zfs_strcmp_pathname(srchval, val, wholedisk) == 0)
return (nv);
} else if (strcmp(srchkey, ZPOOL_CONFIG_TYPE) == 0 && val) {
char *type, *idx, *end, *p;
uint64_t id, vdev_id;
/*
* Determine our vdev type, keeping in mind
* that the srchval is composed of a type and
* vdev id pair (i.e. mirror-4).
*/
if ((type = strdup(srchval)) == NULL)
return (NULL);
if ((p = strrchr(type, '-')) == NULL) {
free(type);
break;
}
idx = p + 1;
*p = '\0';
/*
* If the types don't match then keep looking.
*/
if (strncmp(val, type, strlen(val)) != 0) {
free(type);
break;
}
verify(zpool_vdev_is_interior(type));
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
&id) == 0);
errno = 0;
vdev_id = strtoull(idx, &end, 10);
free(type);
if (errno != 0)
return (NULL);
/*
* Now verify that we have the correct vdev id.
*/
if (vdev_id == id)
return (nv);
}
/*
* Common case
*/
if (strcmp(srchval, val) == 0)
return (nv);
break;
}
default:
break;
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0)
return (NULL);
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
/*
* The 'is_log' value is only set for the toplevel
* vdev, not the leaf vdevs. So we always look up the
* log device from the root of the vdev tree (where
* 'log' is non-NULL).
*/
if (log != NULL &&
nvlist_lookup_uint64(child[c],
ZPOOL_CONFIG_IS_LOG, &is_log) == 0 &&
is_log) {
*log = B_TRUE;
}
return (ret);
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_SPARES,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*avail_spare = B_TRUE;
return (ret);
}
}
}
if (nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_L2CACHE,
&child, &children) == 0) {
for (c = 0; c < children; c++) {
if ((ret = vdev_to_nvlist_iter(child[c], search,
avail_spare, l2cache, NULL)) != NULL) {
*l2cache = B_TRUE;
return (ret);
}
}
}
return (NULL);
}
/*
* Given a physical path (minus the "/devices" prefix), find the
* associated vdev.
*/
nvlist_t *
zpool_find_vdev_by_physpath(zpool_handle_t *zhp, const char *ppath,
boolean_t *avail_spare, boolean_t *l2cache, boolean_t *log)
{
nvlist_t *search, *nvroot, *ret;
verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
verify(nvlist_add_string(search, ZPOOL_CONFIG_PHYS_PATH, ppath) == 0);
verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
nvlist_free(search);
return (ret);
}
/*
* Determine if we have an "interior" top-level vdev (i.e. mirror/raidz).
*/
static boolean_t
zpool_vdev_is_interior(const char *name)
{
if (strncmp(name, VDEV_TYPE_RAIDZ, strlen(VDEV_TYPE_RAIDZ)) == 0 ||
strncmp(name, VDEV_TYPE_SPARE, strlen(VDEV_TYPE_SPARE)) == 0 ||
strncmp(name,
VDEV_TYPE_REPLACING, strlen(VDEV_TYPE_REPLACING)) == 0 ||
strncmp(name, VDEV_TYPE_MIRROR, strlen(VDEV_TYPE_MIRROR)) == 0)
return (B_TRUE);
return (B_FALSE);
}
nvlist_t *
zpool_find_vdev(zpool_handle_t *zhp, const char *path, boolean_t *avail_spare,
boolean_t *l2cache, boolean_t *log)
{
char *end;
nvlist_t *nvroot, *search, *ret;
uint64_t guid;
verify(nvlist_alloc(&search, NV_UNIQUE_NAME, KM_SLEEP) == 0);
guid = strtoull(path, &end, 0);
if (guid != 0 && *end == '\0') {
verify(nvlist_add_uint64(search, ZPOOL_CONFIG_GUID, guid) == 0);
} else if (zpool_vdev_is_interior(path)) {
verify(nvlist_add_string(search, ZPOOL_CONFIG_TYPE, path) == 0);
} else {
verify(nvlist_add_string(search, ZPOOL_CONFIG_PATH, path) == 0);
}
verify(nvlist_lookup_nvlist(zhp->zpool_config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
*avail_spare = B_FALSE;
*l2cache = B_FALSE;
if (log != NULL)
*log = B_FALSE;
ret = vdev_to_nvlist_iter(nvroot, search, avail_spare, l2cache, log);
nvlist_free(search);
return (ret);
}
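/*
 * Minimal usage sketch for zpool_find_vdev() showing the three lookup styles
 * it accepts: a numeric guid, an interior vdev name such as "mirror-0", and
 * a device path. The search strings are placeholders.
 */
#include <stdio.h>
#include <libzfs.h>

static void
example_find_vdev(zpool_handle_t *zhp)
{
	boolean_t spare, l2cache, log;
	nvlist_t *tgt;
	int i;
	const char *searches[] = {
		"7028896434486253842",	/* parsed as a guid */
		"mirror-0",		/* interior top-level vdev */
		"/dev/sda1",		/* plain device path */
	};

	for (i = 0; i < 3; i++) {
		tgt = zpool_find_vdev(zhp, searches[i], &spare, &l2cache,
		    &log);
		if (tgt != NULL)
			(void) printf("%s: found%s%s%s\n", searches[i],
			    spare ? " (spare)" : "",
			    l2cache ? " (l2cache)" : "",
			    log ? " (log)" : "");
	}
}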
static int
vdev_is_online(nvlist_t *nv)
{
uint64_t ival;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE, &ival) == 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED, &ival) == 0 ||
nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED, &ival) == 0)
return (0);
return (1);
}
/*
* Helper function for zpool_get_physpaths().
*/
static int
vdev_get_one_physpath(nvlist_t *config, char *physpath, size_t physpath_size,
size_t *bytes_written)
{
size_t bytes_left, pos, rsz;
char *tmppath;
const char *format;
if (nvlist_lookup_string(config, ZPOOL_CONFIG_PHYS_PATH,
&tmppath) != 0)
return (EZFS_NODEVICE);
pos = *bytes_written;
bytes_left = physpath_size - pos;
format = (pos == 0) ? "%s" : " %s";
rsz = snprintf(physpath + pos, bytes_left, format, tmppath);
*bytes_written += rsz;
if (rsz >= bytes_left) {
/* if physpath was not copied properly, clear it */
if (bytes_left != 0) {
physpath[pos] = 0;
}
return (EZFS_NOSPC);
}
return (0);
}
static int
vdev_get_physpaths(nvlist_t *nv, char *physpath, size_t phypath_size,
size_t *rsz, boolean_t is_spare)
{
char *type;
int ret;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (EZFS_INVALCONFIG);
if (strcmp(type, VDEV_TYPE_DISK) == 0) {
/*
* An active spare device has ZPOOL_CONFIG_IS_SPARE set.
* For a spare vdev, we only want to boot from the active
* spare device.
*/
if (is_spare) {
uint64_t spare = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare);
if (!spare)
return (EZFS_INVALCONFIG);
}
if (vdev_is_online(nv)) {
if ((ret = vdev_get_one_physpath(nv, physpath,
phypath_size, rsz)) != 0)
return (ret);
}
} else if (strcmp(type, VDEV_TYPE_MIRROR) == 0 ||
strcmp(type, VDEV_TYPE_RAIDZ) == 0 ||
strcmp(type, VDEV_TYPE_REPLACING) == 0 ||
(is_spare = (strcmp(type, VDEV_TYPE_SPARE) == 0))) {
nvlist_t **child;
uint_t count;
int i, ret;
if (nvlist_lookup_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, &child, &count) != 0)
return (EZFS_INVALCONFIG);
for (i = 0; i < count; i++) {
ret = vdev_get_physpaths(child[i], physpath,
phypath_size, rsz, is_spare);
if (ret == EZFS_NOSPC)
return (ret);
}
}
return (EZFS_POOL_INVALARG);
}
/*
* Get phys_path for a root pool config.
* Return 0 on success; non-zero on failure.
*/
static int
zpool_get_config_physpath(nvlist_t *config, char *physpath, size_t phypath_size)
{
size_t rsz;
nvlist_t *vdev_root;
nvlist_t **child;
uint_t count;
char *type;
rsz = 0;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&vdev_root) != 0)
return (EZFS_INVALCONFIG);
if (nvlist_lookup_string(vdev_root, ZPOOL_CONFIG_TYPE, &type) != 0 ||
nvlist_lookup_nvlist_array(vdev_root, ZPOOL_CONFIG_CHILDREN,
&child, &count) != 0)
return (EZFS_INVALCONFIG);
/*
* root pool can only have a single top-level vdev.
*/
if (strcmp(type, VDEV_TYPE_ROOT) != 0 || count != 1)
return (EZFS_POOL_INVALARG);
(void) vdev_get_physpaths(child[0], physpath, phypath_size, &rsz,
B_FALSE);
/* No online devices */
if (rsz == 0)
return (EZFS_NODEVICE);
return (0);
}
/*
* Get phys_path for a root pool
* Return 0 on success; non-zero on failure.
*/
int
zpool_get_physpath(zpool_handle_t *zhp, char *physpath, size_t phypath_size)
{
return (zpool_get_config_physpath(zhp->zpool_config, physpath,
phypath_size));
}
/*
* If the device has been dynamically expanded, then we need to relabel
* the disk to use the new unallocated space.
*/
static int
zpool_relabel_disk(libzfs_handle_t *hdl, const char *path, const char *msg)
{
int fd, error;
if ((fd = open(path, O_RDWR|O_DIRECT)) < 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
"relabel '%s': unable to open device: %d"), path, errno);
return (zfs_error(hdl, EZFS_OPENFAILED, msg));
}
/*
* It's possible that we might encounter an error if the device
* does not have any unallocated space left. If so, we simply
* ignore that error and continue on.
*
* Also, we don't call efi_rescan() - that would just return EBUSY.
* The module will do it for us in vdev_disk_open().
*/
error = efi_use_whole_disk(fd);
/* Flush the buffers to disk and invalidate the page cache. */
(void) fsync(fd);
(void) ioctl(fd, BLKFLSBUF);
(void) close(fd);
if (error && error != VT_ENOSPC) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
"relabel '%s': unable to read disk capacity"), path);
return (zfs_error(hdl, EZFS_NOCAP, msg));
}
return (0);
}
/*
* Convert a vdev path to a GUID. Returns GUID or 0 on error.
*
* If is_spare, is_l2cache, or is_log is non-NULL, then store in it
* whether the vdev is a spare, l2cache, or log device. If they're NULL,
* they are ignored.
*/
static uint64_t
zpool_vdev_path_to_guid_impl(zpool_handle_t *zhp, const char *path,
boolean_t *is_spare, boolean_t *is_l2cache, boolean_t *is_log)
{
uint64_t guid;
boolean_t spare = B_FALSE, l2cache = B_FALSE, log = B_FALSE;
nvlist_t *tgt;
if ((tgt = zpool_find_vdev(zhp, path, &spare, &l2cache,
&log)) == NULL)
return (0);
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &guid) == 0);
if (is_spare != NULL)
*is_spare = spare;
if (is_l2cache != NULL)
*is_l2cache = l2cache;
if (is_log != NULL)
*is_log = log;
return (guid);
}
/* Convert a vdev path to a GUID. Returns GUID or 0 on error. */
uint64_t
zpool_vdev_path_to_guid(zpool_handle_t *zhp, const char *path)
{
return (zpool_vdev_path_to_guid_impl(zhp, path, NULL, NULL, NULL));
}
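/*
* Example use of zpool_vdev_path_to_guid() (a minimal sketch; 'zhp' is
* assumed to be an open pool handle and the device path is hypothetical):
*
*	uint64_t guid = zpool_vdev_path_to_guid(zhp, "/dev/sda1");
*
*	if (guid == 0)
*		(void) fprintf(stderr, "no such vdev in pool\n");
*	else
*		(void) printf("vdev guid: %llu\n", (u_longlong_t)guid);
*/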
/*
* Bring the specified vdev online. The 'flags' parameter is a set of the
* ZFS_ONLINE_* flags.
*/
int
zpool_vdev_online(zpool_handle_t *zhp, const char *path, int flags,
vdev_state_t *newstate)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
char *pathname;
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
int error;
if (flags & ZFS_ONLINE_EXPAND) {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot expand %s"), path);
} else {
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot online %s"), path);
}
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
&islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
if ((flags & ZFS_ONLINE_EXPAND ||
zpool_get_prop_int(zhp, ZPOOL_PROP_AUTOEXPAND, NULL)) &&
nvlist_lookup_string(tgt, ZPOOL_CONFIG_PATH, &pathname) == 0) {
uint64_t wholedisk = 0;
(void) nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk);
/*
* XXX - L2ARC 1.0 devices can't support expansion.
*/
if (l2cache) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot expand cache devices"));
return (zfs_error(hdl, EZFS_VDEVNOTSUP, msg));
}
if (wholedisk) {
const char *fullpath = path;
char buf[MAXPATHLEN];
if (path[0] != '/') {
error = zfs_resolve_shortname(path, buf,
sizeof (buf));
if (error != 0)
return (zfs_error(hdl, EZFS_NODEVICE,
msg));
fullpath = buf;
}
error = zpool_relabel_disk(hdl, fullpath, msg);
if (error != 0)
return (error);
}
}
zc.zc_cookie = VDEV_STATE_ONLINE;
zc.zc_obj = flags;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) != 0) {
if (errno == EINVAL) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "was split "
"from this pool into a new one. Use '%s' "
"instead"), "zpool detach");
return (zfs_error(hdl, EZFS_POSTSPLIT_ONLINE, msg));
}
return (zpool_standard_error(hdl, errno, msg));
}
*newstate = zc.zc_cookie;
return (0);
}
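/*
* Example use of zpool_vdev_online() with expansion (a minimal sketch;
* 'zhp' is assumed to be an open pool handle and the device path is
* hypothetical):
*
*	vdev_state_t newstate;
*
*	if (zpool_vdev_online(zhp, "/dev/sda1", ZFS_ONLINE_EXPAND,
*	    &newstate) == 0)
*		(void) printf("new vdev state: %d\n", (int)newstate);
*/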
/*
* Take the specified vdev offline
*/
int
zpool_vdev_offline(zpool_handle_t *zhp, const char *path, boolean_t istmp)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot offline %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
zc.zc_cookie = VDEV_STATE_OFFLINE;
zc.zc_obj = istmp ? ZFS_OFFLINE_TEMPORARY : 0;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
case EEXIST:
/*
* The log device has unplayed logs
*/
return (zfs_error(hdl, EZFS_UNPLAYED_LOGS, msg));
default:
return (zpool_standard_error(hdl, errno, msg));
}
}
/*
* Mark the given vdev faulted.
*/
int
zpool_vdev_fault(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot fault %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = VDEV_STATE_FAULTED;
zc.zc_obj = aux;
if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
switch (errno) {
case EBUSY:
/*
* There are no other replicas of this device.
*/
return (zfs_error(hdl, EZFS_NOREPLICAS, msg));
default:
return (zpool_standard_error(hdl, errno, msg));
}
}
/*
* Mark the given vdev degraded.
*/
int
zpool_vdev_degrade(zpool_handle_t *zhp, uint64_t guid, vdev_aux_t aux)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot degrade %llu"), (u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = VDEV_STATE_DEGRADED;
zc.zc_obj = aux;
if (ioctl(hdl->libzfs_fd, ZFS_IOC_VDEV_SET_STATE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
}
/*
* Returns TRUE if the given nvlist is a vdev that was originally swapped in as
* a hot spare.
*/
static boolean_t
is_replacing_spare(nvlist_t *search, nvlist_t *tgt, int which)
{
nvlist_t **child;
uint_t c, children;
char *type;
if (nvlist_lookup_nvlist_array(search, ZPOOL_CONFIG_CHILDREN, &child,
&children) == 0) {
verify(nvlist_lookup_string(search, ZPOOL_CONFIG_TYPE,
&type) == 0);
if (strcmp(type, VDEV_TYPE_SPARE) == 0 &&
children == 2 && child[which] == tgt)
return (B_TRUE);
for (c = 0; c < children; c++)
if (is_replacing_spare(child[c], tgt, which))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Attach new_disk (fully described by nvroot) to old_disk.
* If 'replacing' is specified, the new disk will replace the old one.
*/
int
zpool_vdev_attach(zpool_handle_t *zhp,
const char *old_disk, const char *new_disk, nvlist_t *nvroot, int replacing)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
int ret;
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
uint64_t val;
char *newname;
nvlist_t **child;
uint_t children;
nvlist_t *config_root;
libzfs_handle_t *hdl = zhp->zpool_hdl;
boolean_t rootpool = zpool_is_bootable(zhp);
if (replacing)
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot replace %s with %s"), old_disk, new_disk);
else
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot attach %s to %s"), new_disk, old_disk);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, old_disk, &avail_spare, &l2cache,
- &islog)) == 0)
+ &islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
zc.zc_cookie = replacing;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_CHILDREN,
&child, &children) != 0 || children != 1) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
return (zfs_error(hdl, EZFS_INVALCONFIG, msg));
}
verify(nvlist_lookup_nvlist(zpool_get_config(zhp, NULL),
ZPOOL_CONFIG_VDEV_TREE, &config_root) == 0);
if ((newname = zpool_vdev_name(NULL, NULL, child[0], 0)) == NULL)
return (-1);
/*
* If the target is a hot spare that has been swapped in, we can only
* replace it with another hot spare.
*/
if (replacing &&
nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_IS_SPARE, &val) == 0 &&
(zpool_find_vdev(zhp, newname, &avail_spare, &l2cache,
NULL) == NULL || !avail_spare) &&
is_replacing_spare(config_root, tgt, 1)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only be replaced by another hot spare"));
free(newname);
return (zfs_error(hdl, EZFS_BADTARGET, msg));
}
free(newname);
if (zcmd_write_conf_nvlist(hdl, &zc, nvroot) != 0)
return (-1);
ret = zfs_ioctl(hdl, ZFS_IOC_VDEV_ATTACH, &zc);
zcmd_free_nvlists(&zc);
if (ret == 0) {
if (rootpool) {
/*
* XXX need a better way to prevent user from
* booting up a half-baked vdev.
*/
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "Make "
"sure to wait until resilver is done "
"before rebooting.\n"));
}
return (0);
}
switch (errno) {
case ENOTSUP:
/*
* Can't attach to or replace this type of vdev.
*/
if (replacing) {
uint64_t version = zpool_get_prop_int(zhp,
ZPOOL_PROP_VERSION, NULL);
if (islog)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a log with a spare"));
else if (version >= SPA_VERSION_MULTI_REPLACE)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"already in replacing/spare config; wait "
"for completion or use 'zpool detach'"));
else
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"cannot replace a replacing device"));
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"can only attach to mirrors and top-level "
"disks"));
}
(void) zfs_error(hdl, EZFS_BADTARGET, msg);
break;
case EINVAL:
/*
* The new device must be a single disk.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device must be a single disk"));
(void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
break;
case EBUSY:
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy"),
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "%s is busy, "
+ "or pool has removing/removed vdevs"),
new_disk);
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case EOVERFLOW:
/*
* The new device is too small.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"device is too small"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case EDOM:
/*
* The new device has a different optimal sector size.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"new device has a different optimal sector size; use the "
"option '-o ashift=N' to override the optimal size"));
(void) zfs_error(hdl, EZFS_BADDEV, msg);
break;
case ENAMETOOLONG:
/*
* The resulting top-level vdev spec won't fit in the label.
*/
(void) zfs_error(hdl, EZFS_DEVOVERFLOW, msg);
break;
default:
(void) zpool_standard_error(hdl, errno, msg);
}
return (-1);
}
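/*
* Example use of zpool_vdev_attach() for a replacement (a minimal sketch;
* 'zhp' is assumed to be an open pool handle, 'g_zfs' the libzfs handle,
* and 'nvroot' an nvlist describing the single replacement disk, built
* elsewhere, e.g. by the zpool command's vdev-parsing code):
*
*	if (zpool_vdev_attach(zhp, "/dev/sda1", "/dev/sdb1", nvroot,
*	    B_TRUE) != 0)
*		(void) fprintf(stderr, "%s\n",
*		    libzfs_error_description(g_zfs));
*/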
/*
* Detach the specified device.
*/
int
zpool_vdev_detach(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
nvlist_t *tgt;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot detach %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
- NULL)) == 0)
+ NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
if (l2cache)
return (zfs_error(hdl, EZFS_ISL2CACHE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_DETACH, &zc) == 0)
return (0);
switch (errno) {
case ENOTSUP:
/*
* Can't detach from this type of vdev.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "only "
"applicable to mirror and replacing vdevs"));
(void) zfs_error(hdl, EZFS_BADTARGET, msg);
break;
case EBUSY:
/*
* There are no other replicas of this device.
*/
(void) zfs_error(hdl, EZFS_NOREPLICAS, msg);
break;
default:
(void) zpool_standard_error(hdl, errno, msg);
}
return (-1);
}
/*
* Find a mirror vdev in the source nvlist.
*
* The mchild array contains a list of disks in one of the top-level mirrors
* of the source pool. The schild array contains a list of disks that the
* user specified on the command line. We loop over the mchild array to
* see if any entry in the schild array matches.
*
* If a disk in the mchild array is found in the schild array, we return
* the index of that entry. Otherwise we return -1.
*/
static int
find_vdev_entry(zpool_handle_t *zhp, nvlist_t **mchild, uint_t mchildren,
nvlist_t **schild, uint_t schildren)
{
uint_t mc;
for (mc = 0; mc < mchildren; mc++) {
uint_t sc;
char *mpath = zpool_vdev_name(zhp->zpool_hdl, zhp,
mchild[mc], 0);
for (sc = 0; sc < schildren; sc++) {
char *spath = zpool_vdev_name(zhp->zpool_hdl, zhp,
schild[sc], 0);
boolean_t result = (strcmp(mpath, spath) == 0);
free(spath);
if (result) {
free(mpath);
return (mc);
}
}
free(mpath);
}
return (-1);
}
/*
* Split a mirror pool. If *newroot is NULL, then a new nvlist is generated
* and it is the caller's responsibility to free it.
*/
int
zpool_vdev_split(zpool_handle_t *zhp, char *newname, nvlist_t **newroot,
nvlist_t *props, splitflags_t flags)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
nvlist_t *tree, *config, **child, **newchild, *newconfig = NULL;
nvlist_t **varray = NULL, *zc_props = NULL;
uint_t c, children, newchildren, lastlog = 0, vcount, found = 0;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t vers, readonly = B_FALSE;
boolean_t freelist = B_FALSE, memory_err = B_TRUE;
int retval = 0;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "Unable to split %s"), zhp->zpool_name);
if (!zpool_name_valid(hdl, B_FALSE, newname))
return (zfs_error(hdl, EZFS_INVALIDNAME, msg));
if ((config = zpool_get_config(zhp, NULL)) == NULL) {
(void) fprintf(stderr, gettext("Internal error: unable to "
"retrieve pool configuration\n"));
return (-1);
}
verify(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &tree)
== 0);
verify(nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION, &vers) == 0);
if (props) {
prop_flags_t flags = { .create = B_FALSE, .import = B_TRUE };
if ((zc_props = zpool_valid_proplist(hdl, zhp->zpool_name,
props, vers, flags, msg)) == NULL)
return (-1);
(void) nvlist_lookup_uint64(zc_props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"property %s can only be set at import time"),
zpool_prop_to_name(ZPOOL_PROP_READONLY));
return (-1);
}
}
if (nvlist_lookup_nvlist_array(tree, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool is missing vdev tree"));
nvlist_free(zc_props);
return (-1);
}
varray = zfs_alloc(hdl, children * sizeof (nvlist_t *));
vcount = 0;
if (*newroot == NULL ||
nvlist_lookup_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN,
&newchild, &newchildren) != 0)
newchildren = 0;
for (c = 0; c < children; c++) {
uint64_t is_log = B_FALSE, is_hole = B_FALSE;
char *type;
nvlist_t **mchild, *vdev;
uint_t mchildren;
int entry;
/*
* Unlike cache & spares, slogs are stored in the
* ZPOOL_CONFIG_CHILDREN array. We filter them out here.
*/
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_LOG,
&is_log);
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_log || is_hole) {
/*
* Create a hole vdev and put it in the config.
*/
if (nvlist_alloc(&vdev, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_string(vdev, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_HOLE) != 0)
goto out;
if (nvlist_add_uint64(vdev, ZPOOL_CONFIG_IS_HOLE,
1) != 0)
goto out;
if (lastlog == 0)
lastlog = vcount;
varray[vcount++] = vdev;
continue;
}
lastlog = 0;
verify(nvlist_lookup_string(child[c], ZPOOL_CONFIG_TYPE, &type)
== 0);
if (strcmp(type, VDEV_TYPE_MIRROR) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"Source pool must be composed only of mirrors\n"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
goto out;
}
verify(nvlist_lookup_nvlist_array(child[c],
ZPOOL_CONFIG_CHILDREN, &mchild, &mchildren) == 0);
/* find or add an entry for this top-level vdev */
if (newchildren > 0 &&
(entry = find_vdev_entry(zhp, mchild, mchildren,
newchild, newchildren)) >= 0) {
/* We found a disk that the user specified. */
vdev = mchild[entry];
++found;
} else {
/* User didn't specify a disk for this vdev. */
vdev = mchild[mchildren - 1];
}
if (nvlist_dup(vdev, &varray[vcount++], 0) != 0)
goto out;
}
/* did we find every disk the user specified? */
if (found != newchildren) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "Device list must "
"include at most one disk from each mirror"));
retval = zfs_error(hdl, EZFS_INVALCONFIG, msg);
goto out;
}
/* Prepare the nvlist for populating. */
if (*newroot == NULL) {
if (nvlist_alloc(newroot, NV_UNIQUE_NAME, 0) != 0)
goto out;
freelist = B_TRUE;
if (nvlist_add_string(*newroot, ZPOOL_CONFIG_TYPE,
VDEV_TYPE_ROOT) != 0)
goto out;
} else {
verify(nvlist_remove_all(*newroot, ZPOOL_CONFIG_CHILDREN) == 0);
}
/* Add all the children we found */
if (nvlist_add_nvlist_array(*newroot, ZPOOL_CONFIG_CHILDREN, varray,
lastlog == 0 ? vcount : lastlog) != 0)
goto out;
/*
* If we're just doing a dry run, exit now with success.
*/
if (flags.dryrun) {
memory_err = B_FALSE;
freelist = B_FALSE;
goto out;
}
/* now build up the config list & call the ioctl */
if (nvlist_alloc(&newconfig, NV_UNIQUE_NAME, 0) != 0)
goto out;
if (nvlist_add_nvlist(newconfig,
ZPOOL_CONFIG_VDEV_TREE, *newroot) != 0 ||
nvlist_add_string(newconfig,
ZPOOL_CONFIG_POOL_NAME, newname) != 0 ||
nvlist_add_uint64(newconfig, ZPOOL_CONFIG_VERSION, vers) != 0)
goto out;
/*
* The new pool is automatically part of the namespace unless we
* explicitly export it.
*/
if (!flags.import)
zc.zc_cookie = ZPOOL_EXPORT_AFTER_SPLIT;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_string, newname, sizeof (zc.zc_string));
if (zcmd_write_conf_nvlist(hdl, &zc, newconfig) != 0)
goto out;
if (zc_props != NULL && zcmd_write_src_nvlist(hdl, &zc, zc_props) != 0)
goto out;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_SPLIT, &zc) != 0) {
retval = zpool_standard_error(hdl, errno, msg);
goto out;
}
freelist = B_FALSE;
memory_err = B_FALSE;
out:
if (varray != NULL) {
int v;
for (v = 0; v < vcount; v++)
nvlist_free(varray[v]);
free(varray);
}
zcmd_free_nvlists(&zc);
nvlist_free(zc_props);
nvlist_free(newconfig);
if (freelist) {
nvlist_free(*newroot);
*newroot = NULL;
}
if (retval != 0)
return (retval);
if (memory_err)
return (no_memory(hdl));
return (0);
}
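/*
* Example dry-run use of zpool_vdev_split() (a minimal sketch; 'zhp' is
* assumed to be an open handle on an all-mirror pool and "newtank" is a
* hypothetical name for the split-off pool):
*
*	splitflags_t flags = { 0 };
*	nvlist_t *newroot = NULL;
*
*	flags.dryrun = B_TRUE;
*	if (zpool_vdev_split(zhp, "newtank", &newroot, NULL, flags) == 0)
*		nvlist_free(newroot);
*/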
/*
- * Remove the given device. Currently, this is supported only for hot spares,
- * cache, and log devices.
+ * Remove the given device.
*/
int
zpool_vdev_remove(zpool_handle_t *zhp, const char *path)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
nvlist_t *tgt;
boolean_t avail_spare, l2cache, islog;
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t version;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot remove %s"), path);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
- &islog)) == 0)
- return (zfs_error(hdl, EZFS_NODEVICE, msg));
- /*
- * XXX - this should just go away.
- */
- if (!avail_spare && !l2cache && !islog) {
- zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "only inactive hot spares, cache, "
- "or log devices can be removed"));
+ &islog)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
- }
version = zpool_get_prop_int(zhp, ZPOOL_PROP_VERSION, NULL);
if (islog && version < SPA_VERSION_HOLES) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
- "pool must be upgrade to support log removal"));
+ "pool must be upgraded to support log removal"));
return (zfs_error(hdl, EZFS_BADVERSION, msg));
}
- verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID, &zc.zc_guid) == 0);
+ if (!islog && !avail_spare && !l2cache && zpool_is_bootable(zhp)) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "root pool can not have removed devices, "
+ "because GRUB does not understand them"));
+ return (zfs_error(hdl, EINVAL, msg));
+ }
+
+ zc.zc_guid = fnvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID);
+
+ if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
+ return (0);
+
+ switch (errno) {
+
+ case EINVAL:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "invalid config; all top-level vdevs must "
+ "have the same sector size and not be raidz."));
+ (void) zfs_error(hdl, EZFS_INVALCONFIG, msg);
+ break;
+
+ case EBUSY:
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "Pool busy; removal may already be in progress"));
+ (void) zfs_error(hdl, EZFS_BUSY, msg);
+ break;
+
+ default:
+ (void) zpool_standard_error(hdl, errno, msg);
+ }
+ return (-1);
+}
+
+int
+zpool_vdev_remove_cancel(zpool_handle_t *zhp)
+{
+ zfs_cmd_t zc;
+ char msg[1024];
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot cancel removal"));
+
+ bzero(&zc, sizeof (zc));
+ (void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
+ zc.zc_cookie = 1;
if (zfs_ioctl(hdl, ZFS_IOC_VDEV_REMOVE, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
}
+int
+zpool_vdev_indirect_size(zpool_handle_t *zhp, const char *path,
+ uint64_t *sizep)
+{
+ char msg[1024];
+ nvlist_t *tgt;
+ boolean_t avail_spare, l2cache, islog;
+ libzfs_handle_t *hdl = zhp->zpool_hdl;
+
+ (void) snprintf(msg, sizeof (msg),
+ dgettext(TEXT_DOMAIN, "cannot determine indirect size of %s"),
+ path);
+
+ if ((tgt = zpool_find_vdev(zhp, path, &avail_spare, &l2cache,
+ &islog)) == NULL)
+ return (zfs_error(hdl, EZFS_NODEVICE, msg));
+
+ if (avail_spare || l2cache || islog) {
+ *sizep = 0;
+ return (0);
+ }
+
+ if (nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_INDIRECT_SIZE, sizep) != 0) {
+ zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
+ "indirect size not available"));
+ return (zfs_error(hdl, EINVAL, msg));
+ }
+ return (0);
+}
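/*
* Example use of zpool_vdev_indirect_size(), which looks up the
* ZPOOL_CONFIG_INDIRECT_SIZE of the named vdev (a minimal sketch; 'zhp' is
* assumed to be an open pool handle and the vdev path is hypothetical):
*
*	uint64_t isize;
*
*	if (zpool_vdev_indirect_size(zhp, "/dev/sda1", &isize) == 0)
*		(void) printf("indirect size: %llu\n", (u_longlong_t)isize);
*/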
+
/*
* Clear the errors for the pool, or the particular device if specified.
*/
int
zpool_clear(zpool_handle_t *zhp, const char *path, nvlist_t *rewindnvl)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
nvlist_t *tgt;
zpool_rewind_policy_t policy;
boolean_t avail_spare, l2cache;
libzfs_handle_t *hdl = zhp->zpool_hdl;
nvlist_t *nvi = NULL;
int error;
if (path)
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
path);
else
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot clear errors for %s"),
zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (path) {
if ((tgt = zpool_find_vdev(zhp, path, &avail_spare,
- &l2cache, NULL)) == 0)
+ &l2cache, NULL)) == NULL)
return (zfs_error(hdl, EZFS_NODEVICE, msg));
/*
* Don't allow error clearing for hot spares. Do allow
* error clearing for l2cache devices.
*/
if (avail_spare)
return (zfs_error(hdl, EZFS_ISSPARE, msg));
verify(nvlist_lookup_uint64(tgt, ZPOOL_CONFIG_GUID,
&zc.zc_guid) == 0);
}
zpool_get_rewind_policy(rewindnvl, &policy);
zc.zc_cookie = policy.zrp_request;
if (zcmd_alloc_dst_nvlist(hdl, &zc, zhp->zpool_config_size * 2) != 0)
return (-1);
if (zcmd_write_src_nvlist(hdl, &zc, rewindnvl) != 0)
return (-1);
while ((error = zfs_ioctl(hdl, ZFS_IOC_CLEAR, &zc)) != 0 &&
errno == ENOMEM) {
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
zcmd_free_nvlists(&zc);
return (-1);
}
}
if (!error || ((policy.zrp_request & ZPOOL_TRY_REWIND) &&
errno != EPERM && errno != EACCES)) {
if (policy.zrp_request &
(ZPOOL_DO_REWIND | ZPOOL_TRY_REWIND)) {
(void) zcmd_read_dst_nvlist(hdl, &zc, &nvi);
zpool_rewind_exclaim(hdl, zc.zc_name,
((policy.zrp_request & ZPOOL_TRY_REWIND) != 0),
nvi);
nvlist_free(nvi);
}
zcmd_free_nvlists(&zc);
return (0);
}
zcmd_free_nvlists(&zc);
return (zpool_standard_error(hdl, errno, msg));
}
/*
* Similar to zpool_clear(), but takes a GUID (used by fmd).
*/
int
zpool_vdev_clear(zpool_handle_t *zhp, uint64_t guid)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot clear errors for %llx"),
(u_longlong_t)guid);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_guid = guid;
zc.zc_cookie = ZPOOL_NO_REWIND;
if (ioctl(hdl->libzfs_fd, ZFS_IOC_CLEAR, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
}
/*
* Change the GUID for a pool.
*/
int
zpool_reguid(zpool_handle_t *zhp)
{
char msg[1024];
libzfs_handle_t *hdl = zhp->zpool_hdl;
zfs_cmd_t zc = {"\0"};
(void) snprintf(msg, sizeof (msg),
dgettext(TEXT_DOMAIN, "cannot reguid '%s'"), zhp->zpool_name);
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
if (zfs_ioctl(hdl, ZFS_IOC_POOL_REGUID, &zc) == 0)
return (0);
return (zpool_standard_error(hdl, errno, msg));
}
/*
* Reopen the pool.
*/
int
zpool_reopen_one(zpool_handle_t *zhp, void *data)
{
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *scrub_restart = data;
int error;
error = lzc_reopen(pool_name, *scrub_restart);
if (error) {
return (zpool_standard_error_fmt(hdl, error,
dgettext(TEXT_DOMAIN, "cannot reopen '%s'"), pool_name));
}
return (0);
}
/* call into libzfs_core to execute the sync IOCTL per pool */
int
zpool_sync_one(zpool_handle_t *zhp, void *data)
{
int ret;
libzfs_handle_t *hdl = zpool_get_handle(zhp);
const char *pool_name = zpool_get_name(zhp);
boolean_t *force = data;
nvlist_t *innvl = fnvlist_alloc();
fnvlist_add_boolean_value(innvl, "force", *force);
if ((ret = lzc_sync(pool_name, innvl, NULL)) != 0) {
nvlist_free(innvl);
return (zpool_standard_error_fmt(hdl, ret,
dgettext(TEXT_DOMAIN, "sync '%s' failed"), pool_name));
}
nvlist_free(innvl);
return (0);
}
#if defined(__sun__) || defined(__sun)
/*
* Convert from a devid string to a path.
*/
static char *
devid_to_path(char *devid_str)
{
ddi_devid_t devid;
char *minor;
char *path;
devid_nmlist_t *list = NULL;
int ret;
if (devid_str_decode(devid_str, &devid, &minor) != 0)
return (NULL);
ret = devid_deviceid_to_nmlist("/dev", devid, minor, &list);
devid_str_free(minor);
devid_free(devid);
if (ret != 0)
return (NULL);
/*
* In case the strdup() fails, we will just return NULL below.
*/
path = strdup(list[0].devname);
devid_free_nmlist(list);
return (path);
}
/*
* Convert from a path to a devid string.
*/
static char *
path_to_devid(const char *path)
{
int fd;
ddi_devid_t devid;
char *minor, *ret;
if ((fd = open(path, O_RDONLY)) < 0)
return (NULL);
minor = NULL;
ret = NULL;
if (devid_get(fd, &devid) == 0) {
if (devid_get_minor_name(fd, &minor) == 0)
ret = devid_str_encode(devid, minor);
if (minor != NULL)
devid_str_free(minor);
devid_free(devid);
}
(void) close(fd);
return (ret);
}
/*
* Issue the necessary ioctl() to update the stored path value for the vdev. We
* ignore any failure here, since a common case is for an unprivileged user to
* type 'zpool status', and we'll display the correct information anyway.
*/
static void
set_path(zpool_handle_t *zhp, nvlist_t *nv, const char *path)
{
zfs_cmd_t zc = {"\0"};
(void) strncpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
(void) strncpy(zc.zc_value, path, sizeof (zc.zc_value));
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID,
&zc.zc_guid) == 0);
(void) ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_VDEV_SETPATH, &zc);
}
#endif /* sun */
/*
* Remove partition suffix from a vdev path. Partition suffixes may take three
* forms: "-partX", "pX", or "X", where X is a string of digits. The second
* case only occurs when the suffix is preceded by a digit, i.e. "md0p0". The
* third case only occurs when preceded by a string matching the regular
* expression "^([hsv]|xv)d[a-z]+", i.e. a scsi, ide, virtio or xen disk.
*
* caller must free the returned string
*/
char *
zfs_strip_partition(char *path)
{
char *tmp = strdup(path);
char *part = NULL, *d = NULL;
if (!tmp)
return (NULL);
if ((part = strstr(tmp, "-part")) && part != tmp) {
d = part + 5;
} else if ((part = strrchr(tmp, 'p')) &&
part > tmp + 1 && isdigit(*(part-1))) {
d = part + 1;
} else if ((tmp[0] == 'h' || tmp[0] == 's' || tmp[0] == 'v') &&
tmp[1] == 'd') {
for (d = &tmp[2]; isalpha(*d); part = ++d) { }
} else if (strncmp("xvd", tmp, 3) == 0) {
for (d = &tmp[3]; isalpha(*d); part = ++d) { }
}
if (part && d && *d != '\0') {
for (; isdigit(*d); d++) { }
if (*d == '\0')
*part = '\0';
}
return (tmp);
}
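/*
* Example use of zfs_strip_partition() (a minimal sketch; the input name is
* hypothetical, "sda1" strips to "sda", and the returned string must be
* freed by the caller):
*
*	char *disk = zfs_strip_partition("sda1");
*
*	if (disk != NULL) {
*		(void) printf("%s\n", disk);
*		free(disk);
*	}
*/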
/*
* Same as zfs_strip_partition, but allows "/dev/" to be in the pathname
*
* path: /dev/sda1
* returns: /dev/sda
*
* Returned string must be freed.
*/
char *
zfs_strip_partition_path(char *path)
{
char *newpath = strdup(path);
char *sd_offset;
char *new_sd;
if (!newpath)
return (NULL);
/* Point to "sda1" part of "/dev/sda1" */
sd_offset = strrchr(newpath, '/') + 1;
/* Get our new name "sda" */
new_sd = zfs_strip_partition(sd_offset);
if (!new_sd) {
free(newpath);
return (NULL);
}
/* Paste the "sda" where "sda1" was */
strlcpy(sd_offset, new_sd, strlen(sd_offset) + 1);
/* Free temporary "sda" */
free(new_sd);
return (newpath);
}
#define PATH_BUF_LEN 64
/*
* Given a vdev, return the name to display in iostat. If the vdev has a path,
* we use that, stripping off any leading "/dev/dsk/"; if not, we use the type.
* We also check if this is a whole disk, in which case we strip off the
* trailing 's0' slice name.
*
* This routine is also responsible for identifying when disks have been
* reconfigured in a new location. The kernel will have opened the device by
* devid, but the path will still refer to the old location. To catch this, we
* first do a path -> devid translation (which is fast for the common case). If
* the devid matches, we're done. If not, we do a reverse devid -> path
* translation and issue the appropriate ioctl() to update the path of the vdev.
* If 'zhp' is NULL, then this is an exported pool, and we don't need to do any
* of these checks.
*/
char *
zpool_vdev_name(libzfs_handle_t *hdl, zpool_handle_t *zhp, nvlist_t *nv,
int name_flags)
{
char *path, *type, *env;
uint64_t value;
char buf[PATH_BUF_LEN];
char tmpbuf[PATH_BUF_LEN];
/*
* vdev_name will be "root"/"root-0" for the root vdev, but it is the
* zpool name that will be displayed to the user.
*/
verify(nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) == 0);
if (zhp != NULL && strcmp(type, "root") == 0)
return (zfs_strdup(hdl, zpool_get_name(zhp)));
env = getenv("ZPOOL_VDEV_NAME_PATH");
if (env && (strtoul(env, NULL, 0) > 0 ||
!strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
name_flags |= VDEV_NAME_PATH;
env = getenv("ZPOOL_VDEV_NAME_GUID");
if (env && (strtoul(env, NULL, 0) > 0 ||
!strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
name_flags |= VDEV_NAME_GUID;
env = getenv("ZPOOL_VDEV_NAME_FOLLOW_LINKS");
if (env && (strtoul(env, NULL, 0) > 0 ||
!strncasecmp(env, "YES", 3) || !strncasecmp(env, "ON", 2)))
name_flags |= VDEV_NAME_FOLLOW_LINKS;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, &value) == 0 ||
name_flags & VDEV_NAME_GUID) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &value);
(void) snprintf(buf, sizeof (buf), "%llu", (u_longlong_t)value);
path = buf;
} else if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &path) == 0) {
#if defined(__sun__) || defined(__sun)
/*
* Live VDEV path updates to a kernel VDEV during a
* zpool_vdev_name lookup are not supported on Linux.
*/
char *devid;
vdev_stat_t *vs;
uint_t vsc;
/*
* If the device is dead (faulted, offline, etc) then don't
* bother opening it. Otherwise we may be forcing the user to
* open a misbehaving device, which can have undesirable
* effects.
*/
if ((nvlist_lookup_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) != 0 ||
vs->vs_state >= VDEV_STATE_DEGRADED) &&
zhp != NULL &&
nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &devid) == 0) {
/*
* Determine if the current path is correct.
*/
char *newdevid = path_to_devid(path);
if (newdevid == NULL ||
strcmp(devid, newdevid) != 0) {
char *newpath;
if ((newpath = devid_to_path(devid)) != NULL) {
/*
* Update the path appropriately.
*/
set_path(zhp, nv, newpath);
if (nvlist_add_string(nv,
ZPOOL_CONFIG_PATH, newpath) == 0)
verify(nvlist_lookup_string(nv,
ZPOOL_CONFIG_PATH,
&path) == 0);
free(newpath);
}
}
if (newdevid)
devid_str_free(newdevid);
}
#endif /* sun */
if (name_flags & VDEV_NAME_FOLLOW_LINKS) {
char *rp = realpath(path, NULL);
if (rp) {
strlcpy(buf, rp, sizeof (buf));
path = buf;
free(rp);
}
}
/*
* For a block device only use the name.
*/
if ((strcmp(type, VDEV_TYPE_DISK) == 0) &&
!(name_flags & VDEV_NAME_PATH)) {
path = strrchr(path, '/');
path++;
}
/*
* Remove the partition from the path if this is a whole disk.
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK, &value)
== 0 && value && !(name_flags & VDEV_NAME_PATH)) {
return (zfs_strip_partition(path));
}
} else {
path = type;
/*
* If it's a raidz device, we need to stick in the parity level.
*/
if (strcmp(path, VDEV_TYPE_RAIDZ) == 0) {
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&value) == 0);
(void) snprintf(buf, sizeof (buf), "%s%llu", path,
(u_longlong_t)value);
path = buf;
}
/*
* We identify each top-level vdev by using a <type-id>
* naming convention.
*/
if (name_flags & VDEV_NAME_TYPE_ID) {
uint64_t id;
verify(nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID,
&id) == 0);
(void) snprintf(tmpbuf, sizeof (tmpbuf), "%s-%llu",
path, (u_longlong_t)id);
path = tmpbuf;
}
}
return (zfs_strdup(hdl, path));
}
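/*
* Example use of zpool_vdev_name() (a minimal sketch; 'zhp' is an open pool
* handle, 'g_zfs' the libzfs handle, and 'nv' is assumed to be a vdev
* nvlist taken from the pool's ZPOOL_CONFIG_VDEV_TREE):
*
*	char *name = zpool_vdev_name(g_zfs, zhp, nv, VDEV_NAME_TYPE_ID);
*
*	if (name != NULL) {
*		(void) printf("%s\n", name);
*		free(name);
*	}
*/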
static int
zbookmark_mem_compare(const void *a, const void *b)
{
return (memcmp(a, b, sizeof (zbookmark_phys_t)));
}
/*
* Retrieve the persistent error log, uniquify the members, and return to the
* caller.
*/
int
zpool_get_errlog(zpool_handle_t *zhp, nvlist_t **nverrlistp)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
uint64_t count;
zbookmark_phys_t *zb = NULL;
int i;
/*
* Retrieve the raw error list from the kernel. If the number of errors
* has increased, allocate more space and continue until we get the
* entire list.
*/
verify(nvlist_lookup_uint64(zhp->zpool_config, ZPOOL_CONFIG_ERRCOUNT,
&count) == 0);
if (count == 0)
return (0);
zc.zc_nvlist_dst = (uintptr_t)zfs_alloc(zhp->zpool_hdl,
count * sizeof (zbookmark_phys_t));
zc.zc_nvlist_dst_size = count;
(void) strcpy(zc.zc_name, zhp->zpool_name);
for (;;) {
if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_ERROR_LOG,
&zc) != 0) {
free((void *)(uintptr_t)zc.zc_nvlist_dst);
if (errno == ENOMEM) {
void *dst;
count = zc.zc_nvlist_dst_size;
dst = zfs_alloc(zhp->zpool_hdl, count *
sizeof (zbookmark_phys_t));
zc.zc_nvlist_dst = (uintptr_t)dst;
} else {
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "errors: List of "
"errors unavailable")));
}
} else {
break;
}
}
/*
* Sort the resulting bookmarks. This is a little confusing due to the
* implementation of ZFS_IOC_ERROR_LOG. The bookmarks are copied last
* to first, and 'zc_nvlist_dst_size' indicates the number of bookmarks
* _not_ copied as part of the process. So we point the start of our
* array appropriately and decrement the total number of elements.
*/
zb = ((zbookmark_phys_t *)(uintptr_t)zc.zc_nvlist_dst) +
zc.zc_nvlist_dst_size;
count -= zc.zc_nvlist_dst_size;
qsort(zb, count, sizeof (zbookmark_phys_t), zbookmark_mem_compare);
verify(nvlist_alloc(nverrlistp, 0, KM_SLEEP) == 0);
/*
* Fill in the nverrlistp with nvlist's of dataset and object numbers.
*/
for (i = 0; i < count; i++) {
nvlist_t *nv;
/* ignoring zb_blkid and zb_level for now */
if (i > 0 && zb[i-1].zb_objset == zb[i].zb_objset &&
zb[i-1].zb_object == zb[i].zb_object)
continue;
if (nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) != 0)
goto nomem;
if (nvlist_add_uint64(nv, ZPOOL_ERR_DATASET,
zb[i].zb_objset) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_uint64(nv, ZPOOL_ERR_OBJECT,
zb[i].zb_object) != 0) {
nvlist_free(nv);
goto nomem;
}
if (nvlist_add_nvlist(*nverrlistp, "ejk", nv) != 0) {
nvlist_free(nv);
goto nomem;
}
nvlist_free(nv);
}
free((void *)(uintptr_t)zc.zc_nvlist_dst);
return (0);
nomem:
free((void *)(uintptr_t)zc.zc_nvlist_dst);
return (no_memory(zhp->zpool_hdl));
}
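/*
* Example use of zpool_get_errlog() (a minimal sketch; 'zhp' is assumed to
* be an open pool handle; this walks the uniquified list and resolves each
* entry to a path, much as 'zpool status -v' does):
*
*	nvlist_t *nverrlist = NULL;
*	nvpair_t *elem = NULL;
*	char pathname[MAXPATHLEN * 2];
*
*	if (zpool_get_errlog(zhp, &nverrlist) == 0) {
*		while ((elem = nvlist_next_nvpair(nverrlist, elem)) != NULL) {
*			nvlist_t *nv;
*			uint64_t dsobj, obj;
*
*			verify(nvpair_value_nvlist(elem, &nv) == 0);
*			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_DATASET,
*			    &dsobj) == 0);
*			verify(nvlist_lookup_uint64(nv, ZPOOL_ERR_OBJECT,
*			    &obj) == 0);
*			zpool_obj_to_path(zhp, dsobj, obj, pathname,
*			    sizeof (pathname));
*			(void) printf("%s\n", pathname);
*		}
*		nvlist_free(nverrlist);
*	}
*/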
/*
* Upgrade a ZFS pool to the latest on-disk version.
*/
int
zpool_upgrade(zpool_handle_t *zhp, uint64_t new_version)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strcpy(zc.zc_name, zhp->zpool_name);
zc.zc_cookie = new_version;
if (zfs_ioctl(hdl, ZFS_IOC_POOL_UPGRADE, &zc) != 0)
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot upgrade '%s'"),
zhp->zpool_name));
return (0);
}
void
zfs_save_arguments(int argc, char **argv, char *string, int len)
{
int i;
(void) strlcpy(string, basename(argv[0]), len);
for (i = 1; i < argc; i++) {
(void) strlcat(string, " ", len);
(void) strlcat(string, argv[i], len);
}
}
int
zpool_log_history(libzfs_handle_t *hdl, const char *message)
{
zfs_cmd_t zc = {"\0"};
nvlist_t *args;
int err;
args = fnvlist_alloc();
fnvlist_add_string(args, "message", message);
err = zcmd_write_src_nvlist(hdl, &zc, args);
if (err == 0)
err = ioctl(hdl->libzfs_fd, ZFS_IOC_LOG_HISTORY, &zc);
nvlist_free(args);
zcmd_free_nvlists(&zc);
return (err);
}
/*
* Perform ioctl to get some command history of a pool.
*
* 'buf' is the buffer to fill up to 'len' bytes. 'off' is the
* logical offset of the history buffer to start reading from.
*
* Upon return, 'off' is the next logical offset to read from and
* 'len' is the actual number of bytes read into 'buf'.
*/
static int
get_history(zpool_handle_t *zhp, char *buf, uint64_t *off, uint64_t *len)
{
zfs_cmd_t zc = {"\0"};
libzfs_handle_t *hdl = zhp->zpool_hdl;
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_history = (uint64_t)(uintptr_t)buf;
zc.zc_history_len = *len;
zc.zc_history_offset = *off;
if (ioctl(hdl->libzfs_fd, ZFS_IOC_POOL_GET_HISTORY, &zc) != 0) {
switch (errno) {
case EPERM:
return (zfs_error_fmt(hdl, EZFS_PERM,
dgettext(TEXT_DOMAIN,
"cannot show history for pool '%s'"),
zhp->zpool_name));
case ENOENT:
return (zfs_error_fmt(hdl, EZFS_NOHISTORY,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s'"), zhp->zpool_name));
case ENOTSUP:
return (zfs_error_fmt(hdl, EZFS_BADVERSION,
dgettext(TEXT_DOMAIN, "cannot get history for pool "
"'%s', pool must be upgraded"), zhp->zpool_name));
default:
return (zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN,
"cannot get history for '%s'"), zhp->zpool_name));
}
}
*len = zc.zc_history_len;
*off = zc.zc_history_offset;
return (0);
}
/*
* Process the buffer of nvlists, unpacking and storing each nvlist record
* into 'records'. 'leftover' is set to the number of bytes that weren't
* processed as there wasn't a complete record.
*/
int
zpool_history_unpack(char *buf, uint64_t bytes_read, uint64_t *leftover,
nvlist_t ***records, uint_t *numrecords)
{
uint64_t reclen;
nvlist_t *nv;
int i;
void *tmp;
while (bytes_read > sizeof (reclen)) {
/* get length of packed record (stored as little endian) */
for (i = 0, reclen = 0; i < sizeof (reclen); i++)
reclen += (uint64_t)(((uchar_t *)buf)[i]) << (8*i);
if (bytes_read < sizeof (reclen) + reclen)
break;
/* unpack record */
if (nvlist_unpack(buf + sizeof (reclen), reclen, &nv, 0) != 0)
return (ENOMEM);
bytes_read -= sizeof (reclen) + reclen;
buf += sizeof (reclen) + reclen;
/* add record to nvlist array */
(*numrecords)++;
if (ISP2(*numrecords + 1)) {
tmp = realloc(*records,
*numrecords * 2 * sizeof (nvlist_t *));
if (tmp == NULL) {
nvlist_free(nv);
(*numrecords)--;
return (ENOMEM);
}
*records = tmp;
}
(*records)[*numrecords - 1] = nv;
}
*leftover = bytes_read;
return (0);
}
/*
* Retrieve the command history of a pool.
*/
int
zpool_get_history(zpool_handle_t *zhp, nvlist_t **nvhisp)
{
char *buf;
int buflen = 128 * 1024;
uint64_t off = 0;
nvlist_t **records = NULL;
uint_t numrecords = 0;
int err, i;
buf = malloc(buflen);
if (buf == NULL)
return (ENOMEM);
do {
uint64_t bytes_read = buflen;
uint64_t leftover;
if ((err = get_history(zhp, buf, &off, &bytes_read)) != 0)
break;
/* if nothing else was read in, we're at EOF, just return */
if (!bytes_read)
break;
if ((err = zpool_history_unpack(buf, bytes_read,
&leftover, &records, &numrecords)) != 0)
break;
off -= leftover;
if (leftover == bytes_read) {
/*
* no progress made, because buffer is not big enough
* to hold this record; resize and retry.
*/
buflen *= 2;
free(buf);
buf = malloc(buflen);
if (buf == NULL)
return (ENOMEM);
}
/* CONSTCOND */
} while (1);
free(buf);
if (!err) {
verify(nvlist_alloc(nvhisp, NV_UNIQUE_NAME, 0) == 0);
verify(nvlist_add_nvlist_array(*nvhisp, ZPOOL_HIST_RECORD,
records, numrecords) == 0);
}
for (i = 0; i < numrecords; i++)
nvlist_free(records[i]);
free(records);
return (err);
}
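/*
* Example use of zpool_get_history() (a minimal sketch; 'zhp' is assumed to
* be an open pool handle; each record is an nvlist stored under
* ZPOOL_HIST_RECORD):
*
*	nvlist_t *nvhis = NULL;
*	nvlist_t **records;
*	uint_t numrecords, i;
*
*	if (zpool_get_history(zhp, &nvhis) == 0) {
*		verify(nvlist_lookup_nvlist_array(nvhis, ZPOOL_HIST_RECORD,
*		    &records, &numrecords) == 0);
*		for (i = 0; i < numrecords; i++)
*			nvlist_print(stdout, records[i]);
*		nvlist_free(nvhis);
*	}
*/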
/*
* Retrieve the next event given the passed 'zevent_fd' file descriptor.
* If there is a new event available 'nvp' will contain a newly allocated
* nvlist and 'dropped' will be set to the number of missed events since
* the last call to this function. When 'nvp' is set to NULL it indicates
* no new events are available. In either case the function returns 0 and
* it is up to the caller to free 'nvp'. In the case of a fatal error the
* function will return a non-zero value. When the function is called in
* blocking mode (the default, unless the ZEVENT_NONBLOCK flag is passed),
* it will not return until a new event is available.
*/
int
zpool_events_next(libzfs_handle_t *hdl, nvlist_t **nvp,
int *dropped, unsigned flags, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
*nvp = NULL;
*dropped = 0;
zc.zc_cleanup_fd = zevent_fd;
if (flags & ZEVENT_NONBLOCK)
zc.zc_guid = ZEVENT_NONBLOCK;
if (zcmd_alloc_dst_nvlist(hdl, &zc, ZEVENT_SIZE) != 0)
return (-1);
retry:
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_NEXT, &zc) != 0) {
switch (errno) {
case ESHUTDOWN:
error = zfs_error_fmt(hdl, EZFS_POOLUNAVAIL,
dgettext(TEXT_DOMAIN, "zfs shutdown"));
goto out;
case ENOENT:
/* Blocking error case should not occur */
if (!(flags & ZEVENT_NONBLOCK))
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
case ENOMEM:
if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
error = zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
} else {
goto retry;
}
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
goto out;
}
}
error = zcmd_read_dst_nvlist(hdl, &zc, nvp);
if (error != 0)
goto out;
*dropped = (int)zc.zc_cookie;
out:
zcmd_free_nvlists(&zc);
return (error);
}
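/*
* Example non-blocking use of zpool_events_next() (a minimal sketch; 'hdl'
* is the libzfs handle and 'zevent_fd' is assumed to be an open descriptor
* on ZFS_DEV; the loop drains all currently queued events):
*
*	nvlist_t *nvl;
*	int dropped;
*
*	for (;;) {
*		if (zpool_events_next(hdl, &nvl, &dropped,
*		    ZEVENT_NONBLOCK, zevent_fd) != 0 || nvl == NULL)
*			break;
*		if (dropped > 0)
*			(void) printf("dropped %d events\n", dropped);
*		nvlist_print(stdout, nvl);
*		nvlist_free(nvl);
*	}
*/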
/*
* Clear all events.
*/
int
zpool_events_clear(libzfs_handle_t *hdl, int *count)
{
zfs_cmd_t zc = {"\0"};
char msg[1024];
(void) snprintf(msg, sizeof (msg), dgettext(TEXT_DOMAIN,
"cannot clear events"));
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_CLEAR, &zc) != 0)
return (zpool_standard_error_fmt(hdl, errno, msg));
if (count != NULL)
*count = (int)zc.zc_cookie; /* # of events cleared */
return (0);
}
/*
* Seek to a specific EID, ZEVENT_SEEK_START, or ZEVENT_SEEK_END for
* the passed zevent_fd file handle. On success zero is returned,
* otherwise -1 is returned and hdl->libzfs_error is set to the errno.
*/
int
zpool_events_seek(libzfs_handle_t *hdl, uint64_t eid, int zevent_fd)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
zc.zc_guid = eid;
zc.zc_cleanup_fd = zevent_fd;
if (zfs_ioctl(hdl, ZFS_IOC_EVENTS_SEEK, &zc) != 0) {
switch (errno) {
case ENOENT:
error = zfs_error_fmt(hdl, EZFS_NOENT,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
case ENOMEM:
error = zfs_error_fmt(hdl, EZFS_NOMEM,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
default:
error = zpool_standard_error_fmt(hdl, errno,
dgettext(TEXT_DOMAIN, "cannot get event"));
break;
}
}
return (error);
}
void
zpool_obj_to_path(zpool_handle_t *zhp, uint64_t dsobj, uint64_t obj,
char *pathname, size_t len)
{
zfs_cmd_t zc = {"\0"};
boolean_t mounted = B_FALSE;
char *mntpnt = NULL;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
if (dsobj == 0) {
/* special case for the MOS */
(void) snprintf(pathname, len, "<metadata>:<0x%llx>",
(longlong_t)obj);
return;
}
/* get the dataset's name */
(void) strlcpy(zc.zc_name, zhp->zpool_name, sizeof (zc.zc_name));
zc.zc_obj = dsobj;
if (ioctl(zhp->zpool_hdl->libzfs_fd,
ZFS_IOC_DSOBJ_TO_DSNAME, &zc) != 0) {
/* just write out a path of two object numbers */
(void) snprintf(pathname, len, "<0x%llx>:<0x%llx>",
(longlong_t)dsobj, (longlong_t)obj);
return;
}
(void) strlcpy(dsname, zc.zc_value, sizeof (dsname));
/* find out if the dataset is mounted */
mounted = is_mounted(zhp->zpool_hdl, dsname, &mntpnt);
/* get the corrupted object's path */
(void) strlcpy(zc.zc_name, dsname, sizeof (zc.zc_name));
zc.zc_obj = obj;
if (ioctl(zhp->zpool_hdl->libzfs_fd, ZFS_IOC_OBJ_TO_PATH,
&zc) == 0) {
if (mounted) {
(void) snprintf(pathname, len, "%s%s", mntpnt,
zc.zc_value);
} else {
(void) snprintf(pathname, len, "%s:%s",
dsname, zc.zc_value);
}
} else {
(void) snprintf(pathname, len, "%s:<0x%llx>", dsname,
(longlong_t)obj);
}
free(mntpnt);
}
/*
* Read the EFI label from the config; if a label does not exist, then
* pass back the error to the caller. If the caller has passed a non-NULL
* diskaddr argument then we set it to the starting address of the EFI
* partition.
*/
static int
read_efi_label(nvlist_t *config, diskaddr_t *sb)
{
char *path;
int fd;
char diskname[MAXPATHLEN];
int err = -1;
if (nvlist_lookup_string(config, ZPOOL_CONFIG_PATH, &path) != 0)
return (err);
(void) snprintf(diskname, sizeof (diskname), "%s%s", DISK_ROOT,
strrchr(path, '/'));
if ((fd = open(diskname, O_RDONLY|O_DIRECT)) >= 0) {
struct dk_gpt *vtoc;
if ((err = efi_alloc_and_read(fd, &vtoc)) >= 0) {
if (sb != NULL)
*sb = vtoc->efi_parts[0].p_start;
efi_free(vtoc);
}
(void) close(fd);
}
return (err);
}
/*
* determine where a partition starts on a disk in the current
* configuration
*/
static diskaddr_t
find_start_block(nvlist_t *config)
{
nvlist_t **child;
uint_t c, children;
diskaddr_t sb = MAXOFFSET_T;
uint64_t wholedisk;
if (nvlist_lookup_nvlist_array(config,
ZPOOL_CONFIG_CHILDREN, &child, &children) != 0) {
if (nvlist_lookup_uint64(config,
ZPOOL_CONFIG_WHOLE_DISK,
&wholedisk) != 0 || !wholedisk) {
return (MAXOFFSET_T);
}
if (read_efi_label(config, &sb) < 0)
sb = MAXOFFSET_T;
return (sb);
}
for (c = 0; c < children; c++) {
sb = find_start_block(child[c]);
if (sb != MAXOFFSET_T) {
return (sb);
}
}
return (MAXOFFSET_T);
}
static int
zpool_label_disk_check(char *path)
{
struct dk_gpt *vtoc;
int fd, err;
if ((fd = open(path, O_RDONLY|O_DIRECT)) < 0)
return (errno);
if ((err = efi_alloc_and_read(fd, &vtoc)) != 0) {
(void) close(fd);
return (err);
}
if (vtoc->efi_flags & EFI_GPT_PRIMARY_CORRUPT) {
efi_free(vtoc);
(void) close(fd);
return (EIDRM);
}
efi_free(vtoc);
(void) close(fd);
return (0);
}
/*
* Generate a unique partition name for the ZFS member. Partitions must
* have unique names to ensure udev will be able to create symlinks under
* /dev/disk/by-partlabel/ for all pool members. The partition names are
* of the form <pool>-<unique-id>.
*/
static void
zpool_label_name(char *label_name, int label_size)
{
uint64_t id = 0;
int fd;
fd = open("/dev/urandom", O_RDONLY);
if (fd >= 0) {
if (read(fd, &id, sizeof (id)) != sizeof (id))
id = 0;
close(fd);
}
if (id == 0)
id = (((uint64_t)rand()) << 32) | (uint64_t)rand();
snprintf(label_name, label_size, "zfs-%016llx", (u_longlong_t)id);
}
/*
* Label an individual disk. The name provided is the short name,
* stripped of any leading /dev path.
*/
int
zpool_label_disk(libzfs_handle_t *hdl, zpool_handle_t *zhp, char *name)
{
char path[MAXPATHLEN];
struct dk_gpt *vtoc;
int rval, fd;
size_t resv = EFI_MIN_RESV_SIZE;
uint64_t slice_size;
diskaddr_t start_block;
char errbuf[1024];
/* prepare an error message just in case */
(void) snprintf(errbuf, sizeof (errbuf),
dgettext(TEXT_DOMAIN, "cannot label '%s'"), name);
if (zhp) {
nvlist_t *nvroot;
verify(nvlist_lookup_nvlist(zhp->zpool_config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
if (zhp->zpool_start_block == 0)
start_block = find_start_block(nvroot);
else
start_block = zhp->zpool_start_block;
zhp->zpool_start_block = start_block;
} else {
/* new pool */
start_block = NEW_START_BLOCK;
}
(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
if ((fd = open(path, O_RDWR|O_DIRECT|O_EXCL)) < 0) {
/*
* This shouldn't happen. We've long since verified that this
* is a valid device.
*/
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
"label '%s': unable to open device: %d"), path, errno);
return (zfs_error(hdl, EZFS_OPENFAILED, errbuf));
}
if (efi_alloc_and_init(fd, EFI_NUMPAR, &vtoc) != 0) {
/*
* The only way this can fail is if we run out of memory, or we
* were unable to read the disk's capacity
*/
if (errno == ENOMEM)
(void) no_memory(hdl);
(void) close(fd);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "cannot "
"label '%s': unable to read disk capacity"), path);
return (zfs_error(hdl, EZFS_NOCAP, errbuf));
}
slice_size = vtoc->efi_last_u_lba + 1;
slice_size -= EFI_MIN_RESV_SIZE;
if (start_block == MAXOFFSET_T)
start_block = NEW_START_BLOCK;
slice_size -= start_block;
slice_size = P2ALIGN(slice_size, PARTITION_END_ALIGNMENT);
vtoc->efi_parts[0].p_start = start_block;
vtoc->efi_parts[0].p_size = slice_size;
/*
* Why we use V_USR: V_BACKUP confuses users, and is considered
* disposable by some EFI utilities (since EFI doesn't have a backup
* slice). V_UNASSIGNED is supposed to be used only for zero size
* partitions, and efi_write() will fail if we use it. V_ROOT, V_BOOT,
* etc. were all pretty specific. V_USR is as close to reality as we
* can get, in the absence of V_OTHER.
*/
vtoc->efi_parts[0].p_tag = V_USR;
zpool_label_name(vtoc->efi_parts[0].p_name, EFI_PART_NAME_LEN);
vtoc->efi_parts[8].p_start = slice_size + start_block;
vtoc->efi_parts[8].p_size = resv;
vtoc->efi_parts[8].p_tag = V_RESERVED;
rval = efi_write(fd, vtoc);
/* Flush the buffers to disk and invalidate the page cache. */
(void) fsync(fd);
(void) ioctl(fd, BLKFLSBUF);
if (rval == 0)
rval = efi_rescan(fd);
/*
* Some block drivers (like pcata) may not support EFI GPT labels.
* Print out a helpful error message directing the user to manually
* label the disk and give a specific slice.
*/
if (rval != 0) {
(void) close(fd);
efi_free(vtoc);
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "try using "
"parted(8) and then provide a specific slice: %d"), rval);
return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
}
(void) close(fd);
efi_free(vtoc);
(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
(void) zfs_append_partition(path, MAXPATHLEN);
/* Wait for udev to signal that the device has settled. */
rval = zpool_label_disk_wait(path, DISK_LABEL_WAIT);
if (rval) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "failed to "
"detect device partitions on '%s': %d"), path, rval);
return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
}
/* We can't be too paranoid. Read the label back and verify it. */
(void) snprintf(path, sizeof (path), "%s/%s", DISK_ROOT, name);
rval = zpool_label_disk_check(path);
if (rval) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "freshly written "
"EFI label on '%s' is damaged. Ensure\nthis device "
"is not in in use, and is functioning properly: %d"),
path, rval);
return (zfs_error(hdl, EZFS_LABELFAILED, errbuf));
}
return (0);
}
/*
* Allocate and return the underlying device name for a device mapper device.
* If a device mapper device maps to multiple devices, return the first device.
*
* For example, dm_name = "/dev/dm-0" could return "/dev/sda". Symlinks to a
* DM device (like /dev/disk/by-vdev/A0) are also allowed.
*
* Returns device name, or NULL on error or no match. If dm_name is not a DM
* device then return NULL.
*
* NOTE: The returned name string must be *freed*.
*/
char *
dm_get_underlying_path(char *dm_name)
{
DIR *dp = NULL;
struct dirent *ep;
char *realp;
char *tmp = NULL;
char *path = NULL;
char *dev_str;
int size;
if (dm_name == NULL)
return (NULL);
/* dm name may be a symlink (like /dev/disk/by-vdev/A0) */
realp = realpath(dm_name, NULL);
if (realp == NULL)
return (NULL);
/*
* If they preface 'dev' with a path (like "/dev") then strip it off.
* We just want the 'dm-N' part.
*/
tmp = strrchr(realp, '/');
if (tmp != NULL)
dev_str = tmp + 1; /* +1 since we want the chr after '/' */
else
dev_str = tmp;
size = asprintf(&tmp, "/sys/block/%s/slaves/", dev_str);
if (size == -1 || !tmp)
goto end;
dp = opendir(tmp);
if (dp == NULL)
goto end;
/* Return first sd* entry in /sys/block/dm-N/slaves/ */
while ((ep = readdir(dp))) {
if (ep->d_type != DT_DIR) { /* skip "." and ".." dirs */
size = asprintf(&path, "/dev/%s", ep->d_name);
break;
}
}
end:
if (dp != NULL)
closedir(dp);
free(tmp);
free(realp);
return (path);
}
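/*
* Example use of dm_get_underlying_path() (a minimal sketch; the device
* name is hypothetical and the result must be freed by the caller):
*
*	char *dev = dm_get_underlying_path("/dev/dm-0");
*
*	if (dev != NULL) {
*		(void) printf("%s\n", dev);
*		free(dev);
*	}
*/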
/*
* Return 1 if device is a device mapper or multipath device.
* Return 0 if not.
*/
int
zfs_dev_is_dm(char *dev_name)
{
char *tmp;
tmp = dm_get_underlying_path(dev_name);
if (tmp == NULL)
return (0);
free(tmp);
return (1);
}
/*
* By "whole disk" we mean an entire physical disk (something we can
* label, toggle the write cache on, etc.) as opposed to the full
* capacity of a pseudo-device such as lofi or did. We act as if we
* are labeling the disk, which should be a pretty good test of whether
* it's a viable device or not. Returns B_TRUE if it is and B_FALSE if
* it isn't.
*/
int
zfs_dev_is_whole_disk(char *dev_name)
{
struct dk_gpt *label;
int fd;
if ((fd = open(dev_name, O_RDONLY | O_DIRECT)) < 0)
return (0);
if (efi_alloc_and_init(fd, EFI_NUMPAR, &label) != 0) {
(void) close(fd);
return (0);
}
efi_free(label);
(void) close(fd);
return (1);
}
/*
* Lookup the underlying device for a device name
*
* Often you'll have a symlink to a device, a partition device,
* or a multipath device, and want to look up the underlying device.
* This function returns the underlying device name. If the device
* name is already the underlying device, then just return the same
* name. If the device is a DM device with multiple underlying devices
* then return the first one.
*
* For example:
*
* 1. /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001 -> ../../sda
* dev_name: /dev/disk/by-id/ata-QEMU_HARDDISK_QM00001
* returns: /dev/sda
*
* 2. /dev/mapper/mpatha (made up of /dev/sda and /dev/sdb)
* dev_name: /dev/mapper/mpatha
* returns: /dev/sda (first device)
*
* 3. /dev/sda (already the underlying device)
* dev_name: /dev/sda
* returns: /dev/sda
*
* 4. /dev/dm-3 (mapped to /dev/sda)
* dev_name: /dev/dm-3
* returns: /dev/sda
*
* 5. /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9 -> ../../sdb9
* dev_name: /dev/disk/by-id/scsi-0QEMU_drive-scsi0-0-0-0-part9
* returns: /dev/sdb
*
* 6. /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a -> ../dev/sda2
* dev_name: /dev/disk/by-uuid/5df030cf-3cd9-46e4-8e99-3ccb462a4e9a
* returns: /dev/sda
*
* Returns underlying device name, or NULL on error or no match.
*
* NOTE: The returned name string must be *freed*.
*/
char *
zfs_get_underlying_path(char *dev_name)
{
char *name = NULL;
char *tmp;
if (dev_name == NULL)
return (NULL);
tmp = dm_get_underlying_path(dev_name);
/* dev_name not a DM device, so just un-symlinkize it */
if (tmp == NULL)
tmp = realpath(dev_name, NULL);
if (tmp != NULL) {
name = zfs_strip_partition_path(tmp);
free(tmp);
}
return (name);
}
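/*
* Example use of zfs_get_underlying_path() (a minimal sketch; the by-id
* link is the one from case 1 above and the result must be freed by the
* caller):
*
*	char *dev = zfs_get_underlying_path(
*	    "/dev/disk/by-id/ata-QEMU_HARDDISK_QM00001");
*
*	if (dev != NULL) {
*		(void) printf("%s\n", dev);
*		free(dev);
*	}
*/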
/*
* Given a dev name like "sda", return the full enclosure sysfs path to
* the disk. You can also pass in the name with "/dev" prepended
* to it (like /dev/sda).
*
* For example, disk "sda" in enclosure slot 1:
* dev: "sda"
* returns: "/sys/class/enclosure/1:0:3:0/Slot 1"
*
* 'dev' must be a non-devicemapper device.
*
* Returned string must be freed.
*/
char *
zfs_get_enclosure_sysfs_path(char *dev_name)
{
DIR *dp = NULL;
struct dirent *ep;
char buf[MAXPATHLEN];
char *tmp1 = NULL;
char *tmp2 = NULL;
char *tmp3 = NULL;
char *path = NULL;
size_t size;
int tmpsize;
if (dev_name == NULL)
return (NULL);
/* If they preface 'dev' with a path (like "/dev") then strip it off */
tmp1 = strrchr(dev_name, '/');
if (tmp1 != NULL)
dev_name = tmp1 + 1; /* +1 since we want the chr after '/' */
tmpsize = asprintf(&tmp1, "/sys/block/%s/device", dev_name);
if (tmpsize == -1 || tmp1 == NULL) {
tmp1 = NULL;
goto end;
}
dp = opendir(tmp1);
if (dp == NULL) {
tmp1 = NULL; /* To make free() at the end a NOP */
goto end;
}
/*
* Look through all sysfs entries in /sys/block/<dev>/device for
* the enclosure symlink.
*/
while ((ep = readdir(dp))) {
/* Ignore everything that's not our enclosure_device link */
if (strstr(ep->d_name, "enclosure_device") == NULL)
continue;
if (asprintf(&tmp2, "%s/%s", tmp1, ep->d_name) == -1 ||
tmp2 == NULL)
break;
size = readlink(tmp2, buf, sizeof (buf));
/* Did readlink fail or crop the link name? */
if (size == -1 || size >= sizeof (buf)) {
free(tmp2);
tmp2 = NULL; /* To make free() at the end a NOP */
break;
}
/*
* We got a valid link. readlink() doesn't terminate strings
* so we have to do it.
*/
buf[size] = '\0';
/*
* Our link will look like:
*
* "../../../../port-11:1:2/..STUFF../enclosure/1:0:3:0/SLOT 1"
*
* We want to grab the "enclosure/1:0:3:0/SLOT 1" part
*/
tmp3 = strstr(buf, "enclosure");
if (tmp3 == NULL)
break;
if (asprintf(&path, "/sys/class/%s", tmp3) == -1) {
/* If asprintf() fails, 'path' is undefined */
path = NULL;
break;
}
if (path == NULL)
break;
}
end:
free(tmp2);
free(tmp1);
if (dp != NULL)
closedir(dp);
return (path);
}
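/*
* Example use of zfs_get_enclosure_sysfs_path() (a minimal sketch; "sda" is
* the hypothetical device from the comment above and the result must be
* freed by the caller):
*
*	char *sysfs = zfs_get_enclosure_sysfs_path("sda");
*
*	if (sysfs != NULL) {
*		(void) printf("%s\n", sysfs);
*		free(sysfs);
*	}
*/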
diff --git a/lib/libzfs/libzfs_util.c b/lib/libzfs/libzfs_util.c
index 12f65a90c1bb..855c72fab3ff 100644
--- a/lib/libzfs/libzfs_util.c
+++ b/lib/libzfs/libzfs_util.c
@@ -1,2172 +1,2180 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2018, Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2014 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
* Copyright (c) 2017 Datto Inc.
*/
/*
* Internal utility routines for the ZFS library.
*/
#include <errno.h>
#include <fcntl.h>
#include <libintl.h>
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>
#include <strings.h>
#include <unistd.h>
#include <ctype.h>
#include <math.h>
#include <sys/stat.h>
#include <sys/mnttab.h>
#include <sys/mntent.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <libzfs.h>
#include <libzfs_core.h>
#include "libzfs_impl.h"
#include "zfs_prop.h"
#include "zfeature_common.h"
#include <zfs_fletcher.h>
int
libzfs_errno(libzfs_handle_t *hdl)
{
return (hdl->libzfs_error);
}
const char *
libzfs_error_init(int error)
{
switch (error) {
case ENXIO:
return (dgettext(TEXT_DOMAIN, "The ZFS modules are not "
"loaded.\nTry running '/sbin/modprobe zfs' as root "
"to load them.\n"));
case ENOENT:
return (dgettext(TEXT_DOMAIN, "/dev/zfs and /proc/self/mounts "
"are required.\nTry running 'udevadm trigger' and 'mount "
"-t proc proc /proc' as root.\n"));
case ENOEXEC:
return (dgettext(TEXT_DOMAIN, "The ZFS modules cannot be "
"auto-loaded.\nTry running '/sbin/modprobe zfs' as "
"root to manually load them.\n"));
case EACCES:
return (dgettext(TEXT_DOMAIN, "Permission denied the "
"ZFS utilities must be run as root.\n"));
default:
return (dgettext(TEXT_DOMAIN, "Failed to initialize the "
"libzfs library.\n"));
}
}
const char *
libzfs_error_action(libzfs_handle_t *hdl)
{
return (hdl->libzfs_action);
}
const char *
libzfs_error_description(libzfs_handle_t *hdl)
{
if (hdl->libzfs_desc[0] != '\0')
return (hdl->libzfs_desc);
switch (hdl->libzfs_error) {
case EZFS_NOMEM:
return (dgettext(TEXT_DOMAIN, "out of memory"));
case EZFS_BADPROP:
return (dgettext(TEXT_DOMAIN, "invalid property value"));
case EZFS_PROPREADONLY:
return (dgettext(TEXT_DOMAIN, "read-only property"));
case EZFS_PROPTYPE:
return (dgettext(TEXT_DOMAIN, "property doesn't apply to "
"datasets of this type"));
case EZFS_PROPNONINHERIT:
return (dgettext(TEXT_DOMAIN, "property cannot be inherited"));
case EZFS_PROPSPACE:
return (dgettext(TEXT_DOMAIN, "invalid quota or reservation"));
case EZFS_BADTYPE:
return (dgettext(TEXT_DOMAIN, "operation not applicable to "
"datasets of this type"));
case EZFS_BUSY:
return (dgettext(TEXT_DOMAIN, "pool or dataset is busy"));
case EZFS_EXISTS:
return (dgettext(TEXT_DOMAIN, "pool or dataset exists"));
case EZFS_NOENT:
return (dgettext(TEXT_DOMAIN, "no such pool or dataset"));
case EZFS_BADSTREAM:
return (dgettext(TEXT_DOMAIN, "invalid backup stream"));
case EZFS_DSREADONLY:
return (dgettext(TEXT_DOMAIN, "dataset is read-only"));
case EZFS_VOLTOOBIG:
return (dgettext(TEXT_DOMAIN, "volume size exceeds limit for "
"this system"));
case EZFS_INVALIDNAME:
return (dgettext(TEXT_DOMAIN, "invalid name"));
case EZFS_BADRESTORE:
return (dgettext(TEXT_DOMAIN, "unable to restore to "
"destination"));
case EZFS_BADBACKUP:
return (dgettext(TEXT_DOMAIN, "backup failed"));
case EZFS_BADTARGET:
return (dgettext(TEXT_DOMAIN, "invalid target vdev"));
case EZFS_NODEVICE:
return (dgettext(TEXT_DOMAIN, "no such device in pool"));
case EZFS_BADDEV:
return (dgettext(TEXT_DOMAIN, "invalid device"));
case EZFS_NOREPLICAS:
return (dgettext(TEXT_DOMAIN, "no valid replicas"));
case EZFS_RESILVERING:
return (dgettext(TEXT_DOMAIN, "currently resilvering"));
case EZFS_BADVERSION:
return (dgettext(TEXT_DOMAIN, "unsupported version or "
"feature"));
case EZFS_POOLUNAVAIL:
return (dgettext(TEXT_DOMAIN, "pool is unavailable"));
case EZFS_DEVOVERFLOW:
return (dgettext(TEXT_DOMAIN, "too many devices in one vdev"));
case EZFS_BADPATH:
return (dgettext(TEXT_DOMAIN, "must be an absolute path"));
case EZFS_CROSSTARGET:
return (dgettext(TEXT_DOMAIN, "operation crosses datasets or "
"pools"));
case EZFS_ZONED:
return (dgettext(TEXT_DOMAIN, "dataset in use by local zone"));
case EZFS_MOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "mount failed"));
case EZFS_UMOUNTFAILED:
return (dgettext(TEXT_DOMAIN, "umount failed"));
case EZFS_UNSHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "unshare(1M) failed"));
case EZFS_SHARENFSFAILED:
return (dgettext(TEXT_DOMAIN, "share(1M) failed"));
case EZFS_UNSHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "smb remove share failed"));
case EZFS_SHARESMBFAILED:
return (dgettext(TEXT_DOMAIN, "smb add share failed"));
case EZFS_PERM:
return (dgettext(TEXT_DOMAIN, "permission denied"));
case EZFS_NOSPC:
return (dgettext(TEXT_DOMAIN, "out of space"));
case EZFS_FAULT:
return (dgettext(TEXT_DOMAIN, "bad address"));
case EZFS_IO:
return (dgettext(TEXT_DOMAIN, "I/O error"));
case EZFS_INTR:
return (dgettext(TEXT_DOMAIN, "signal received"));
case EZFS_ISSPARE:
return (dgettext(TEXT_DOMAIN, "device is reserved as a hot "
"spare"));
case EZFS_INVALCONFIG:
return (dgettext(TEXT_DOMAIN, "invalid vdev configuration"));
case EZFS_RECURSIVE:
return (dgettext(TEXT_DOMAIN, "recursive dataset dependency"));
case EZFS_NOHISTORY:
return (dgettext(TEXT_DOMAIN, "no history available"));
case EZFS_POOLPROPS:
return (dgettext(TEXT_DOMAIN, "failed to retrieve "
"pool properties"));
case EZFS_POOL_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this type of pool"));
case EZFS_POOL_INVALARG:
return (dgettext(TEXT_DOMAIN, "invalid argument for "
"this pool operation"));
case EZFS_NAMETOOLONG:
return (dgettext(TEXT_DOMAIN, "dataset name is too long"));
case EZFS_OPENFAILED:
return (dgettext(TEXT_DOMAIN, "open failed"));
case EZFS_NOCAP:
return (dgettext(TEXT_DOMAIN,
"disk capacity information could not be retrieved"));
case EZFS_LABELFAILED:
return (dgettext(TEXT_DOMAIN, "write of label failed"));
case EZFS_BADWHO:
return (dgettext(TEXT_DOMAIN, "invalid user/group"));
case EZFS_BADPERM:
return (dgettext(TEXT_DOMAIN, "invalid permission"));
case EZFS_BADPERMSET:
return (dgettext(TEXT_DOMAIN, "invalid permission set name"));
case EZFS_NODELEGATION:
return (dgettext(TEXT_DOMAIN, "delegated administration is "
"disabled on pool"));
case EZFS_BADCACHE:
return (dgettext(TEXT_DOMAIN, "invalid or missing cache file"));
case EZFS_ISL2CACHE:
return (dgettext(TEXT_DOMAIN, "device is in use as a cache"));
case EZFS_VDEVNOTSUP:
return (dgettext(TEXT_DOMAIN, "vdev specification is not "
"supported"));
case EZFS_NOTSUP:
return (dgettext(TEXT_DOMAIN, "operation not supported "
"on this dataset"));
case EZFS_ACTIVE_SPARE:
return (dgettext(TEXT_DOMAIN, "pool has active shared spare "
"device"));
case EZFS_UNPLAYED_LOGS:
return (dgettext(TEXT_DOMAIN, "log device has unplayed intent "
"logs"));
case EZFS_REFTAG_RELE:
return (dgettext(TEXT_DOMAIN, "no such tag on this dataset"));
case EZFS_REFTAG_HOLD:
return (dgettext(TEXT_DOMAIN, "tag already exists on this "
"dataset"));
case EZFS_TAGTOOLONG:
return (dgettext(TEXT_DOMAIN, "tag too long"));
case EZFS_PIPEFAILED:
return (dgettext(TEXT_DOMAIN, "pipe create failed"));
case EZFS_THREADCREATEFAILED:
return (dgettext(TEXT_DOMAIN, "thread create failed"));
case EZFS_POSTSPLIT_ONLINE:
return (dgettext(TEXT_DOMAIN, "disk was split from this pool "
"into a new one"));
case EZFS_SCRUB_PAUSED:
return (dgettext(TEXT_DOMAIN, "scrub is paused; "
"use 'zpool scrub' to resume"));
case EZFS_SCRUBBING:
return (dgettext(TEXT_DOMAIN, "currently scrubbing; "
"use 'zpool scrub -s' to cancel current scrub"));
case EZFS_NO_SCRUB:
return (dgettext(TEXT_DOMAIN, "there is no active scrub"));
case EZFS_DIFF:
return (dgettext(TEXT_DOMAIN, "unable to generate diffs"));
case EZFS_DIFFDATA:
return (dgettext(TEXT_DOMAIN, "invalid diff data"));
case EZFS_POOLREADONLY:
return (dgettext(TEXT_DOMAIN, "pool is read-only"));
+ case EZFS_NO_PENDING:
+ return (dgettext(TEXT_DOMAIN, "operation is not "
+ "in progress"));
case EZFS_ACTIVE_POOL:
return (dgettext(TEXT_DOMAIN, "pool is imported on a "
"different host"));
case EZFS_CRYPTOFAILED:
return (dgettext(TEXT_DOMAIN, "encryption failure"));
case EZFS_UNKNOWN:
return (dgettext(TEXT_DOMAIN, "unknown error"));
default:
assert(hdl->libzfs_error == 0);
return (dgettext(TEXT_DOMAIN, "no error"));
}
}
/*PRINTFLIKE2*/
void
zfs_error_aux(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
(void) vsnprintf(hdl->libzfs_desc, sizeof (hdl->libzfs_desc),
fmt, ap);
hdl->libzfs_desc_active = 1;
va_end(ap);
}
static void
zfs_verror(libzfs_handle_t *hdl, int error, const char *fmt, va_list ap)
{
(void) vsnprintf(hdl->libzfs_action, sizeof (hdl->libzfs_action),
fmt, ap);
hdl->libzfs_error = error;
if (hdl->libzfs_desc_active)
hdl->libzfs_desc_active = 0;
else
hdl->libzfs_desc[0] = '\0';
if (hdl->libzfs_printerr) {
if (error == EZFS_UNKNOWN) {
(void) fprintf(stderr, dgettext(TEXT_DOMAIN, "internal "
"error: %s\n"), libzfs_error_description(hdl));
abort();
}
(void) fprintf(stderr, "%s: %s\n", hdl->libzfs_action,
libzfs_error_description(hdl));
if (error == EZFS_NOMEM)
exit(1);
}
}
int
zfs_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_error_fmt(hdl, error, "%s", msg));
}
/*PRINTFLIKE3*/
int
zfs_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
zfs_verror(hdl, error, fmt, ap);
va_end(ap);
return (-1);
}
static int
zfs_common_error(libzfs_handle_t *hdl, int error, const char *fmt,
va_list ap)
{
switch (error) {
case EPERM:
case EACCES:
zfs_verror(hdl, EZFS_PERM, fmt, ap);
return (-1);
case ECANCELED:
zfs_verror(hdl, EZFS_NODELEGATION, fmt, ap);
return (-1);
case EIO:
zfs_verror(hdl, EZFS_IO, fmt, ap);
return (-1);
case EFAULT:
zfs_verror(hdl, EZFS_FAULT, fmt, ap);
return (-1);
case EINTR:
zfs_verror(hdl, EZFS_INTR, fmt, ap);
return (-1);
}
return (0);
}
int
zfs_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zfs_standard_error_fmt(hdl, error, "%s", msg));
}
/*PRINTFLIKE3*/
int
zfs_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENXIO:
case ENODEV:
case EPIPE:
zfs_verror(hdl, EZFS_IO, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset does not exist"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"dataset is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_NAMETOOLONG, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_BADVERSION, fmt, ap);
break;
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
default:
zfs_error_aux(hdl, strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
break;
}
va_end(ap);
return (-1);
}
int
zpool_standard_error(libzfs_handle_t *hdl, int error, const char *msg)
{
return (zpool_standard_error_fmt(hdl, error, "%s", msg));
}
/*PRINTFLIKE3*/
int
zpool_standard_error_fmt(libzfs_handle_t *hdl, int error, const char *fmt, ...)
{
va_list ap;
va_start(ap, fmt);
if (zfs_common_error(hdl, error, fmt, ap) != 0) {
va_end(ap);
return (-1);
}
switch (error) {
case ENODEV:
zfs_verror(hdl, EZFS_NODEVICE, fmt, ap);
break;
case ENOENT:
zfs_error_aux(hdl,
dgettext(TEXT_DOMAIN, "no such pool or dataset"));
zfs_verror(hdl, EZFS_NOENT, fmt, ap);
break;
case EEXIST:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool already exists"));
zfs_verror(hdl, EZFS_EXISTS, fmt, ap);
break;
case EBUSY:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN, "pool is busy"));
zfs_verror(hdl, EZFS_BUSY, fmt, ap);
break;
+ /* There is no pending operation to cancel */
+ case ENOTACTIVE:
+ zfs_verror(hdl, EZFS_NO_PENDING, fmt, ap);
+ break;
+
case ENXIO:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"one or more devices is currently unavailable"));
zfs_verror(hdl, EZFS_BADDEV, fmt, ap);
break;
case ENAMETOOLONG:
zfs_verror(hdl, EZFS_DEVOVERFLOW, fmt, ap);
break;
case ENOTSUP:
zfs_verror(hdl, EZFS_POOL_NOTSUP, fmt, ap);
break;
case EINVAL:
zfs_verror(hdl, EZFS_POOL_INVALARG, fmt, ap);
break;
case ENOSPC:
case EDQUOT:
zfs_verror(hdl, EZFS_NOSPC, fmt, ap);
return (-1);
case EAGAIN:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"pool I/O is currently suspended"));
zfs_verror(hdl, EZFS_POOLUNAVAIL, fmt, ap);
break;
case EROFS:
zfs_verror(hdl, EZFS_POOLREADONLY, fmt, ap);
break;
case EDOM:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"block size out of range or does not match"));
zfs_verror(hdl, EZFS_BADPROP, fmt, ap);
break;
case EREMOTEIO:
zfs_verror(hdl, EZFS_ACTIVE_POOL, fmt, ap);
break;
default:
zfs_error_aux(hdl, strerror(error));
zfs_verror(hdl, EZFS_UNKNOWN, fmt, ap);
}
va_end(ap);
return (-1);
}
/*
* Display an out of memory error message and abort the current program.
*/
int
no_memory(libzfs_handle_t *hdl)
{
return (zfs_error(hdl, EZFS_NOMEM, "internal error"));
}
/*
* A safe form of malloc() which will die if the allocation fails.
*/
void *
zfs_alloc(libzfs_handle_t *hdl, size_t size)
{
void *data;
if ((data = calloc(1, size)) == NULL)
(void) no_memory(hdl);
return (data);
}
/*
* A safe form of asprintf() which will die if the allocation fails.
*/
/*PRINTFLIKE2*/
char *
zfs_asprintf(libzfs_handle_t *hdl, const char *fmt, ...)
{
va_list ap;
char *ret;
int err;
va_start(ap, fmt);
err = vasprintf(&ret, fmt, ap);
va_end(ap);
if (err < 0)
(void) no_memory(hdl);
return (ret);
}
/*
* A safe form of realloc(), which also zeroes newly allocated space.
*/
void *
zfs_realloc(libzfs_handle_t *hdl, void *ptr, size_t oldsize, size_t newsize)
{
void *ret;
if ((ret = realloc(ptr, newsize)) == NULL) {
(void) no_memory(hdl);
return (NULL);
}
bzero((char *)ret + oldsize, (newsize - oldsize));
return (ret);
}
/*
* A safe form of strdup() which will die if the allocation fails.
*/
char *
zfs_strdup(libzfs_handle_t *hdl, const char *str)
{
char *ret;
if ((ret = strdup(str)) == NULL)
(void) no_memory(hdl);
return (ret);
}
/*
* Convert a number to an appropriately human-readable output.
*/
void
zfs_nicenum_format(uint64_t num, char *buf, size_t buflen,
enum zfs_nicenum_format format)
{
uint64_t n = num;
int index = 0;
const char *u;
const char *units[3][7] = {
[ZFS_NICENUM_1024] = {"", "K", "M", "G", "T", "P", "E"},
[ZFS_NICENUM_BYTES] = {"B", "K", "M", "G", "T", "P", "E"},
[ZFS_NICENUM_TIME] = {"ns", "us", "ms", "s", "?", "?", "?"}
};
const int units_len[] = {[ZFS_NICENUM_1024] = 6,
[ZFS_NICENUM_BYTES] = 6,
[ZFS_NICENUM_TIME] = 4};
const int k_unit[] = { [ZFS_NICENUM_1024] = 1024,
[ZFS_NICENUM_BYTES] = 1024,
[ZFS_NICENUM_TIME] = 1000};
double val;
if (format == ZFS_NICENUM_RAW) {
snprintf(buf, buflen, "%llu", (u_longlong_t)num);
return;
} else if (format == ZFS_NICENUM_RAWTIME && num > 0) {
snprintf(buf, buflen, "%llu", (u_longlong_t)num);
return;
} else if (format == ZFS_NICENUM_RAWTIME && num == 0) {
snprintf(buf, buflen, "%s", "-");
return;
}
while (n >= k_unit[format] && index < units_len[format]) {
n /= k_unit[format];
index++;
}
u = units[format][index];
/* Don't print zero latencies since they're invalid */
if ((format == ZFS_NICENUM_TIME) && (num == 0)) {
(void) snprintf(buf, buflen, "-");
} else if ((index == 0) || ((num %
(uint64_t)powl(k_unit[format], index)) == 0)) {
/*
* If this is an even multiple of the base, always display
* without any decimal precision.
*/
(void) snprintf(buf, buflen, "%llu%s", (u_longlong_t)n, u);
} else {
/*
* We want to choose a precision that reflects the best choice
* for fitting in 5 characters. This can get rather tricky when
* we have numbers that are very close to an order of magnitude.
* For example, when displaying 10239 (which is really 9.999K),
* we want only a single place of precision for 10.0K. We could
* develop some complex heuristics for this, but it's much
* easier just to try each combination in turn.
*/
int i;
for (i = 2; i >= 0; i--) {
val = (double)num /
(uint64_t)powl(k_unit[format], index);
/*
* Don't print floating point values for time. Note,
* we use floor() instead of round() here, since
* round() can produce undesirable output. For
* example, if "num" is in the range of
* 999500-999999, it will print out "1000us". This
* doesn't happen if we use floor().
*/
if (format == ZFS_NICENUM_TIME) {
if (snprintf(buf, buflen, "%d%s",
(unsigned int) floor(val), u) <= 5)
break;
} else {
if (snprintf(buf, buflen, "%.*f%s", i,
val, u) <= 5)
break;
}
}
}
}
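/*
 * Illustrative sketch (example usage, not from the original sources): the
 * cases discussed above, shown as calls.  The buffer contents noted in the
 * comments are what the logic above produces.
 */
static void
example_nicenum_format(void)
{
	char buf[32];

	zfs_nicenum_format(10239, buf, sizeof (buf), ZFS_NICENUM_1024);
	/* buf is "10.0K" -- 9.999K shown with one decimal to fit 5 chars */
	zfs_nicenum_format(1048576, buf, sizeof (buf), ZFS_NICENUM_BYTES);
	/* buf is "1M" -- even multiples of the base print no decimals */
	zfs_nicenum_format(0, buf, sizeof (buf), ZFS_NICENUM_TIME);
	/* buf is "-" -- zero latencies are treated as invalid */
}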
/*
* Convert a number to an appropriately human-readable output.
*/
void
zfs_nicenum(uint64_t num, char *buf, size_t buflen)
{
zfs_nicenum_format(num, buf, buflen, ZFS_NICENUM_1024);
}
/*
* Convert a time to an appropriately human-readable output.
* @num: Time in nanoseconds
*/
void
zfs_nicetime(uint64_t num, char *buf, size_t buflen)
{
zfs_nicenum_format(num, buf, buflen, ZFS_NICENUM_TIME);
}
/*
* Print out a raw number with correct column spacing
*/
void
zfs_niceraw(uint64_t num, char *buf, size_t buflen)
{
zfs_nicenum_format(num, buf, buflen, ZFS_NICENUM_RAW);
}
/*
* Convert a number of bytes to an appropriately human-readable output.
*/
void
zfs_nicebytes(uint64_t num, char *buf, size_t buflen)
{
zfs_nicenum_format(num, buf, buflen, ZFS_NICENUM_BYTES);
}
void
libzfs_print_on_error(libzfs_handle_t *hdl, boolean_t printerr)
{
hdl->libzfs_printerr = printerr;
}
static int
libzfs_module_loaded(const char *module)
{
const char path_prefix[] = "/sys/module/";
char path[256];
memcpy(path, path_prefix, sizeof (path_prefix) - 1);
strcpy(path + sizeof (path_prefix) - 1, module);
return (access(path, F_OK) == 0);
}
/*
* Read lines from an open file descriptor and store them in an array of
* strings until EOF. lines[] will be allocated and populated with all the
* lines read. All newlines are replaced with NULL terminators for
* convenience. lines[] must be freed after use with libzfs_free_str_array().
*
* Returns the number of lines read.
*/
static int
libzfs_read_stdout_from_fd(int fd, char **lines[])
{
FILE *fp;
int lines_cnt = 0;
size_t len = 0;
char *line = NULL;
char **tmp_lines = NULL, **tmp;
char *nl = NULL;
int rc;
fp = fdopen(fd, "r");
if (fp == NULL)
return (0);
while (1) {
rc = getline(&line, &len, fp);
if (rc == -1)
break;
tmp = realloc(tmp_lines, sizeof (*tmp_lines) * (lines_cnt + 1));
if (tmp == NULL) {
/* Return the lines we were able to process */
break;
}
tmp_lines = tmp;
/* Terminate newlines */
if ((nl = strchr(line, '\n')) != NULL)
*nl = '\0';
tmp_lines[lines_cnt] = line;
lines_cnt++;
line = NULL;
}
fclose(fp);
*lines = tmp_lines;
return (lines_cnt);
}
static int
libzfs_run_process_impl(const char *path, char *argv[], char *env[], int flags,
char **lines[], int *lines_cnt)
{
pid_t pid;
int error, devnull_fd;
int link[2];
/*
* Set up a pipe between the child and parent process if we're
* reading stdout.
*/
if ((lines != NULL) && pipe(link) == -1)
return (-ESTRPIPE);
pid = vfork();
if (pid == 0) {
/* Child process */
devnull_fd = open("/dev/null", O_WRONLY);
if (devnull_fd < 0)
_exit(-1);
if (!(flags & STDOUT_VERBOSE) && (lines == NULL))
(void) dup2(devnull_fd, STDOUT_FILENO);
else if (lines != NULL) {
/* Save the output to lines[] */
dup2(link[1], STDOUT_FILENO);
close(link[0]);
close(link[1]);
}
if (!(flags & STDERR_VERBOSE))
(void) dup2(devnull_fd, STDERR_FILENO);
close(devnull_fd);
if (flags & NO_DEFAULT_PATH) {
if (env == NULL)
execv(path, argv);
else
execve(path, argv, env);
} else {
if (env == NULL)
execvp(path, argv);
else
execvpe(path, argv, env);
}
_exit(-1);
} else if (pid > 0) {
/* Parent process */
int status;
while ((error = waitpid(pid, &status, 0)) == -1 &&
errno == EINTR) { }
if (error < 0 || !WIFEXITED(status))
return (-1);
if (lines != NULL) {
close(link[1]);
*lines_cnt = libzfs_read_stdout_from_fd(link[0], lines);
}
return (WEXITSTATUS(status));
}
return (-1);
}
int
libzfs_run_process(const char *path, char *argv[], int flags)
{
return (libzfs_run_process_impl(path, argv, NULL, flags, NULL, NULL));
}
/*
* Run a command and store its stdout lines in an array of strings (lines[]).
* lines[] is allocated and populated for you, and the number of lines is set in
* lines_cnt. lines[] must be freed after use with libzfs_free_str_array().
* All newlines (\n) in lines[] are replaced with NUL terminators for convenience.
*/
int
libzfs_run_process_get_stdout(const char *path, char *argv[], char *env[],
char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, 0, lines, lines_cnt));
}
/*
* Same as libzfs_run_process_get_stdout(), but run without $PATH set. This
* means that *path needs to be the full path to the executable.
*/
int
libzfs_run_process_get_stdout_nopath(const char *path, char *argv[],
char *env[], char **lines[], int *lines_cnt)
{
return (libzfs_run_process_impl(path, argv, env, NO_DEFAULT_PATH,
lines, lines_cnt));
}
/*
* Free an array of strings. Free both the strings contained in the array and
* the array itself.
*/
void
libzfs_free_str_array(char **strs, int count)
{
while (--count >= 0)
free(strs[count]);
free(strs);
}
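/*
 * Illustrative sketch (example usage, not from the original sources):
 * capture the stdout of a helper command and release it with
 * libzfs_free_str_array().  The "/bin/ls" command and its arguments are
 * hypothetical values.
 */
static void
example_capture_stdout(void)
{
	char *argv[] = {"ls", "-1", "/dev", NULL};	/* hypothetical */
	char **lines = NULL;
	int lines_cnt = 0;
	int i;

	if (libzfs_run_process_get_stdout("/bin/ls", argv, NULL,
	    &lines, &lines_cnt) == 0) {
		for (i = 0; i < lines_cnt; i++)
			(void) printf("%s\n", lines[i]);
	}
	libzfs_free_str_array(lines, lines_cnt);
}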
/*
* Returns 1 if environment variable is set to "YES", "yes", "ON", "on", or
* a non-zero number.
*
* Returns 0 otherwise.
*/
int
libzfs_envvar_is_set(char *envvar)
{
char *env = getenv(envvar);
if (env && (strtoul(env, NULL, 0) > 0 ||
(!strncasecmp(env, "YES", 3) && strnlen(env, 4) == 3) ||
(!strncasecmp(env, "ON", 2) && strnlen(env, 3) == 2)))
return (1);
return (0);
}
/*
* Verify the required ZFS_DEV device is available and optionally attempt
* to load the ZFS modules. Under normal circumstances the modules
* should already have been loaded by some external mechanism.
*
* Environment variables:
* - ZFS_MODULE_LOADING="YES|yes|ON|on" - Attempt to load modules.
* - ZFS_MODULE_TIMEOUT="<seconds>" - Seconds to wait for ZFS_DEV
*/
static int
libzfs_load_module(const char *module)
{
char *argv[4] = {"/sbin/modprobe", "-q", (char *)module, (char *)0};
char *load_str, *timeout_str;
long timeout = 10; /* seconds */
long busy_timeout = 10; /* milliseconds */
int load = 0, fd;
hrtime_t start;
/* Optionally request module loading */
if (!libzfs_module_loaded(module)) {
load_str = getenv("ZFS_MODULE_LOADING");
if (load_str) {
if (!strncasecmp(load_str, "YES", strlen("YES")) ||
!strncasecmp(load_str, "ON", strlen("ON")))
load = 1;
else
load = 0;
}
if (load) {
if (libzfs_run_process("/sbin/modprobe", argv, 0))
return (ENOEXEC);
if (!libzfs_module_loaded(module))
return (ENXIO);
}
}
/*
* Device creation by udev is asynchronous and waiting may be
* required. Busy wait for 10ms and then fall back to polling every
* 10ms for the allowed timeout (default 10s, max 10m). This is
* done to optimize for the common case where the device is
* immediately available and to avoid penalizing the possible
* case where udev is slow or unable to create the device.
*/
timeout_str = getenv("ZFS_MODULE_TIMEOUT");
if (timeout_str) {
timeout = strtol(timeout_str, NULL, 0);
timeout = MAX(MIN(timeout, (10 * 60)), 0); /* 0 <= N <= 600 */
}
start = gethrtime();
do {
fd = open(ZFS_DEV, O_RDWR);
if (fd >= 0) {
(void) close(fd);
return (0);
} else if (errno != ENOENT) {
return (errno);
} else if (NSEC2MSEC(gethrtime() - start) < busy_timeout) {
sched_yield();
} else {
usleep(10 * MILLISEC);
}
} while (NSEC2MSEC(gethrtime() - start) < (timeout * MILLISEC));
return (ENOENT);
}
libzfs_handle_t *
libzfs_init(void)
{
libzfs_handle_t *hdl;
int error;
error = libzfs_load_module(ZFS_DRIVER);
if (error) {
errno = error;
return (NULL);
}
if ((hdl = calloc(1, sizeof (libzfs_handle_t))) == NULL) {
return (NULL);
}
if ((hdl->libzfs_fd = open(ZFS_DEV, O_RDWR)) < 0) {
free(hdl);
return (NULL);
}
#ifdef HAVE_SETMNTENT
if ((hdl->libzfs_mnttab = setmntent(MNTTAB, "r")) == NULL) {
#else
if ((hdl->libzfs_mnttab = fopen(MNTTAB, "r")) == NULL) {
#endif
(void) close(hdl->libzfs_fd);
free(hdl);
return (NULL);
}
hdl->libzfs_sharetab = fopen(ZFS_SHARETAB, "r");
if (libzfs_core_init() != 0) {
(void) close(hdl->libzfs_fd);
(void) fclose(hdl->libzfs_mnttab);
if (hdl->libzfs_sharetab)
(void) fclose(hdl->libzfs_sharetab);
free(hdl);
return (NULL);
}
zfs_prop_init();
zpool_prop_init();
zpool_feature_init();
libzfs_mnttab_init(hdl);
fletcher_4_init();
if (getenv("ZFS_PROP_DEBUG") != NULL) {
hdl->libzfs_prop_debug = B_TRUE;
}
return (hdl);
}
void
libzfs_fini(libzfs_handle_t *hdl)
{
(void) close(hdl->libzfs_fd);
if (hdl->libzfs_mnttab)
#ifdef HAVE_SETMNTENT
(void) endmntent(hdl->libzfs_mnttab);
#else
(void) fclose(hdl->libzfs_mnttab);
#endif
if (hdl->libzfs_sharetab)
(void) fclose(hdl->libzfs_sharetab);
zfs_uninit_libshare(hdl);
zpool_free_handles(hdl);
namespace_clear(hdl);
libzfs_mnttab_fini(hdl);
libzfs_core_fini();
fletcher_4_fini();
free(hdl);
}
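/*
 * Illustrative sketch (example usage, not from the original sources): the
 * usual open/close sequence for a libzfs consumer.  On failure libzfs_init()
 * sets errno, which libzfs_error_init() turns into a human-readable hint.
 */
static int
example_open_libzfs(void)
{
	libzfs_handle_t *hdl = libzfs_init();

	if (hdl == NULL) {
		(void) fprintf(stderr, "%s", libzfs_error_init(errno));
		return (-1);
	}
	libzfs_print_on_error(hdl, B_TRUE);
	/* ... use the handle ... */
	libzfs_fini(hdl);
	return (0);
}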
libzfs_handle_t *
zpool_get_handle(zpool_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
libzfs_handle_t *
zfs_get_handle(zfs_handle_t *zhp)
{
return (zhp->zfs_hdl);
}
zpool_handle_t *
zfs_get_pool_handle(const zfs_handle_t *zhp)
{
return (zhp->zpool_hdl);
}
/*
* Given a name, determine whether or not it's a valid path
* (starts with '/' or "./"). If so, walk the mnttab trying
* to match the device number. If not, treat the path as an
* fs/vol/snap/bkmark name.
*/
zfs_handle_t *
zfs_path_to_zhandle(libzfs_handle_t *hdl, char *path, zfs_type_t argtype)
{
struct stat64 statbuf;
struct extmnttab entry;
int ret;
if (path[0] != '/' && strncmp(path, "./", strlen("./")) != 0) {
/*
* It's not a valid path, assume it's a name of type 'argtype'.
*/
return (zfs_open(hdl, path, argtype));
}
if (stat64(path, &statbuf) != 0) {
(void) fprintf(stderr, "%s: %s\n", path, strerror(errno));
return (NULL);
}
/* Reopen MNTTAB to prevent reading stale data from open file */
if (freopen(MNTTAB, "r", hdl->libzfs_mnttab) == NULL)
return (NULL);
while ((ret = getextmntent(hdl->libzfs_mnttab, &entry, 0)) == 0) {
if (makedevice(entry.mnt_major, entry.mnt_minor) ==
statbuf.st_dev) {
break;
}
}
if (ret != 0) {
return (NULL);
}
if (strcmp(entry.mnt_fstype, MNTTYPE_ZFS) != 0) {
(void) fprintf(stderr, gettext("'%s': not a ZFS filesystem\n"),
path);
return (NULL);
}
return (zfs_open(hdl, entry.mnt_special, ZFS_TYPE_FILESYSTEM));
}
/*
* Append partition suffix to an otherwise fully qualified device path.
* This is used to generate the full path name as it is stored in
* ZPOOL_CONFIG_PATH for whole disk devices. On success the new length
* of 'path' is returned; on error a negative value is returned.
*/
int
zfs_append_partition(char *path, size_t max_len)
{
int len = strlen(path);
if ((strncmp(path, UDISK_ROOT, strlen(UDISK_ROOT)) == 0) ||
(strncmp(path, ZVOL_ROOT, strlen(ZVOL_ROOT)) == 0)) {
if (len + 6 >= max_len)
return (-1);
(void) strcat(path, "-part1");
len += 6;
} else {
if (len + 2 >= max_len)
return (-1);
if (isdigit(path[len-1])) {
(void) strcat(path, "p1");
len += 2;
} else {
(void) strcat(path, "1");
len += 1;
}
}
return (len);
}
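/*
 * Illustrative sketch (example usage, not from the original sources): how
 * the suffix rules above play out for two hypothetical device names.  The
 * results noted in the comments follow from the logic above.
 */
static void
example_append_partition(void)
{
	char sd[MAXPATHLEN] = "/dev/sda";		/* hypothetical */
	char nvme[MAXPATHLEN] = "/dev/nvme0n1";		/* hypothetical */

	(void) zfs_append_partition(sd, sizeof (sd));
	/* sd is now "/dev/sda1" -- name does not end in a digit */
	(void) zfs_append_partition(nvme, sizeof (nvme));
	/* nvme is now "/dev/nvme0n1p1" -- name ends in a digit */
}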
/*
* Given a shorthand device name, check if a file by that name exists in any
* of the 'zpool_default_import_path' or ZPOOL_IMPORT_PATH directories. If
* one is found, store its fully qualified path in the 'path' buffer passed
* by the caller and return 0, otherwise return an error.
*/
int
zfs_resolve_shortname(const char *name, char *path, size_t len)
{
int i, error = -1;
char *dir, *env, *envdup;
env = getenv("ZPOOL_IMPORT_PATH");
errno = ENOENT;
if (env) {
envdup = strdup(env);
dir = strtok(envdup, ":");
while (dir && error) {
(void) snprintf(path, len, "%s/%s", dir, name);
error = access(path, F_OK);
dir = strtok(NULL, ":");
}
free(envdup);
} else {
for (i = 0; i < DEFAULT_IMPORT_PATH_SIZE && error < 0; i++) {
(void) snprintf(path, len, "%s/%s",
zpool_default_import_path[i], name);
error = access(path, F_OK);
}
}
return (error ? ENOENT : 0);
}
/*
* Given a shorthand device name, look for a match against 'cmp_name'. This
* is done by checking all prefix expansions using either the default
* 'zpool_default_import_path' or the ZPOOL_IMPORT_PATH environment
* variable. Proper partition suffixes will be appended if this is a
* whole disk. When a match is found 0 is returned, otherwise ENOENT.
*/
static int
zfs_strcmp_shortname(char *name, char *cmp_name, int wholedisk)
{
int path_len, cmp_len, i = 0, error = ENOENT;
char *dir, *env, *envdup = NULL;
char path_name[MAXPATHLEN];
cmp_len = strlen(cmp_name);
env = getenv("ZPOOL_IMPORT_PATH");
if (env) {
envdup = strdup(env);
dir = strtok(envdup, ":");
} else {
dir = zpool_default_import_path[i];
}
while (dir) {
/* Trim trailing directory slashes from ZPOOL_IMPORT_PATH */
while (dir[strlen(dir)-1] == '/')
dir[strlen(dir)-1] = '\0';
path_len = snprintf(path_name, MAXPATHLEN, "%s/%s", dir, name);
if (wholedisk)
path_len = zfs_append_partition(path_name, MAXPATHLEN);
if ((path_len == cmp_len) && strcmp(path_name, cmp_name) == 0) {
error = 0;
break;
}
if (env) {
dir = strtok(NULL, ":");
} else if (++i < DEFAULT_IMPORT_PATH_SIZE) {
dir = zpool_default_import_path[i];
} else {
dir = NULL;
}
}
if (env)
free(envdup);
return (error);
}
/*
* Given either a shorthand or fully qualified path name look for a match
* against 'cmp'. The passed name will be expanded as needed for comparison
* purposes and redundant slashes stripped to ensure an accurate match.
*/
int
zfs_strcmp_pathname(char *name, char *cmp, int wholedisk)
{
int path_len, cmp_len;
char path_name[MAXPATHLEN];
char cmp_name[MAXPATHLEN];
char *dir, *dup;
/* Strip redundant slashes if one exists due to ZPOOL_IMPORT_PATH */
memset(cmp_name, 0, MAXPATHLEN);
dup = strdup(cmp);
dir = strtok(dup, "/");
while (dir) {
strlcat(cmp_name, "/", sizeof (cmp_name));
strlcat(cmp_name, dir, sizeof (cmp_name));
dir = strtok(NULL, "/");
}
free(dup);
if (name[0] != '/')
return (zfs_strcmp_shortname(name, cmp_name, wholedisk));
(void) strlcpy(path_name, name, MAXPATHLEN);
path_len = strlen(path_name);
cmp_len = strlen(cmp_name);
if (wholedisk) {
path_len = zfs_append_partition(path_name, MAXPATHLEN);
if (path_len == -1)
return (ENOMEM);
}
if ((path_len != cmp_len) || strcmp(path_name, cmp_name))
return (ENOENT);
return (0);
}
/*
* Given a full path to a device, determine if that device appears in the
* import search path. If it does, store the index of the first match in
* the passed 'order' variable and return 0, otherwise return an error.
*/
int
zfs_path_order(char *name, int *order)
{
int i = 0, error = ENOENT;
char *dir, *env, *envdup;
env = getenv("ZPOOL_IMPORT_PATH");
if (env) {
envdup = strdup(env);
dir = strtok(envdup, ":");
while (dir) {
if (strncmp(name, dir, strlen(dir)) == 0) {
*order = i;
error = 0;
break;
}
dir = strtok(NULL, ":");
i++;
}
free(envdup);
} else {
for (i = 0; i < DEFAULT_IMPORT_PATH_SIZE; i++) {
if (strncmp(name, zpool_default_import_path[i],
strlen(zpool_default_import_path[i])) == 0) {
*order = i;
error = 0;
break;
}
}
}
return (error);
}
/*
* Initialize the zc_nvlist_dst member to prepare for receiving an nvlist from
* an ioctl().
*/
int
zcmd_alloc_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, size_t len)
{
if (len == 0)
len = 16 * 1024;
zc->zc_nvlist_dst_size = len;
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
if (zc->zc_nvlist_dst == 0)
return (-1);
return (0);
}
/*
* Called when an ioctl() which returns an nvlist fails with ENOMEM. This will
* expand the nvlist to the size specified in 'zc_nvlist_dst_size', which was
* filled in by the kernel to indicate the actual required size.
*/
int
zcmd_expand_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_dst =
(uint64_t)(uintptr_t)zfs_alloc(hdl, zc->zc_nvlist_dst_size);
if (zc->zc_nvlist_dst == 0)
return (-1);
return (0);
}
/*
* Called to free the src and dst nvlists stored in the command structure.
*/
void
zcmd_free_nvlists(zfs_cmd_t *zc)
{
free((void *)(uintptr_t)zc->zc_nvlist_conf);
free((void *)(uintptr_t)zc->zc_nvlist_src);
free((void *)(uintptr_t)zc->zc_nvlist_dst);
zc->zc_nvlist_conf = 0;
zc->zc_nvlist_src = 0;
zc->zc_nvlist_dst = 0;
}
static int
zcmd_write_nvlist_com(libzfs_handle_t *hdl, uint64_t *outnv, uint64_t *outlen,
nvlist_t *nvl)
{
char *packed;
size_t len;
verify(nvlist_size(nvl, &len, NV_ENCODE_NATIVE) == 0);
if ((packed = zfs_alloc(hdl, len)) == NULL)
return (-1);
verify(nvlist_pack(nvl, &packed, &len, NV_ENCODE_NATIVE, 0) == 0);
*outnv = (uint64_t)(uintptr_t)packed;
*outlen = len;
return (0);
}
int
zcmd_write_conf_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_conf,
&zc->zc_nvlist_conf_size, nvl));
}
int
zcmd_write_src_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t *nvl)
{
return (zcmd_write_nvlist_com(hdl, &zc->zc_nvlist_src,
&zc->zc_nvlist_src_size, nvl));
}
/*
* Unpacks an nvlist from the ZFS ioctl command structure.
*/
int
zcmd_read_dst_nvlist(libzfs_handle_t *hdl, zfs_cmd_t *zc, nvlist_t **nvlp)
{
if (nvlist_unpack((void *)(uintptr_t)zc->zc_nvlist_dst,
zc->zc_nvlist_dst_size, nvlp, 0) != 0)
return (no_memory(hdl));
return (0);
}
int
zfs_ioctl(libzfs_handle_t *hdl, int request, zfs_cmd_t *zc)
{
return (ioctl(hdl->libzfs_fd, request, zc));
}
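/*
 * Illustrative sketch (example usage, not from the original sources): the
 * usual pattern for an ioctl that returns an nvlist -- allocate a
 * destination buffer, expand it on ENOMEM as described above, then unpack
 * and free.  The choice of ZFS_IOC_OBJSET_STATS is only an example.
 */
static int
example_fetch_nvlist(libzfs_handle_t *hdl, const char *name, nvlist_t **nvp)
{
	zfs_cmd_t zc = {"\0"};
	int err = 0;

	(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
	if (zcmd_alloc_dst_nvlist(hdl, &zc, 0) != 0)
		return (-1);

	while (zfs_ioctl(hdl, ZFS_IOC_OBJSET_STATS, &zc) != 0) {
		if (errno == ENOMEM) {
			/* Kernel filled in the required size; grow and retry */
			if (zcmd_expand_dst_nvlist(hdl, &zc) != 0) {
				err = -1;
				break;
			}
		} else {
			err = -1;
			break;
		}
	}
	if (err == 0)
		err = zcmd_read_dst_nvlist(hdl, &zc, nvp);
	zcmd_free_nvlists(&zc);
	return (err);
}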
/*
* ================================================================
* API shared by zfs and zpool property management
* ================================================================
*/
static void
zprop_print_headers(zprop_get_cbdata_t *cbp, zfs_type_t type)
{
zprop_list_t *pl = cbp->cb_proplist;
int i;
char *title;
size_t len;
cbp->cb_first = B_FALSE;
if (cbp->cb_scripted)
return;
/*
* Start with the length of the column headers.
*/
cbp->cb_colwidths[GET_COL_NAME] = strlen(dgettext(TEXT_DOMAIN, "NAME"));
cbp->cb_colwidths[GET_COL_PROPERTY] = strlen(dgettext(TEXT_DOMAIN,
"PROPERTY"));
cbp->cb_colwidths[GET_COL_VALUE] = strlen(dgettext(TEXT_DOMAIN,
"VALUE"));
cbp->cb_colwidths[GET_COL_RECVD] = strlen(dgettext(TEXT_DOMAIN,
"RECEIVED"));
cbp->cb_colwidths[GET_COL_SOURCE] = strlen(dgettext(TEXT_DOMAIN,
"SOURCE"));
/* first property is always NAME */
assert(cbp->cb_proplist->pl_prop ==
((type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME : ZFS_PROP_NAME));
/*
* Go through and calculate the widths for each column. For the
* 'source' column, we kludge it up by taking the worst-case scenario of
* inheriting from the longest name. This is acceptable because in the
* majority of cases 'SOURCE' is the last column displayed, and we don't
* use the width anyway. Note that the 'VALUE' column can be oversized,
* if the name of the property is much longer than any values we find.
*/
for (pl = cbp->cb_proplist; pl != NULL; pl = pl->pl_next) {
/*
* 'PROPERTY' column
*/
if (pl->pl_prop != ZPROP_INVAL) {
const char *propname = (type == ZFS_TYPE_POOL) ?
zpool_prop_to_name(pl->pl_prop) :
zfs_prop_to_name(pl->pl_prop);
len = strlen(propname);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
} else {
len = strlen(pl->pl_user_prop);
if (len > cbp->cb_colwidths[GET_COL_PROPERTY])
cbp->cb_colwidths[GET_COL_PROPERTY] = len;
}
/*
* 'VALUE' column. The first property is always the 'name'
* property that was tacked on either by /sbin/zfs's
* zfs_do_get() or when calling zprop_expand_list(), so we
* ignore its width. If the user specified the name property
* to display, then it will be later in the list in any case.
*/
if (pl != cbp->cb_proplist &&
pl->pl_width > cbp->cb_colwidths[GET_COL_VALUE])
cbp->cb_colwidths[GET_COL_VALUE] = pl->pl_width;
/* 'RECEIVED' column. */
if (pl != cbp->cb_proplist &&
pl->pl_recvd_width > cbp->cb_colwidths[GET_COL_RECVD])
cbp->cb_colwidths[GET_COL_RECVD] = pl->pl_recvd_width;
/*
* 'NAME' and 'SOURCE' columns
*/
if (pl->pl_prop == (type == ZFS_TYPE_POOL ? ZPOOL_PROP_NAME :
ZFS_PROP_NAME) &&
pl->pl_width > cbp->cb_colwidths[GET_COL_NAME]) {
cbp->cb_colwidths[GET_COL_NAME] = pl->pl_width;
cbp->cb_colwidths[GET_COL_SOURCE] = pl->pl_width +
strlen(dgettext(TEXT_DOMAIN, "inherited from"));
}
}
/*
* Now go through and print the headers.
*/
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
title = dgettext(TEXT_DOMAIN, "NAME");
break;
case GET_COL_PROPERTY:
title = dgettext(TEXT_DOMAIN, "PROPERTY");
break;
case GET_COL_VALUE:
title = dgettext(TEXT_DOMAIN, "VALUE");
break;
case GET_COL_RECVD:
title = dgettext(TEXT_DOMAIN, "RECEIVED");
break;
case GET_COL_SOURCE:
title = dgettext(TEXT_DOMAIN, "SOURCE");
break;
default:
title = NULL;
}
if (title != NULL) {
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", title);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
title);
}
}
(void) printf("\n");
}
/*
* Display a single line of output, according to the settings in the callback
* structure.
*/
void
zprop_print_one_property(const char *name, zprop_get_cbdata_t *cbp,
const char *propname, const char *value, zprop_source_t sourcetype,
const char *source, const char *recvd_value)
{
int i;
const char *str = NULL;
char buf[128];
/*
* Ignore those source types that the user has chosen to ignore.
*/
if ((sourcetype & cbp->cb_sources) == 0)
return;
if (cbp->cb_first)
zprop_print_headers(cbp, cbp->cb_type);
for (i = 0; i < ZFS_GET_NCOLS; i++) {
switch (cbp->cb_columns[i]) {
case GET_COL_NAME:
str = name;
break;
case GET_COL_PROPERTY:
str = propname;
break;
case GET_COL_VALUE:
str = value;
break;
case GET_COL_SOURCE:
switch (sourcetype) {
case ZPROP_SRC_NONE:
str = "-";
break;
case ZPROP_SRC_DEFAULT:
str = "default";
break;
case ZPROP_SRC_LOCAL:
str = "local";
break;
case ZPROP_SRC_TEMPORARY:
str = "temporary";
break;
case ZPROP_SRC_INHERITED:
(void) snprintf(buf, sizeof (buf),
"inherited from %s", source);
str = buf;
break;
case ZPROP_SRC_RECEIVED:
str = "received";
break;
default:
str = NULL;
assert(!"unhandled zprop_source_t");
}
break;
case GET_COL_RECVD:
str = (recvd_value == NULL ? "-" : recvd_value);
break;
default:
continue;
}
if (i == (ZFS_GET_NCOLS - 1) ||
cbp->cb_columns[i + 1] == GET_COL_NONE)
(void) printf("%s", str);
else if (cbp->cb_scripted)
(void) printf("%s\t", str);
else
(void) printf("%-*s ",
cbp->cb_colwidths[cbp->cb_columns[i]],
str);
}
(void) printf("\n");
}
/*
* Given a numeric suffix, convert the value into the number of bits by which
* the resulting value must be shifted.
*/
static int
str2shift(libzfs_handle_t *hdl, const char *buf)
{
const char *ends = "BKMGTPEZ";
int i;
if (buf[0] == '\0')
return (0);
for (i = 0; i < strlen(ends); i++) {
if (toupper(buf[0]) == ends[i])
break;
}
if (i == strlen(ends)) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Allow 'G' = 'GB' = 'GiB', case-insensitively.
* However, 'BB' and 'BiB' are disallowed.
*/
if (buf[1] == '\0' ||
(toupper(buf[0]) != 'B' &&
((toupper(buf[1]) == 'B' && buf[2] == '\0') ||
(toupper(buf[1]) == 'I' && toupper(buf[2]) == 'B' &&
buf[3] == '\0'))))
return (10 * i);
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid numeric suffix '%s'"), buf);
return (-1);
}
/*
* Convert a string of the form '100G' into a real number. Used when setting
* properties or creating a volume. On a parse failure an extended error
* message is set on 'hdl' via zfs_error_aux() when a handle is supplied.
*/
int
zfs_nicestrtonum(libzfs_handle_t *hdl, const char *value, uint64_t *num)
{
char *end;
int shift;
*num = 0;
/* Check to see if this looks like a number. */
if ((value[0] < '0' || value[0] > '9') && value[0] != '.') {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"bad numeric value '%s'"), value);
return (-1);
}
/* Rely on strtoull() to process the numeric portion. */
errno = 0;
*num = strtoull(value, &end, 10);
/*
* Check for ERANGE, which indicates that the value is too large to fit
* in a 64-bit value.
*/
if (errno == ERANGE) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
/*
* If we have a decimal value, then do the computation with floating
* point arithmetic. Otherwise, use standard arithmetic.
*/
if (*end == '.') {
double fval = strtod(value, &end);
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
fval *= pow(2, shift);
if (fval > UINT64_MAX) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num = (uint64_t)fval;
} else {
if ((shift = str2shift(hdl, end)) == -1)
return (-1);
/* Check for overflow */
if (shift >= 64 || (*num << shift) >> shift != *num) {
if (hdl)
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"numeric value is too large"));
return (-1);
}
*num <<= shift;
}
return (0);
}
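/*
 * Illustrative sketch (example usage, not from the original sources): a few
 * inputs and the values the parser above produces.
 */
static void
example_nicestrtonum(void)
{
	uint64_t val;

	(void) zfs_nicestrtonum(NULL, "100G", &val);
	/* val == 100ULL << 30, i.e. 107374182400 */
	(void) zfs_nicestrtonum(NULL, "1.5K", &val);
	/* val == 1536 -- decimal values go through floating point */
	if (zfs_nicestrtonum(NULL, "12XB", &val) != 0) {
		/* invalid suffix; with a non-NULL hdl an aux error is set */
	}
}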
/*
* Given a propname=value nvpair to set, parse any numeric properties
* (index, boolean, etc) if they are specified as strings and add the
* resulting nvpair to the returned nvlist.
*
* At the DSL layer, all properties are either 64-bit numbers or strings.
* We want the user to be able to ignore this fact and specify properties
* as native values (numbers, for example) or as strings (to simplify
* command line utilities). This also handles converting index types
* (compression, checksum, etc) from strings to their on-disk index.
*/
int
zprop_parse_value(libzfs_handle_t *hdl, nvpair_t *elem, int prop,
zfs_type_t type, nvlist_t *ret, char **svalp, uint64_t *ivalp,
const char *errbuf)
{
data_type_t datatype = nvpair_type(elem);
zprop_type_t proptype;
const char *propname;
char *value;
boolean_t isnone = B_FALSE;
boolean_t isauto = B_FALSE;
int err = 0;
if (type == ZFS_TYPE_POOL) {
proptype = zpool_prop_get_type(prop);
propname = zpool_prop_to_name(prop);
} else {
proptype = zfs_prop_get_type(prop);
propname = zfs_prop_to_name(prop);
}
/*
* Convert any properties to the internal DSL value types.
*/
*svalp = NULL;
*ivalp = 0;
switch (proptype) {
case PROP_TYPE_STRING:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
err = nvpair_value_string(elem, svalp);
if (err != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is invalid"), nvpair_name(elem));
goto error;
}
if (strlen(*svalp) >= ZFS_MAXPROPLEN) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' is too long"), nvpair_name(elem));
goto error;
}
break;
case PROP_TYPE_NUMBER:
if (datatype == DATA_TYPE_STRING) {
(void) nvpair_value_string(elem, &value);
if (strcmp(value, "none") == 0) {
isnone = B_TRUE;
} else if (strcmp(value, "auto") == 0) {
isauto = B_TRUE;
} else if (zfs_nicestrtonum(hdl, value, ivalp) != 0) {
goto error;
}
} else if (datatype == DATA_TYPE_UINT64) {
(void) nvpair_value_uint64(elem, ivalp);
} else {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a number"), nvpair_name(elem));
goto error;
}
/*
* Quota special: force 'none' and don't allow 0.
*/
if ((type & ZFS_TYPE_DATASET) && *ivalp == 0 && !isnone &&
(prop == ZFS_PROP_QUOTA || prop == ZFS_PROP_REFQUOTA)) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"use 'none' to disable quota/refquota"));
goto error;
}
/*
* Special handling for "*_limit=none". In this case it's not
* 0 but UINT64_MAX.
*/
if ((type & ZFS_TYPE_DATASET) && isnone &&
(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT)) {
*ivalp = UINT64_MAX;
}
/*
* Special handling for setting 'refreservation' to 'auto'. Use
* UINT64_MAX to tell the caller to use zfs_fix_auto_resv().
* 'auto' is only allowed on volumes.
*/
if (isauto) {
switch (prop) {
case ZFS_PROP_REFRESERVATION:
if ((type & ZFS_TYPE_VOLUME) == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s=auto' only allowed on "
"volumes"), nvpair_name(elem));
goto error;
}
*ivalp = UINT64_MAX;
break;
default:
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'auto' is invalid value for '%s'"),
nvpair_name(elem));
goto error;
}
}
break;
case PROP_TYPE_INDEX:
if (datatype != DATA_TYPE_STRING) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be a string"), nvpair_name(elem));
goto error;
}
(void) nvpair_value_string(elem, &value);
if (zprop_string_to_index(prop, value, ivalp, type) != 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"'%s' must be one of '%s'"), propname,
zprop_values(prop, type));
goto error;
}
break;
default:
abort();
}
/*
* Add the result to our return set of properties.
*/
if (*svalp != NULL) {
if (nvlist_add_string(ret, propname, *svalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
} else {
if (nvlist_add_uint64(ret, propname, *ivalp) != 0) {
(void) no_memory(hdl);
return (-1);
}
}
return (0);
error:
(void) zfs_error(hdl, EZFS_BADPROP, errbuf);
return (-1);
}
static int
addlist(libzfs_handle_t *hdl, char *propname, zprop_list_t **listp,
zfs_type_t type)
{
int prop;
zprop_list_t *entry;
prop = zprop_name_to_prop(propname, type);
if (prop != ZPROP_INVAL && !zprop_valid_for_type(prop, type, B_FALSE))
prop = ZPROP_INVAL;
/*
* When no property table entry can be found, return failure if
* this is a pool property or if this isn't a user-defined
* dataset property.
*/
if (prop == ZPROP_INVAL && ((type == ZFS_TYPE_POOL &&
!zpool_prop_feature(propname) &&
!zpool_prop_unsupported(propname)) ||
(type == ZFS_TYPE_DATASET && !zfs_prop_user(propname) &&
!zfs_prop_userquota(propname) && !zfs_prop_written(propname)))) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"invalid property '%s'"), propname);
return (zfs_error(hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "bad property list")));
}
if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
return (-1);
entry->pl_prop = prop;
if (prop == ZPROP_INVAL) {
if ((entry->pl_user_prop = zfs_strdup(hdl, propname)) ==
NULL) {
free(entry);
return (-1);
}
entry->pl_width = strlen(propname);
} else {
entry->pl_width = zprop_width(prop, &entry->pl_fixed,
type);
}
*listp = entry;
return (0);
}
/*
* Given a comma-separated list of properties, construct a property list
* containing both user-defined and native properties. This function will
* return a NULL list if 'all' is specified, which can later be expanded
* by zprop_expand_list().
*/
int
zprop_get_list(libzfs_handle_t *hdl, char *props, zprop_list_t **listp,
zfs_type_t type)
{
*listp = NULL;
/*
* If 'all' is specified, return a NULL list.
*/
if (strcmp(props, "all") == 0)
return (0);
/*
* If no props were specified, return an error.
*/
if (props[0] == '\0') {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"no properties specified"));
return (zfs_error(hdl, EZFS_BADPROP, dgettext(TEXT_DOMAIN,
"bad property list")));
}
/*
* It would be nice to use getsubopt() here, but the inclusion of column
* aliases makes this more effort than it's worth.
*/
while (*props != '\0') {
size_t len;
char *p;
char c;
if ((p = strchr(props, ',')) == NULL) {
len = strlen(props);
p = props + len;
} else {
len = p - props;
}
/*
* Check for empty options.
*/
if (len == 0) {
zfs_error_aux(hdl, dgettext(TEXT_DOMAIN,
"empty property name"));
return (zfs_error(hdl, EZFS_BADPROP,
dgettext(TEXT_DOMAIN, "bad property list")));
}
/*
* Check all regular property names.
*/
c = props[len];
props[len] = '\0';
if (strcmp(props, "space") == 0) {
static char *spaceprops[] = {
"name", "avail", "used", "usedbysnapshots",
"usedbydataset", "usedbyrefreservation",
"usedbychildren", NULL
};
int i;
for (i = 0; spaceprops[i]; i++) {
if (addlist(hdl, spaceprops[i], listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
} else {
if (addlist(hdl, props, listp, type))
return (-1);
listp = &(*listp)->pl_next;
}
props = p;
if (c == ',')
props++;
}
return (0);
}
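/*
 * Illustrative sketch (example usage, not from the original sources): build
 * and release a property list.  Note that the props string is parsed in
 * place, so a writable buffer is passed rather than a string literal; the
 * property names chosen here are only examples.
 */
static int
example_build_proplist(libzfs_handle_t *hdl)
{
	char props[] = "name,used,avail,compression";
	zprop_list_t *pl = NULL;

	if (zprop_get_list(hdl, props, &pl, ZFS_TYPE_DATASET) != 0)
		return (-1);
	/* ... walk the list via pl->pl_next, as zfs_do_get() would ... */
	zprop_free_list(pl);
	return (0);
}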
void
zprop_free_list(zprop_list_t *pl)
{
zprop_list_t *next;
while (pl != NULL) {
next = pl->pl_next;
free(pl->pl_user_prop);
free(pl);
pl = next;
}
}
typedef struct expand_data {
zprop_list_t **last;
libzfs_handle_t *hdl;
zfs_type_t type;
} expand_data_t;
int
zprop_expand_list_cb(int prop, void *cb)
{
zprop_list_t *entry;
expand_data_t *edp = cb;
if ((entry = zfs_alloc(edp->hdl, sizeof (zprop_list_t))) == NULL)
return (ZPROP_INVAL);
entry->pl_prop = prop;
entry->pl_width = zprop_width(prop, &entry->pl_fixed, edp->type);
entry->pl_all = B_TRUE;
*(edp->last) = entry;
edp->last = &entry->pl_next;
return (ZPROP_CONT);
}
int
zprop_expand_list(libzfs_handle_t *hdl, zprop_list_t **plp, zfs_type_t type)
{
zprop_list_t *entry;
zprop_list_t **last;
expand_data_t exp;
if (*plp == NULL) {
/*
* If this is the very first time we've been called for an 'all'
* specification, expand the list to include all native
* properties.
*/
last = plp;
exp.last = last;
exp.hdl = hdl;
exp.type = type;
if (zprop_iter_common(zprop_expand_list_cb, &exp, B_FALSE,
B_FALSE, type) == ZPROP_INVAL)
return (-1);
/*
* Add 'name' to the beginning of the list, which is handled
* specially.
*/
if ((entry = zfs_alloc(hdl, sizeof (zprop_list_t))) == NULL)
return (-1);
entry->pl_prop = (type == ZFS_TYPE_POOL) ? ZPOOL_PROP_NAME :
ZFS_PROP_NAME;
entry->pl_width = zprop_width(entry->pl_prop,
&entry->pl_fixed, type);
entry->pl_all = B_TRUE;
entry->pl_next = *plp;
*plp = entry;
}
return (0);
}
int
zprop_iter(zprop_func func, void *cb, boolean_t show_all, boolean_t ordered,
zfs_type_t type)
{
return (zprop_iter_common(func, cb, show_all, ordered, type));
}
diff --git a/lib/libzfs_core/libzfs_core.c b/lib/libzfs_core/libzfs_core.c
index 93704223eb6d..42362e3211c0 100644
--- a/lib/libzfs_core/libzfs_core.c
+++ b/lib/libzfs_core/libzfs_core.c
@@ -1,1221 +1,1231 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright 2017 RackTop Systems.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
/*
* LibZFS_Core (lzc) is intended to replace most functionality in libzfs.
* It has the following characteristics:
*
* - Thread Safe. libzfs_core is accessible concurrently from multiple
* threads. This is accomplished primarily by avoiding global data
* (e.g. caching). Since it's thread-safe, there is no reason for a
* process to have multiple libzfs "instances". Therefore, we store
* our few pieces of data (e.g. the file descriptor) in global
* variables. The fd is reference-counted so that the libzfs_core
* library can be "initialized" multiple times (e.g. by different
* consumers within the same process).
*
* - Committed Interface. The libzfs_core interface will be committed,
* therefore consumers can compile against it and be confident that
* their code will continue to work on future releases of this code.
* Currently, the interface is Evolving (not Committed), but we intend
* to commit to it once it is more complete and we determine that it
* meets the needs of all consumers.
*
* - Programmatic Error Handling. libzfs_core communicates errors with
* defined error numbers, and doesn't print anything to stdout/stderr.
*
* - Thin Layer. libzfs_core is a thin layer, marshaling arguments
* to/from the kernel ioctls. There is generally a 1:1 correspondence
* between libzfs_core functions and ioctls to /dev/zfs.
*
* - Clear Atomicity. Because libzfs_core functions are generally 1:1
* with kernel ioctls, and kernel ioctls are generally atomic, each
* libzfs_core function is atomic. For example, creating multiple
* snapshots with a single call to lzc_snapshot() is atomic -- it
* can't fail with only some of the requested snapshots created, even
* in the event of power loss or system crash.
*
* - Continued libzfs Support. Some higher-level operations (e.g.
* support for "zfs send -R") are too complicated to fit the scope of
* libzfs_core. This functionality will continue to live in libzfs.
* Where appropriate, libzfs will use the underlying atomic operations
* of libzfs_core. For example, libzfs may implement "zfs send -R |
* zfs receive" by using individual "send one snapshot", rename,
* destroy, and "receive one snapshot" operations in libzfs_core.
* /sbin/zfs and /sbin/zpool will link with both libzfs and
* libzfs_core. Other consumers should aim to use only libzfs_core,
* since that will be the supported, stable interface going forward.
*/
#include <libzfs_core.h>
#include <ctype.h>
#include <unistd.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <pthread.h>
#include <sys/nvpair.h>
#include <sys/param.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
static int g_fd = -1;
static pthread_mutex_t g_lock = PTHREAD_MUTEX_INITIALIZER;
static int g_refcount;
int
libzfs_core_init(void)
{
(void) pthread_mutex_lock(&g_lock);
if (g_refcount == 0) {
g_fd = open("/dev/zfs", O_RDWR);
if (g_fd < 0) {
(void) pthread_mutex_unlock(&g_lock);
return (errno);
}
}
g_refcount++;
(void) pthread_mutex_unlock(&g_lock);
return (0);
}
void
libzfs_core_fini(void)
{
(void) pthread_mutex_lock(&g_lock);
ASSERT3S(g_refcount, >, 0);
if (g_refcount > 0)
g_refcount--;
if (g_refcount == 0 && g_fd != -1) {
(void) close(g_fd);
g_fd = -1;
}
(void) pthread_mutex_unlock(&g_lock);
}
static int
lzc_ioctl(zfs_ioc_t ioc, const char *name,
nvlist_t *source, nvlist_t **resultp)
{
zfs_cmd_t zc = {"\0"};
int error = 0;
char *packed = NULL;
size_t size = 0;
ASSERT3S(g_refcount, >, 0);
VERIFY3S(g_fd, !=, -1);
if (name != NULL)
(void) strlcpy(zc.zc_name, name, sizeof (zc.zc_name));
if (source != NULL) {
packed = fnvlist_pack(source, &size);
zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
zc.zc_nvlist_src_size = size;
}
if (resultp != NULL) {
*resultp = NULL;
if (ioc == ZFS_IOC_CHANNEL_PROGRAM) {
zc.zc_nvlist_dst_size = fnvlist_lookup_uint64(source,
ZCP_ARG_MEMLIMIT);
} else {
zc.zc_nvlist_dst_size = MAX(size * 2, 128 * 1024);
}
zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
malloc(zc.zc_nvlist_dst_size);
if (zc.zc_nvlist_dst == (uint64_t)0) {
error = ENOMEM;
goto out;
}
}
while (ioctl(g_fd, ioc, &zc) != 0) {
/*
* If ioctl exited with ENOMEM, we retry the ioctl after
* increasing the size of the destination nvlist.
*
* Channel programs that exit with ENOMEM ran over the
* lua memory sandbox; they should not be retried.
*/
if (errno == ENOMEM && resultp != NULL &&
ioc != ZFS_IOC_CHANNEL_PROGRAM) {
free((void *)(uintptr_t)zc.zc_nvlist_dst);
zc.zc_nvlist_dst_size *= 2;
zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
malloc(zc.zc_nvlist_dst_size);
if (zc.zc_nvlist_dst == (uint64_t)0) {
error = ENOMEM;
goto out;
}
} else {
error = errno;
break;
}
}
if (zc.zc_nvlist_dst_filled) {
*resultp = fnvlist_unpack((void *)(uintptr_t)zc.zc_nvlist_dst,
zc.zc_nvlist_dst_size);
}
out:
if (packed != NULL)
fnvlist_pack_free(packed, size);
free((void *)(uintptr_t)zc.zc_nvlist_dst);
return (error);
}
int
lzc_create(const char *fsname, enum lzc_dataset_type type, nvlist_t *props,
uint8_t *wkeydata, uint_t wkeylen)
{
int error;
nvlist_t *hidden_args = NULL;
nvlist_t *args = fnvlist_alloc();
fnvlist_add_int32(args, "type", (dmu_objset_type_t)type);
if (props != NULL)
fnvlist_add_nvlist(args, "props", props);
if (wkeydata != NULL) {
hidden_args = fnvlist_alloc();
fnvlist_add_uint8_array(hidden_args, "wkeydata", wkeydata,
wkeylen);
fnvlist_add_nvlist(args, ZPOOL_HIDDEN_ARGS, hidden_args);
}
error = lzc_ioctl(ZFS_IOC_CREATE, fsname, args, NULL);
nvlist_free(hidden_args);
nvlist_free(args);
return (error);
}
int
lzc_clone(const char *fsname, const char *origin, nvlist_t *props)
{
int error;
nvlist_t *hidden_args = NULL;
nvlist_t *args = fnvlist_alloc();
fnvlist_add_string(args, "origin", origin);
if (props != NULL)
fnvlist_add_nvlist(args, "props", props);
error = lzc_ioctl(ZFS_IOC_CLONE, fsname, args, NULL);
nvlist_free(hidden_args);
nvlist_free(args);
return (error);
}
int
lzc_promote(const char *fsname, char *snapnamebuf, int snapnamelen)
{
/*
* The promote ioctl is still legacy, so we need to construct our
* own zfs_cmd_t rather than using lzc_ioctl().
*/
zfs_cmd_t zc = { "\0" };
ASSERT3S(g_refcount, >, 0);
VERIFY3S(g_fd, !=, -1);
(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
if (ioctl(g_fd, ZFS_IOC_PROMOTE, &zc) != 0) {
int error = errno;
if (error == EEXIST && snapnamebuf != NULL)
(void) strlcpy(snapnamebuf, zc.zc_string, snapnamelen);
return (error);
}
return (0);
}
+int
+lzc_remap(const char *fsname)
+{
+ int error;
+ nvlist_t *args = fnvlist_alloc();
+ error = lzc_ioctl(ZFS_IOC_REMAP, fsname, args, NULL);
+ nvlist_free(args);
+ return (error);
+}
+
/*
* Creates snapshots.
*
* The keys in the snaps nvlist are the snapshots to be created.
* They must all be in the same pool.
*
* The props nvlist is properties to set. Currently only user properties
* are supported. { user:prop_name -> string value }
*
* The returned results nvlist will have an entry for each snapshot that failed.
* The value will be the (int32) error code.
*
* The return value will be 0 if all snapshots were created, otherwise it will
* be the errno of an (unspecified) snapshot that failed.
*/
int
lzc_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t **errlist)
{
nvpair_t *elem;
nvlist_t *args;
int error;
char pool[ZFS_MAX_DATASET_NAME_LEN];
*errlist = NULL;
/* determine the pool name */
elem = nvlist_next_nvpair(snaps, NULL);
if (elem == NULL)
return (0);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/@")] = '\0';
args = fnvlist_alloc();
fnvlist_add_nvlist(args, "snaps", snaps);
if (props != NULL)
fnvlist_add_nvlist(args, "props", props);
error = lzc_ioctl(ZFS_IOC_SNAPSHOT, pool, args, errlist);
nvlist_free(args);
return (error);
}
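/*
 * Illustrative sketch (not part of this patch): create two snapshots in the
 * same pool atomically and report any per-snapshot errors from the errlist,
 * as described above. Assumes <libnvpair.h> and <stdio.h> are in scope and
 * libzfs_core_init() has been called; the dataset names are hypothetical.
 */
static void
example_lzc_snapshot_usage(void)
{
	nvlist_t *snaps = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	nvpair_t *pair;
	int err;

	/* keys are the snapshots to create; the values are ignored */
	fnvlist_add_boolean(snaps, "tank/fs@today");
	fnvlist_add_boolean(snaps, "tank/vol@today");

	err = lzc_snapshot(snaps, NULL, &errlist);
	if (err != 0 && errlist != NULL) {
		for (pair = nvlist_next_nvpair(errlist, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(errlist, pair)) {
			(void) fprintf(stderr, "%s: error %d\n",
			    nvpair_name(pair),
			    (int)fnvpair_value_int32(pair));
		}
	}
	fnvlist_free(snaps);
	nvlist_free(errlist);
}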
/*
* Destroys snapshots.
*
* The keys in the snaps nvlist are the snapshots to be destroyed.
* They must all be in the same pool.
*
* Snapshots that do not exist will be silently ignored.
*
* If 'defer' is not set, and a snapshot has user holds or clones, the
* destroy operation will fail and none of the snapshots will be
* destroyed.
*
* If 'defer' is set, and a snapshot has user holds or clones, it will be
* marked for deferred destruction, and will be destroyed when the last hold
* or clone is removed/destroyed.
*
* The return value will be 0 if all snapshots were destroyed (or marked for
* later destruction if 'defer' is set) or didn't exist to begin with.
*
* Otherwise the return value will be the errno of an (unspecified) snapshot
* that failed, no snapshots will be destroyed, and the errlist will have an
* entry for each snapshot that failed. The value in the errlist will be
* the (int32) error code.
*/
int
lzc_destroy_snaps(nvlist_t *snaps, boolean_t defer, nvlist_t **errlist)
{
nvpair_t *elem;
nvlist_t *args;
int error;
char pool[ZFS_MAX_DATASET_NAME_LEN];
/* determine the pool name */
elem = nvlist_next_nvpair(snaps, NULL);
if (elem == NULL)
return (0);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/@")] = '\0';
args = fnvlist_alloc();
fnvlist_add_nvlist(args, "snaps", snaps);
if (defer)
fnvlist_add_boolean(args, "defer");
error = lzc_ioctl(ZFS_IOC_DESTROY_SNAPS, pool, args, errlist);
nvlist_free(args);
return (error);
}
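/*
 * Illustrative sketch (not part of this patch): destroy a snapshot, deferring
 * the destroy if it still has user holds or clones, per the comment above.
 * Assumes <libnvpair.h> and <stdio.h>; the snapshot name is hypothetical.
 */
static void
example_lzc_destroy_snaps_usage(void)
{
	nvlist_t *snaps = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int err;

	fnvlist_add_boolean(snaps, "tank/fs@today");
	err = lzc_destroy_snaps(snaps, B_TRUE /* defer */, &errlist);
	if (err != 0)
		(void) fprintf(stderr, "destroy failed: %d\n", err);
	fnvlist_free(snaps);
	nvlist_free(errlist);
}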
int
lzc_snaprange_space(const char *firstsnap, const char *lastsnap,
uint64_t *usedp)
{
nvlist_t *args;
nvlist_t *result;
int err;
char fs[ZFS_MAX_DATASET_NAME_LEN];
char *atp;
/* determine the fs name */
(void) strlcpy(fs, firstsnap, sizeof (fs));
atp = strchr(fs, '@');
if (atp == NULL)
return (EINVAL);
*atp = '\0';
args = fnvlist_alloc();
fnvlist_add_string(args, "firstsnap", firstsnap);
err = lzc_ioctl(ZFS_IOC_SPACE_SNAPS, lastsnap, args, &result);
nvlist_free(args);
if (err == 0)
*usedp = fnvlist_lookup_uint64(result, "used");
fnvlist_free(result);
return (err);
}
boolean_t
lzc_exists(const char *dataset)
{
/*
* The objset_stats ioctl is still legacy, so we need to construct our
* own zfs_cmd_t rather than using lzc_ioctl().
*/
zfs_cmd_t zc = {"\0"};
ASSERT3S(g_refcount, >, 0);
VERIFY3S(g_fd, !=, -1);
(void) strlcpy(zc.zc_name, dataset, sizeof (zc.zc_name));
return (ioctl(g_fd, ZFS_IOC_OBJSET_STATS, &zc) == 0);
}
/*
* outnvl is unused.
* It was added to preserve the function signature in case it is
* needed in the future.
*/
/*ARGSUSED*/
int
lzc_sync(const char *pool_name, nvlist_t *innvl, nvlist_t **outnvl)
{
return (lzc_ioctl(ZFS_IOC_POOL_SYNC, pool_name, innvl, NULL));
}
/*
* Create "user holds" on snapshots. If there is a hold on a snapshot,
* the snapshot cannot be destroyed. (However, it can be marked for deletion
* by lzc_destroy_snaps(defer=B_TRUE).)
*
* The keys in the nvlist are snapshot names.
* The snapshots must all be in the same pool.
* The value is the name of the hold (string type).
*
* If cleanup_fd is not -1, it must be the result of open("/dev/zfs", O_EXCL).
* In this case, when the cleanup_fd is closed (including on process
* termination), the holds will be released. If the system is shut down
* uncleanly, the holds will be released when the pool is next opened
* or imported.
*
* Holds for snapshots which don't exist will be skipped and have an entry
* added to errlist, but will not cause an overall failure.
*
* The return value will be 0 if all holds, for snapshots that existed,
* were successfully created.
*
* Otherwise the return value will be the errno of an (unspecified) hold that
* failed and no holds will be created.
*
* In all cases the errlist will have an entry for each hold that failed
* (name = snapshot), with its value being the error code (int32).
*/
int
lzc_hold(nvlist_t *holds, int cleanup_fd, nvlist_t **errlist)
{
char pool[ZFS_MAX_DATASET_NAME_LEN];
nvlist_t *args;
nvpair_t *elem;
int error;
/* determine the pool name */
elem = nvlist_next_nvpair(holds, NULL);
if (elem == NULL)
return (0);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/@")] = '\0';
args = fnvlist_alloc();
fnvlist_add_nvlist(args, "holds", holds);
if (cleanup_fd != -1)
fnvlist_add_int32(args, "cleanup_fd", cleanup_fd);
error = lzc_ioctl(ZFS_IOC_HOLD, pool, args, errlist);
nvlist_free(args);
return (error);
}
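/*
 * Illustrative sketch (not part of this patch): place a named user hold on a
 * snapshot without a cleanup fd, so the hold persists until it is explicitly
 * released. The snapshot name and hold tag are hypothetical; assumes
 * libzfs_core_init() has been called.
 */
static int
example_lzc_hold_usage(void)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int err;

	/* key = snapshot name, value = hold tag (string) */
	fnvlist_add_string(holds, "tank/fs@backup", "backup-tool");
	err = lzc_hold(holds, -1, &errlist);
	fnvlist_free(holds);
	nvlist_free(errlist);
	return (err);
}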
/*
* Release "user holds" on snapshots. If the snapshot has been marked for
* deferred destroy (by lzc_destroy_snaps(defer=B_TRUE)), it does not have
* any clones, and all the user holds are removed, then the snapshot will be
* destroyed.
*
* The keys in the nvlist are snapshot names.
* The snapshots must all be in the same pool.
* The value is an nvlist whose keys are the holds to remove.
*
* Holds which failed to release because they didn't exist will have an entry
* added to errlist, but will not cause an overall failure.
*
* The return value will be 0 if the 'holds' nvlist was empty or if all holds
* that existed were successfully removed.
*
* Otherwise the return value will be the errno of an (unspecified) hold that
* failed to release and no holds will be released.
*
* In all cases the errlist will have an entry for each hold that failed to
* release.
*/
int
lzc_release(nvlist_t *holds, nvlist_t **errlist)
{
char pool[ZFS_MAX_DATASET_NAME_LEN];
nvpair_t *elem;
/* determine the pool name */
elem = nvlist_next_nvpair(holds, NULL);
if (elem == NULL)
return (0);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/@")] = '\0';
return (lzc_ioctl(ZFS_IOC_RELEASE, pool, holds, errlist));
}
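/*
 * Illustrative sketch (not part of this patch): release the hold created in
 * the lzc_hold() example above. Note the nested nvlist: each snapshot maps to
 * an nvlist whose keys are the hold tags to remove. Names are hypothetical.
 */
static int
example_lzc_release_usage(void)
{
	nvlist_t *holds = fnvlist_alloc();
	nvlist_t *tags = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int err;

	fnvlist_add_boolean(tags, "backup-tool");
	fnvlist_add_nvlist(holds, "tank/fs@backup", tags);
	err = lzc_release(holds, &errlist);
	fnvlist_free(tags);
	fnvlist_free(holds);
	nvlist_free(errlist);
	return (err);
}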
/*
* Retrieve list of user holds on the specified snapshot.
*
* On success, *holdsp will be set to an nvlist which the caller must free.
* The keys are the names of the holds, and the value is the creation time
* of the hold (uint64) in seconds since the epoch.
*/
int
lzc_get_holds(const char *snapname, nvlist_t **holdsp)
{
return (lzc_ioctl(ZFS_IOC_GET_HOLDS, snapname, NULL, holdsp));
}
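/*
 * Illustrative sketch (not part of this patch): list the holds on a snapshot
 * along with their creation times, following the layout described above.
 * Assumes <stdio.h>; the snapshot name is hypothetical.
 */
static void
example_lzc_get_holds_usage(void)
{
	nvlist_t *holds = NULL;
	nvpair_t *pair;

	if (lzc_get_holds("tank/fs@backup", &holds) != 0)
		return;
	for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
	    pair = nvlist_next_nvpair(holds, pair)) {
		(void) printf("%s\t%llu\n", nvpair_name(pair),
		    (unsigned long long)fnvpair_value_uint64(pair));
	}
	fnvlist_free(holds);
}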
/*
* Generate a zfs send stream for the specified snapshot and write it to
* the specified file descriptor.
*
* "snapname" is the full name of the snapshot to send (e.g. "pool/fs@snap")
*
* If "from" is NULL, a full (non-incremental) stream will be sent.
* If "from" is non-NULL, it must be the full name of a snapshot or
* bookmark to send an incremental from (e.g. "pool/fs@earlier_snap" or
* "pool/fs#earlier_bmark"). If non-NULL, the specified snapshot or
* bookmark must represent an earlier point in the history of "snapname".
* It can be an earlier snapshot in the same filesystem or zvol as "snapname",
* or it can be the origin of "snapname"'s filesystem, or an earlier
* snapshot in the origin, etc.
*
* "fd" is the file descriptor to write the send stream to.
*
* If "flags" contains LZC_SEND_FLAG_LARGE_BLOCK, the stream is permitted
* to contain DRR_WRITE records with drr_length > 128K, and DRR_OBJECT
* records with drr_blksz > 128K.
*
* If "flags" contains LZC_SEND_FLAG_EMBED_DATA, the stream is permitted
* to contain DRR_WRITE_EMBEDDED records with drr_etype==BP_EMBEDDED_TYPE_DATA,
* which the receiving system must support (as indicated by support
* for the "embedded_data" feature).
*/
int
lzc_send(const char *snapname, const char *from, int fd,
enum lzc_send_flags flags)
{
return (lzc_send_resume(snapname, from, fd, flags, 0, 0));
}
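/*
 * Illustrative sketch (not part of this patch): write an incremental send
 * stream between two snapshots to a file. Assumes <fcntl.h>, <unistd.h> and
 * <errno.h>; the output path and snapshot names are hypothetical.
 */
static int
example_lzc_send_usage(void)
{
	int fd, err;

	fd = open("/backup/fs-incremental.zstream",
	    O_WRONLY | O_CREAT | O_TRUNC, 0600);
	if (fd < 0)
		return (errno);
	/* "from" may also be NULL (full stream) or a bookmark */
	err = lzc_send("tank/fs@today", "tank/fs@yesterday", fd, 0);
	(void) close(fd);
	return (err);
}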
int
lzc_send_resume(const char *snapname, const char *from, int fd,
enum lzc_send_flags flags, uint64_t resumeobj, uint64_t resumeoff)
{
nvlist_t *args;
int err;
args = fnvlist_alloc();
fnvlist_add_int32(args, "fd", fd);
if (from != NULL)
fnvlist_add_string(args, "fromsnap", from);
if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
fnvlist_add_boolean(args, "largeblockok");
if (flags & LZC_SEND_FLAG_EMBED_DATA)
fnvlist_add_boolean(args, "embedok");
if (flags & LZC_SEND_FLAG_COMPRESS)
fnvlist_add_boolean(args, "compressok");
if (flags & LZC_SEND_FLAG_RAW)
fnvlist_add_boolean(args, "rawok");
if (resumeobj != 0 || resumeoff != 0) {
fnvlist_add_uint64(args, "resume_object", resumeobj);
fnvlist_add_uint64(args, "resume_offset", resumeoff);
}
err = lzc_ioctl(ZFS_IOC_SEND_NEW, snapname, args, NULL);
nvlist_free(args);
return (err);
}
/*
* "from" can be NULL, a snapshot, or a bookmark.
*
* If from is NULL, a full (non-incremental) stream will be estimated. This
* is calculated very efficiently.
*
* If from is a snapshot, lzc_send_space uses the deadlists attached to
* each snapshot to efficiently estimate the stream size.
*
* If from is a bookmark, the indirect blocks in the destination snapshot
* are traversed, looking for blocks with a birth time after the creation TXG of
* the snapshot this bookmark was created from. This will result in
* significantly more I/O and be less efficient than a send space estimation on
* an equivalent snapshot.
*/
int
lzc_send_space(const char *snapname, const char *from,
enum lzc_send_flags flags, uint64_t *spacep)
{
nvlist_t *args;
nvlist_t *result;
int err;
args = fnvlist_alloc();
if (from != NULL)
fnvlist_add_string(args, "from", from);
if (flags & LZC_SEND_FLAG_LARGE_BLOCK)
fnvlist_add_boolean(args, "largeblockok");
if (flags & LZC_SEND_FLAG_EMBED_DATA)
fnvlist_add_boolean(args, "embedok");
if (flags & LZC_SEND_FLAG_COMPRESS)
fnvlist_add_boolean(args, "compressok");
if (flags & LZC_SEND_FLAG_RAW)
fnvlist_add_boolean(args, "rawok");
err = lzc_ioctl(ZFS_IOC_SEND_SPACE, snapname, args, &result);
nvlist_free(args);
if (err == 0)
*spacep = fnvlist_lookup_uint64(result, "space");
nvlist_free(result);
return (err);
}
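/*
 * Illustrative sketch (not part of this patch): estimate the size of the
 * incremental stream from the previous example before generating it.
 * Assumes <stdio.h>; the snapshot names are hypothetical.
 */
static void
example_lzc_send_space_usage(void)
{
	uint64_t space = 0;

	if (lzc_send_space("tank/fs@today", "tank/fs@yesterday", 0,
	    &space) == 0)
		(void) printf("estimated stream size: %llu bytes\n",
		    (unsigned long long)space);
}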
static int
recv_read(int fd, void *buf, int ilen)
{
char *cp = buf;
int rv;
int len = ilen;
do {
rv = read(fd, cp, len);
cp += rv;
len -= rv;
} while (rv > 0);
if (rv < 0 || len != 0)
return (EIO);
return (0);
}
/*
* Linux adds ZFS_IOC_RECV_NEW for resumable and raw streams and preserves the
* legacy ZFS_IOC_RECV user/kernel interface. The new interface supports all
* stream options but is currently only used for resumable streams. This way
* updated user space utilities will interoperate with older kernel modules.
*
* Non-Linux OpenZFS platforms have opted to modify the legacy interface.
*/
static int
recv_impl(const char *snapname, nvlist_t *recvdprops, nvlist_t *localprops,
const char *origin, boolean_t force, boolean_t resumable, boolean_t raw,
int input_fd, const dmu_replay_record_t *begin_record, int cleanup_fd,
uint64_t *read_bytes, uint64_t *errflags, uint64_t *action_handle,
nvlist_t **errors)
{
dmu_replay_record_t drr;
char fsname[MAXPATHLEN];
char *atp;
int error;
ASSERT3S(g_refcount, >, 0);
VERIFY3S(g_fd, !=, -1);
/* Set 'fsname' to the name of containing filesystem */
(void) strlcpy(fsname, snapname, sizeof (fsname));
atp = strchr(fsname, '@');
if (atp == NULL)
return (EINVAL);
*atp = '\0';
/* If the fs does not exist, try its parent. */
if (!lzc_exists(fsname)) {
char *slashp = strrchr(fsname, '/');
if (slashp == NULL)
return (ENOENT);
*slashp = '\0';
}
/*
* The begin_record is normally a non-byteswapped BEGIN record.
* For resumable streams it may be set to any non-byteswapped
* dmu_replay_record_t.
*/
if (begin_record == NULL) {
error = recv_read(input_fd, &drr, sizeof (drr));
if (error != 0)
return (error);
} else {
drr = *begin_record;
}
if (resumable || raw) {
nvlist_t *outnvl = NULL;
nvlist_t *innvl = fnvlist_alloc();
fnvlist_add_string(innvl, "snapname", snapname);
if (recvdprops != NULL)
fnvlist_add_nvlist(innvl, "props", recvdprops);
if (localprops != NULL)
fnvlist_add_nvlist(innvl, "localprops", localprops);
if (origin != NULL && strlen(origin))
fnvlist_add_string(innvl, "origin", origin);
fnvlist_add_byte_array(innvl, "begin_record",
(uchar_t *)&drr, sizeof (drr));
fnvlist_add_int32(innvl, "input_fd", input_fd);
if (force)
fnvlist_add_boolean(innvl, "force");
if (resumable)
fnvlist_add_boolean(innvl, "resumable");
if (cleanup_fd >= 0)
fnvlist_add_int32(innvl, "cleanup_fd", cleanup_fd);
if (action_handle != NULL)
fnvlist_add_uint64(innvl, "action_handle",
*action_handle);
error = lzc_ioctl(ZFS_IOC_RECV_NEW, fsname, innvl, &outnvl);
if (error == 0 && read_bytes != NULL)
error = nvlist_lookup_uint64(outnvl, "read_bytes",
read_bytes);
if (error == 0 && errflags != NULL)
error = nvlist_lookup_uint64(outnvl, "error_flags",
errflags);
if (error == 0 && action_handle != NULL)
error = nvlist_lookup_uint64(outnvl, "action_handle",
action_handle);
if (error == 0 && errors != NULL) {
nvlist_t *nvl;
error = nvlist_lookup_nvlist(outnvl, "errors", &nvl);
if (error == 0)
*errors = fnvlist_dup(nvl);
}
fnvlist_free(innvl);
fnvlist_free(outnvl);
} else {
zfs_cmd_t zc = {"\0"};
char *packed = NULL;
size_t size;
ASSERT3S(g_refcount, >, 0);
(void) strlcpy(zc.zc_name, fsname, sizeof (zc.zc_name));
(void) strlcpy(zc.zc_value, snapname, sizeof (zc.zc_value));
if (recvdprops != NULL) {
packed = fnvlist_pack(recvdprops, &size);
zc.zc_nvlist_src = (uint64_t)(uintptr_t)packed;
zc.zc_nvlist_src_size = size;
}
if (localprops != NULL) {
packed = fnvlist_pack(localprops, &size);
zc.zc_nvlist_conf = (uint64_t)(uintptr_t)packed;
zc.zc_nvlist_conf_size = size;
}
if (origin != NULL)
(void) strlcpy(zc.zc_string, origin,
sizeof (zc.zc_string));
ASSERT3S(drr.drr_type, ==, DRR_BEGIN);
zc.zc_begin_record = drr.drr_u.drr_begin;
zc.zc_guid = force;
zc.zc_cookie = input_fd;
zc.zc_cleanup_fd = -1;
zc.zc_action_handle = 0;
if (cleanup_fd >= 0)
zc.zc_cleanup_fd = cleanup_fd;
if (action_handle != NULL)
zc.zc_action_handle = *action_handle;
zc.zc_nvlist_dst_size = 128 * 1024;
zc.zc_nvlist_dst = (uint64_t)(uintptr_t)
malloc(zc.zc_nvlist_dst_size);
error = ioctl(g_fd, ZFS_IOC_RECV, &zc);
if (error != 0) {
error = errno;
} else {
if (read_bytes != NULL)
*read_bytes = zc.zc_cookie;
if (errflags != NULL)
*errflags = zc.zc_obj;
if (action_handle != NULL)
*action_handle = zc.zc_action_handle;
if (errors != NULL)
VERIFY0(nvlist_unpack(
(void *)(uintptr_t)zc.zc_nvlist_dst,
zc.zc_nvlist_dst_size, errors, KM_SLEEP));
}
if (packed != NULL)
fnvlist_pack_free(packed, size);
free((void *)(uintptr_t)zc.zc_nvlist_dst);
}
return (error);
}
/*
* The simplest receive case: receive from the specified fd, creating the
* specified snapshot. Apply the specified properties as "received" properties
* (which can be overridden by locally-set properties). If the stream is a
* clone, its origin snapshot must be specified by 'origin'. The 'force'
* flag will cause the target filesystem to be rolled back or destroyed if
* necessary to receive.
*
* Return 0 on success or an errno on failure.
*
* Note: this interface does not work on dedup'd streams
* (those with DMU_BACKUP_FEATURE_DEDUP).
*/
int
lzc_receive(const char *snapname, nvlist_t *props, const char *origin,
boolean_t force, boolean_t raw, int fd)
{
return (recv_impl(snapname, props, NULL, origin, force, B_FALSE, raw,
fd, NULL, -1, NULL, NULL, NULL, NULL));
}
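/*
 * Illustrative sketch (not part of this patch): receive a non-resumable,
 * non-raw stream from stdin into a new snapshot, rolling back or destroying
 * the target if necessary. Assumes <unistd.h> for STDIN_FILENO; the snapshot
 * name is hypothetical.
 */
static int
example_lzc_receive_usage(void)
{
	return (lzc_receive("tank/restore@today", NULL, NULL,
	    B_TRUE /* force */, B_FALSE /* raw */, STDIN_FILENO));
}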
/*
* Like lzc_receive, but if the receive fails due to premature stream
* termination, the intermediate state will be preserved on disk. In this
* case, ECKSUM will be returned. The receive may subsequently be resumed
* with a resuming send stream generated by lzc_send_resume().
*/
int
lzc_receive_resumable(const char *snapname, nvlist_t *props, const char *origin,
boolean_t force, boolean_t raw, int fd)
{
return (recv_impl(snapname, props, NULL, origin, force, B_TRUE, raw,
fd, NULL, -1, NULL, NULL, NULL, NULL));
}
/*
* Like lzc_receive, but allows the caller to read the begin record and then to
* pass it in. That could be useful if the caller wants to derive, for example,
* the snapname or the origin parameters based on the information contained in
* the begin record.
* The begin record must be in its original form as read from the stream,
* in other words, it should not be byteswapped.
*
* The 'resumable' parameter allows the caller to obtain the same behavior as
* with lzc_receive_resumable.
*/
int
lzc_receive_with_header(const char *snapname, nvlist_t *props,
const char *origin, boolean_t force, boolean_t resumable, boolean_t raw,
int fd, const dmu_replay_record_t *begin_record)
{
if (begin_record == NULL)
return (EINVAL);
return (recv_impl(snapname, props, NULL, origin, force, resumable, raw,
fd, begin_record, -1, NULL, NULL, NULL, NULL));
}
/*
* Like lzc_receive, but allows the caller to pass all supported arguments
* and retrieve all values returned. The only additional input parameter
* is 'cleanup_fd' which is used to set a cleanup-on-exit file descriptor.
*
* The following parameters all provide return values. Several may be set
* in the failure case and will contain additional information.
*
* The 'read_bytes' value will be set to the total number of bytes read.
*
* The 'errflags' value will contain zprop_errflags_t flags which are
* used to describe any failures.
*
* The 'action_handle' is used to pass the handle for this guid/ds mapping.
* It should be set to zero on the first call and will contain an updated
* handle on success; it should be passed in on subsequent calls.
*
* The 'errors' nvlist contains an entry for each unapplied received
* property. Callers are responsible for freeing this nvlist.
*/
int lzc_receive_one(const char *snapname, nvlist_t *props,
const char *origin, boolean_t force, boolean_t resumable, boolean_t raw,
int input_fd, const dmu_replay_record_t *begin_record, int cleanup_fd,
uint64_t *read_bytes, uint64_t *errflags, uint64_t *action_handle,
nvlist_t **errors)
{
return (recv_impl(snapname, props, NULL, origin, force, resumable,
raw, input_fd, begin_record, cleanup_fd, read_bytes, errflags,
action_handle, errors));
}
/*
* Like lzc_receive_one, but allows the caller to pass an additional 'cmdprops'
* argument.
*
* The 'cmdprops' nvlist contains both override ('zfs receive -o') and
* exclude ('zfs receive -x') properties. Callers are responsible for freeing
* this nvlist.
*/
int lzc_receive_with_cmdprops(const char *snapname, nvlist_t *props,
nvlist_t *cmdprops, const char *origin, boolean_t force,
boolean_t resumable, boolean_t raw, int input_fd,
const dmu_replay_record_t *begin_record, int cleanup_fd,
uint64_t *read_bytes, uint64_t *errflags, uint64_t *action_handle,
nvlist_t **errors)
{
return (recv_impl(snapname, props, cmdprops, origin, force, resumable,
raw, input_fd, begin_record, cleanup_fd, read_bytes, errflags,
action_handle, errors));
}
/*
* Roll back this filesystem or volume to its most recent snapshot.
* If snapnamebuf is not NULL, it will be filled in with the name
* of the most recent snapshot.
* Note that the latest snapshot may change if a new one is concurrently
* created or the current one is destroyed. lzc_rollback_to can be used
* to roll back to a specific latest snapshot.
*
* Return 0 on success or an errno on failure.
*/
int
lzc_rollback(const char *fsname, char *snapnamebuf, int snapnamelen)
{
nvlist_t *args;
nvlist_t *result;
int err;
args = fnvlist_alloc();
err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
nvlist_free(args);
if (err == 0 && snapnamebuf != NULL) {
const char *snapname = fnvlist_lookup_string(result, "target");
(void) strlcpy(snapnamebuf, snapname, snapnamelen);
}
nvlist_free(result);
return (err);
}
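/*
 * Illustrative sketch (not part of this patch): roll a filesystem back to its
 * latest snapshot and report which snapshot that was. Assumes <stdio.h>; the
 * dataset name is hypothetical.
 */
static void
example_lzc_rollback_usage(void)
{
	char snapname[ZFS_MAX_DATASET_NAME_LEN];

	if (lzc_rollback("tank/fs", snapname, sizeof (snapname)) == 0)
		(void) printf("rolled back to %s\n", snapname);
}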
/*
* Roll back this filesystem or volume to the specified snapshot,
* if possible.
*
* Return 0 on success or an errno on failure.
*/
int
lzc_rollback_to(const char *fsname, const char *snapname)
{
nvlist_t *args;
nvlist_t *result;
int err;
args = fnvlist_alloc();
fnvlist_add_string(args, "target", snapname);
err = lzc_ioctl(ZFS_IOC_ROLLBACK, fsname, args, &result);
nvlist_free(args);
nvlist_free(result);
return (err);
}
/*
* Creates bookmarks.
*
* The bookmarks nvlist maps from the name of the bookmark (e.g. "pool/fs#bmark") to
* the name of the snapshot (e.g. "pool/fs@snap"). All the bookmarks and
* snapshots must be in the same pool.
*
* The returned results nvlist will have an entry for each bookmark that failed.
* The value will be the (int32) error code.
*
* The return value will be 0 if all bookmarks were created, otherwise it will
* be the errno of an (undetermined) bookmark that failed.
*/
int
lzc_bookmark(nvlist_t *bookmarks, nvlist_t **errlist)
{
nvpair_t *elem;
int error;
char pool[ZFS_MAX_DATASET_NAME_LEN];
/* determine the pool name */
elem = nvlist_next_nvpair(bookmarks, NULL);
if (elem == NULL)
return (0);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/#")] = '\0';
error = lzc_ioctl(ZFS_IOC_BOOKMARK, pool, bookmarks, errlist);
return (error);
}
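/*
 * Illustrative sketch (not part of this patch): bookmark a snapshot so that
 * it can later serve as an incremental send source even after the snapshot
 * itself is destroyed. The bookmark and snapshot names are hypothetical.
 */
static int
example_lzc_bookmark_usage(void)
{
	nvlist_t *bmarks = fnvlist_alloc();
	nvlist_t *errlist = NULL;
	int err;

	/* key = new bookmark, value = existing snapshot */
	fnvlist_add_string(bmarks, "tank/fs#pre-upgrade",
	    "tank/fs@pre-upgrade");
	err = lzc_bookmark(bmarks, &errlist);
	fnvlist_free(bmarks);
	nvlist_free(errlist);
	return (err);
}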
/*
* Retrieve bookmarks.
*
* Retrieve the list of bookmarks for the given file system. The props
* parameter is an nvlist of property names (with no values) that will be
* returned for each bookmark.
*
* The following are valid properties on bookmarks, all of which are numbers
* (represented as uint64 in the nvlist)
*
* "guid" - globally unique identifier of the snapshot it refers to
* "createtxg" - txg when the snapshot it refers to was created
* "creation" - timestamp when the snapshot it refers to was created
*
* The format of the returned nvlist is as follows:
* <short name of bookmark> -> {
* <name of property> -> {
* "value" -> uint64
* }
* }
*/
int
lzc_get_bookmarks(const char *fsname, nvlist_t *props, nvlist_t **bmarks)
{
return (lzc_ioctl(ZFS_IOC_GET_BOOKMARKS, fsname, props, bmarks));
}
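/*
 * Illustrative sketch (not part of this patch): list the bookmarks on a
 * filesystem along with their creation txg, following the nested nvlist
 * layout described above. Assumes <stdio.h>; the dataset name is
 * hypothetical.
 */
static void
example_lzc_get_bookmarks_usage(void)
{
	nvlist_t *props = fnvlist_alloc();
	nvlist_t *bmarks = NULL;
	nvpair_t *pair;

	fnvlist_add_boolean(props, "createtxg");
	if (lzc_get_bookmarks("tank/fs", props, &bmarks) == 0) {
		for (pair = nvlist_next_nvpair(bmarks, NULL); pair != NULL;
		    pair = nvlist_next_nvpair(bmarks, pair)) {
			nvlist_t *bprops = fnvpair_value_nvlist(pair);
			nvlist_t *ctxg = fnvlist_lookup_nvlist(bprops,
			    "createtxg");
			(void) printf("%s\tcreatetxg=%llu\n",
			    nvpair_name(pair), (unsigned long long)
			    fnvlist_lookup_uint64(ctxg, "value"));
		}
		fnvlist_free(bmarks);
	}
	fnvlist_free(props);
}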
/*
* Destroys bookmarks.
*
* The keys in the bmarks nvlist are the bookmarks to be destroyed.
* They must all be in the same pool. Bookmarks are specified as
* <fs>#<bmark>.
*
* Bookmarks that do not exist will be silently ignored.
*
* The return value will be 0 if all bookmarks that existed were destroyed.
*
* Otherwise the return value will be the errno of an (undetermined) bookmark
* that failed, no bookmarks will be destroyed, and the errlist will have an
* entry for each bookmark that failed. The value in the errlist will be
* the (int32) error code.
*/
int
lzc_destroy_bookmarks(nvlist_t *bmarks, nvlist_t **errlist)
{
nvpair_t *elem;
int error;
char pool[ZFS_MAX_DATASET_NAME_LEN];
/* determine the pool name */
elem = nvlist_next_nvpair(bmarks, NULL);
if (elem == NULL)
return (0);
(void) strlcpy(pool, nvpair_name(elem), sizeof (pool));
pool[strcspn(pool, "/#")] = '\0';
error = lzc_ioctl(ZFS_IOC_DESTROY_BOOKMARKS, pool, bmarks, errlist);
return (error);
}
static int
lzc_channel_program_impl(const char *pool, const char *program, boolean_t sync,
uint64_t instrlimit, uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
int error;
nvlist_t *args;
args = fnvlist_alloc();
fnvlist_add_string(args, ZCP_ARG_PROGRAM, program);
fnvlist_add_nvlist(args, ZCP_ARG_ARGLIST, argnvl);
fnvlist_add_boolean_value(args, ZCP_ARG_SYNC, sync);
fnvlist_add_uint64(args, ZCP_ARG_INSTRLIMIT, instrlimit);
fnvlist_add_uint64(args, ZCP_ARG_MEMLIMIT, memlimit);
error = lzc_ioctl(ZFS_IOC_CHANNEL_PROGRAM, pool, args, outnvl);
fnvlist_free(args);
return (error);
}
/*
* Executes a channel program.
*
* If this function returns 0 the channel program was successfully loaded and
* ran without failing. Note that individual commands the channel program ran
* may have failed and the channel program is responsible for reporting such
* errors through outnvl if they are important.
*
* This method may also return:
*
* EINVAL The program contains syntax errors, or an invalid memory or time
* limit was given. No part of the channel program was executed.
* If caused by syntax errors, 'outnvl' contains information about the
* errors.
*
* ECHRNG The program was executed, but encountered a runtime error, such as
* calling a function with incorrect arguments, invoking the error()
* function directly, failing an assert() command, etc. Some portion
* of the channel program may have executed and committed changes.
* Information about the failure can be found in 'outnvl'.
*
* ENOMEM The program fully executed, but the output buffer was not large
* enough to store the returned value. No output is returned through
* 'outnvl'.
*
* ENOSPC The program was terminated because it exceeded its memory usage
* limit. Some portion of the channel program may have executed and
* committed changes to disk. No output is returned through 'outnvl'.
*
* ETIME The program was terminated because it exceeded its Lua instruction
* limit. Some portion of the channel program may have executed and
* committed changes to disk. No output is returned through 'outnvl'.
*/
int
lzc_channel_program(const char *pool, const char *program, uint64_t instrlimit,
uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
return (lzc_channel_program_impl(pool, program, B_TRUE, instrlimit,
memlimit, argnvl, outnvl));
}
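/*
 * Illustrative sketch (not part of this patch): run a small Lua program in
 * syncing context that lists the children of a (hypothetical) pool root
 * dataset and dump whatever the program returns. The instruction and memory
 * limits are arbitrary example values, and the use of zfs.list.children() is
 * an assumption about the channel program Lua API; assumes <stdio.h> and
 * <libnvpair.h> for nvlist_print().
 */
static int
example_lzc_channel_program_usage(void)
{
	const char *prog =
	    "ret = {}\n"
	    "for child in zfs.list.children('tank') do\n"
	    "    ret[child] = 1\n"
	    "end\n"
	    "return ret\n";
	nvlist_t *argnvl = fnvlist_alloc();
	nvlist_t *outnvl = NULL;
	int err;

	err = lzc_channel_program("tank", prog,
	    10 * 1000 * 1000 /* instrlimit */,
	    10 * 1024 * 1024 /* memlimit */, argnvl, &outnvl);
	if (err == 0 && outnvl != NULL)
		nvlist_print(stdout, outnvl);
	fnvlist_free(argnvl);
	nvlist_free(outnvl);
	return (err);
}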
/*
* Executes a read-only channel program.
*
* A read-only channel program works programmatically the same way as a
* normal channel program executed with lzc_channel_program(). The only
* difference is it runs exclusively in open-context and therefore can
* return faster. The downside is that the program cannot change
* on-disk state by calling functions from the zfs.sync submodule.
*
* The return values of this function (and their meaning) are exactly the
* same as the ones described in lzc_channel_program().
*/
int
lzc_channel_program_nosync(const char *pool, const char *program,
uint64_t timeout, uint64_t memlimit, nvlist_t *argnvl, nvlist_t **outnvl)
{
return (lzc_channel_program_impl(pool, program, B_FALSE, timeout,
memlimit, argnvl, outnvl));
}
/*
* Performs key management functions
*
* crypto_cmd should be a value from zfs_ioc_crypto_cmd_t. If the command
* loads or changes a wrapping key, the key should be passed in the
* hidden_args nvlist so that it is not logged.
*/
int
lzc_load_key(const char *fsname, boolean_t noop, uint8_t *wkeydata,
uint_t wkeylen)
{
int error;
nvlist_t *ioc_args;
nvlist_t *hidden_args;
if (wkeydata == NULL)
return (EINVAL);
ioc_args = fnvlist_alloc();
hidden_args = fnvlist_alloc();
fnvlist_add_uint8_array(hidden_args, "wkeydata", wkeydata, wkeylen);
fnvlist_add_nvlist(ioc_args, ZPOOL_HIDDEN_ARGS, hidden_args);
if (noop)
fnvlist_add_boolean(ioc_args, "noop");
error = lzc_ioctl(ZFS_IOC_LOAD_KEY, fsname, ioc_args, NULL);
nvlist_free(hidden_args);
nvlist_free(ioc_args);
return (error);
}
int
lzc_unload_key(const char *fsname)
{
return (lzc_ioctl(ZFS_IOC_UNLOAD_KEY, fsname, NULL, NULL));
}
int
lzc_change_key(const char *fsname, uint64_t crypt_cmd, nvlist_t *props,
uint8_t *wkeydata, uint_t wkeylen)
{
int error;
nvlist_t *ioc_args = fnvlist_alloc();
nvlist_t *hidden_args = NULL;
fnvlist_add_uint64(ioc_args, "crypt_cmd", crypt_cmd);
if (wkeydata != NULL) {
hidden_args = fnvlist_alloc();
fnvlist_add_uint8_array(hidden_args, "wkeydata", wkeydata,
wkeylen);
fnvlist_add_nvlist(ioc_args, ZPOOL_HIDDEN_ARGS, hidden_args);
}
if (props != NULL)
fnvlist_add_nvlist(ioc_args, "props", props);
error = lzc_ioctl(ZFS_IOC_CHANGE_KEY, fsname, ioc_args, NULL);
nvlist_free(hidden_args);
nvlist_free(ioc_args);
return (error);
}
int
lzc_reopen(const char *pool_name, boolean_t scrub_restart)
{
nvlist_t *args = fnvlist_alloc();
int error;
fnvlist_add_boolean_value(args, "scrub_restart", scrub_restart);
error = lzc_ioctl(ZFS_IOC_POOL_REOPEN, pool_name, args, NULL);
nvlist_free(args);
return (error);
}
diff --git a/lib/libzpool/Makefile.am b/lib/libzpool/Makefile.am
index 4edc60cb01df..d4fc201fa7d2 100644
--- a/lib/libzpool/Makefile.am
+++ b/lib/libzpool/Makefile.am
@@ -1,189 +1,193 @@
include $(top_srcdir)/config/Rules.am
VPATH = \
$(top_srcdir)/module/zfs \
$(top_srcdir)/module/zcommon \
$(top_srcdir)/module/lua \
$(top_srcdir)/lib/libzpool
# Suppress unused but set variable warnings often due to ASSERTs
AM_CFLAGS += $(NO_UNUSED_BUT_SET_VARIABLE)
# Includes kernel code generate warnings for large stack frames
AM_CFLAGS += $(FRAME_LARGER_THAN)
DEFAULT_INCLUDES += \
-I$(top_srcdir)/include \
-I$(top_srcdir)/lib/libspl/include
lib_LTLIBRARIES = libzpool.la
USER_C = \
kernel.c \
taskq.c \
util.c
KERNEL_C = \
zfeature_common.c \
zfs_comutil.c \
zfs_deleg.c \
zfs_fletcher.c \
zfs_fletcher_aarch64_neon.c \
zfs_fletcher_avx512.c \
zfs_fletcher_intel.c \
zfs_fletcher_sse.c \
zfs_fletcher_superscalar.c \
zfs_fletcher_superscalar4.c \
zfs_namecheck.c \
zfs_prop.c \
zfs_uio.c \
zpool_prop.c \
zprop_common.c \
abd.c \
arc.c \
blkptr.c \
bplist.c \
bpobj.c \
bptree.c \
bqueue.c \
dbuf.c \
dbuf_stats.c \
ddt.c \
ddt_zap.c \
dmu.c \
dmu_diff.c \
dmu_object.c \
dmu_objset.c \
dmu_send.c \
dmu_traverse.c \
dmu_tx.c \
dmu_zfetch.c \
dnode.c \
dnode_sync.c \
dsl_bookmark.c \
dsl_dataset.c \
dsl_deadlist.c \
dsl_deleg.c \
dsl_dir.c \
dsl_crypt.c \
dsl_pool.c \
dsl_prop.c \
dsl_scan.c \
dsl_synctask.c \
dsl_destroy.c \
dsl_userhold.c \
edonr_zfs.c \
hkdf.c \
fm.c \
gzip.c \
lzjb.c \
lz4.c \
metaslab.c \
mmp.c \
multilist.c \
pathname.c \
range_tree.c \
refcount.c \
rrwlock.c \
sa.c \
sha256.c \
skein_zfs.c \
spa.c \
spa_boot.c \
spa_config.c \
spa_errlog.c \
spa_history.c \
spa_misc.c \
spa_stats.c \
space_map.c \
space_reftree.c \
txg.c \
trace.c \
uberblock.c \
unique.c \
vdev.c \
vdev_cache.c \
vdev_file.c \
+ vdev_indirect_births.c \
+ vdev_indirect.c \
+ vdev_indirect_mapping.c \
vdev_label.c \
vdev_mirror.c \
vdev_missing.c \
vdev_queue.c \
vdev_raidz.c \
+ vdev_raidz_math_aarch64_neon.c \
+ vdev_raidz_math_aarch64_neonx2.c \
+ vdev_raidz_math_avx2.c \
+ vdev_raidz_math_avx512bw.c \
+ vdev_raidz_math_avx512f.c \
vdev_raidz_math.c \
vdev_raidz_math_scalar.c \
vdev_raidz_math_sse2.c \
vdev_raidz_math_ssse3.c \
- vdev_raidz_math_avx2.c \
- vdev_raidz_math_avx512f.c \
- vdev_raidz_math_avx512bw.c \
- vdev_raidz_math_aarch64_neon.c \
- vdev_raidz_math_aarch64_neonx2.c \
+ vdev_removal.c \
vdev_root.c \
zap.c \
zap_leaf.c \
zap_micro.c \
zcp.c \
zcp_get.c \
zcp_global.c \
zcp_iter.c \
zcp_synctask.c \
zfeature.c \
zfs_byteswap.c \
zfs_debug.c \
zfs_fm.c \
zfs_fuid.c \
zfs_sa.c \
zfs_znode.c \
zfs_ratelimit.c \
zfs_rlock.c \
zil.c \
zio.c \
zio_checksum.c \
zio_compress.c \
zio_crypt.c \
zio_inject.c \
zle.c \
zrlock.c
LUA_C = \
lapi.c \
lauxlib.c \
lbaselib.c \
lcode.c \
lcompat.c \
lcorolib.c \
lctype.c \
ldebug.c \
ldo.c \
lfunc.c \
lgc.c \
llex.c \
lmem.c \
lobject.c \
lopcodes.c \
lparser.c \
lstate.c \
lstring.c \
lstrlib.c \
ltable.c \
ltablib.c \
ltm.c \
lvm.c \
lzio.c
nodist_libzpool_la_SOURCES = \
$(USER_C) \
$(KERNEL_C) \
$(LUA_C)
libzpool_la_LIBADD = \
$(top_builddir)/lib/libavl/libavl.la \
$(top_builddir)/lib/libicp/libicp.la \
$(top_builddir)/lib/libnvpair/libnvpair.la \
$(top_builddir)/lib/libspl/libspl.la \
$(top_builddir)/lib/libunicode/libunicode.la
libzpool_la_LIBADD += $(ZLIB) -ldl
libzpool_la_LDFLAGS = -pthread -version-info 2:0:0
EXTRA_DIST = $(USER_C)
diff --git a/man/man5/zfs-module-parameters.5 b/man/man5/zfs-module-parameters.5
index e3c6dd91a59e..818f26449ce9 100644
--- a/man/man5/zfs-module-parameters.5
+++ b/man/man5/zfs-module-parameters.5
@@ -1,2634 +1,2634 @@
'\" te
.\" Copyright (c) 2013 by Turbo Fredriksson <turbo@bayour.com>. All rights reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.TH ZFS-MODULE-PARAMETERS 5 "Oct 28, 2017"
.SH NAME
zfs\-module\-parameters \- ZFS module parameters
.SH DESCRIPTION
.sp
.LP
Description of the different parameters to the ZFS module.
.SS "Module parameters"
.sp
.LP
.sp
.ne 2
.na
\fBdbuf_cache_max_bytes\fR (ulong)
.ad
.RS 12n
Maximum size in bytes of the dbuf cache. When \fB0\fR this value will default
to \fB1/2^dbuf_cache_shift\fR (1/32) of the target ARC size, otherwise the
provided value in bytes will be used. The behavior of the dbuf cache and its
associated settings can be observed via the \fB/proc/spl/kstat/zfs/dbufstats\fR
kstat.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBdbuf_cache_hiwater_pct\fR (uint)
.ad
.RS 12n
The percentage over \fBdbuf_cache_max_bytes\fR when dbufs must be evicted
directly.
.sp
Default value: \fB10\fR%.
.RE
.sp
.ne 2
.na
\fBdbuf_cache_lowater_pct\fR (uint)
.ad
.RS 12n
The percentage below \fBdbuf_cache_max_bytes\fR when the evict thread stops
evicting dbufs.
.sp
Default value: \fB10\fR%.
.RE
.sp
.ne 2
.na
\fBdbuf_cache_shift\fR (int)
.ad
.RS 12n
Set the size of the dbuf cache, \fBdbuf_cache_max_bytes\fR, to a log2 fraction
of the target arc size.
.sp
Default value: \fB5\fR.
.RE
.sp
.ne 2
.na
\fBignore_hole_birth\fR (int)
.ad
.RS 12n
When set, the hole_birth optimization will not be used, and all holes will
always be sent on zfs send. Useful if you suspect your datasets are affected
by a bug in hole_birth.
.sp
Use \fB1\fR for on (default) and \fB0\fR for off.
.RE
.sp
.ne 2
.na
\fBl2arc_feed_again\fR (int)
.ad
.RS 12n
Turbo L2ARC warm-up. When the L2ARC is cold the fill interval will be set as
fast as possible.
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE
.sp
.ne 2
.na
\fBl2arc_feed_min_ms\fR (ulong)
.ad
.RS 12n
Min feed interval in milliseconds. Requires \fBl2arc_feed_again=1\fR and only
applicable in related situations.
.sp
Default value: \fB200\fR.
.RE
.sp
.ne 2
.na
\fBl2arc_feed_secs\fR (ulong)
.ad
.RS 12n
Seconds between L2ARC writing
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBl2arc_headroom\fR (ulong)
.ad
.RS 12n
How far through the ARC lists to search for L2ARC cacheable content, expressed
as a multiplier of \fBl2arc_write_max\fR
.sp
Default value: \fB2\fR.
.RE
.sp
.ne 2
.na
\fBl2arc_headroom_boost\fR (ulong)
.ad
.RS 12n
Scales \fBl2arc_headroom\fR by this percentage when L2ARC contents are being
successfully compressed before writing. A value of 100 disables this feature.
.sp
Default value: \fB200\fR%.
.RE
.sp
.ne 2
.na
\fBl2arc_noprefetch\fR (int)
.ad
.RS 12n
Do not write buffers to L2ARC if they were prefetched but not used by
applications
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE
.sp
.ne 2
.na
\fBl2arc_norw\fR (int)
.ad
.RS 12n
No reads during writes
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBl2arc_write_boost\fR (ulong)
.ad
.RS 12n
Cold L2ARC devices will have \fBl2arc_write_max\fR increased by this amount
while they remain cold.
.sp
Default value: \fB8,388,608\fR.
.RE
.sp
.ne 2
.na
\fBl2arc_write_max\fR (ulong)
.ad
.RS 12n
Max write bytes per interval
.sp
Default value: \fB8,388,608\fR.
.RE
.sp
.ne 2
.na
\fBmetaslab_aliquot\fR (ulong)
.ad
.RS 12n
Metaslab granularity, in bytes. This is roughly similar to what would be
referred to as the "stripe size" in traditional RAID arrays. In normal
operation, ZFS will try to write this amount of data to a top-level vdev
before moving on to the next one.
.sp
Default value: \fB524,288\fR.
.RE
.sp
.ne 2
.na
\fBmetaslab_bias_enabled\fR (int)
.ad
.RS 12n
Enable metaslab group biasing based on its vdev's over- or under-utilization
relative to the pool.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE
.sp
.ne 2
.na
\fBzfs_metaslab_segment_weight_enabled\fR (int)
.ad
.RS 12n
Enable/disable segment-based metaslab selection.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE
.sp
.ne 2
.na
\fBzfs_metaslab_switch_threshold\fR (int)
.ad
.RS 12n
When using segment-based metaslab selection, continue allocating
from the active metaslab until \fBzfs_metaslab_switch_threshold\fR
worth of buckets have been exhausted.
.sp
Default value: \fB2\fR.
.RE
.sp
.ne 2
.na
\fBmetaslab_debug_load\fR (int)
.ad
.RS 12n
Load all metaslabs during pool import.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBmetaslab_debug_unload\fR (int)
.ad
.RS 12n
Prevent metaslabs from being unloaded.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBmetaslab_fragmentation_factor_enabled\fR (int)
.ad
.RS 12n
Enable use of the fragmentation metric in computing metaslab weights.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE
.sp
.ne 2
.na
\fBmetaslabs_per_vdev\fR (int)
.ad
.RS 12n
When a vdev is added, it will be divided into approximately (but no more than) this number of metaslabs.
.sp
Default value: \fB200\fR.
.RE
.sp
.ne 2
.na
\fBmetaslab_preload_enabled\fR (int)
.ad
.RS 12n
Enable metaslab group preloading.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE
.sp
.ne 2
.na
\fBmetaslab_lba_weighting_enabled\fR (int)
.ad
.RS 12n
Give more weight to metaslabs with lower LBAs, assuming they have
greater bandwidth as is typically the case on a modern constant
angular velocity disk drive.
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE
.sp
.ne 2
.na
\fBspa_config_path\fR (charp)
.ad
.RS 12n
SPA config file
.sp
Default value: \fB/etc/zfs/zpool.cache\fR.
.RE
.sp
.ne 2
.na
\fBspa_asize_inflation\fR (int)
.ad
.RS 12n
Multiplication factor used to estimate actual disk consumption from the
size of data being written. The default value is a worst case estimate,
but lower values may be valid for a given pool depending on its
configuration. Pool administrators who understand the factors involved
may wish to specify a more realistic inflation factor, particularly if
they operate close to quota or capacity limits.
.sp
Default value: \fB24\fR.
.RE
.sp
.ne 2
.na
\fBspa_load_verify_data\fR (int)
.ad
.RS 12n
Whether to traverse data blocks during an "extreme rewind" (\fB-X\fR)
import. Use 0 to disable and 1 to enable.
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification. If this parameter is set to 0,
the traversal skips non-metadata blocks. It can be toggled once the
import has started to stop or start the traversal of non-metadata blocks.
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBspa_load_verify_metadata\fR (int)
.ad
.RS 12n
Whether to traverse blocks during an "extreme rewind" (\fB-X\fR)
pool import. Use 0 to disable and 1 to enable.
An extreme rewind import normally performs a full traversal of all
blocks in the pool for verification. If this parameter is set to 0,
the traversal is not performed. It can be toggled once the import has
started to stop or start the traversal.
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBspa_load_verify_maxinflight\fR (int)
.ad
.RS 12n
Maximum concurrent I/Os during the traversal performed during an "extreme
rewind" (\fB-X\fR) pool import.
.sp
Default value: \fB10000\fR.
.RE
.sp
.ne 2
.na
\fBspa_slop_shift\fR (int)
.ad
.RS 12n
Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space
in the pool to be consumed. This ensures that we don't run the pool
completely out of space, due to unaccounted changes (e.g. to the MOS).
It also limits the worst-case time to allocate space. If we have
less than this amount of free space, most ZPL operations (e.g. write,
create) will return ENOSPC.
.sp
Default value: \fB5\fR.
.RE
.sp
.ne 2
.na
\fBzfetch_array_rd_sz\fR (ulong)
.ad
.RS 12n
If prefetching is enabled, disable prefetching for reads larger than this size.
.sp
Default value: \fB1,048,576\fR.
.RE
.sp
.ne 2
.na
\fBzfetch_max_distance\fR (uint)
.ad
.RS 12n
Max bytes to prefetch per stream (default 8MB).
.sp
Default value: \fB8,388,608\fR.
.RE
.sp
.ne 2
.na
\fBzfetch_max_streams\fR (uint)
.ad
.RS 12n
Max number of streams per zfetch (prefetch streams per file).
.sp
Default value: \fB8\fR.
.RE
.sp
.ne 2
.na
\fBzfetch_min_sec_reap\fR (uint)
.ad
.RS 12n
Min time before an active prefetch stream can be reclaimed
.sp
Default value: \fB2\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_dnode_limit\fR (ulong)
.ad
.RS 12n
When the number of bytes consumed by dnodes in the ARC exceeds this number of
bytes, try to unpin some of it in response to demand for non-metadata. This
value acts as a ceiling to the amount of dnode metadata, and defaults to 0,
which indicates that a percentage of the ARC meta buffers, determined by
\fBzfs_arc_dnode_limit_percent\fR, may be used for dnodes.
See also \fBzfs_arc_meta_prune\fR which serves a similar purpose but is used
when the amount of metadata in the ARC exceeds \fBzfs_arc_meta_limit\fR rather
than in response to overall demand for non-metadata.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_dnode_limit_percent\fR (ulong)
.ad
.RS 12n
Percentage that can be consumed by dnodes of ARC meta buffers.
.sp
See also \fBzfs_arc_dnode_limit\fR which serves a similar purpose but has a
higher priority if set to nonzero value.
.sp
Default value: \fB10\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_arc_dnode_reduce_percent\fR (ulong)
.ad
.RS 12n
Percentage of ARC dnodes to try to scan in response to demand for non-metadata
when the number of bytes consumed by dnodes exceeds \fBzfs_arc_dnode_limit\fR.
.sp
Default value: \fB10\fR% of the number of dnodes in the ARC.
.RE
.sp
.ne 2
.na
\fBzfs_arc_average_blocksize\fR (int)
.ad
.RS 12n
The ARC's buffer hash table is sized based on the assumption of an average
block size of \fBzfs_arc_average_blocksize\fR (default 8K). This works out
to roughly 1MB of hash table per 1GB of physical memory with 8-byte pointers.
For configurations with a known larger average block size this value can be
increased to reduce the memory footprint.
.sp
Default value: \fB8192\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_evict_batch_limit\fR (int)
.ad
.RS 12n
Number ARC headers to evict per sub-list before proceeding to another sub-list.
This batch-style operation prevents entire sub-lists from being evicted at once
but comes at a cost of additional unlocking and locking.
.sp
Default value: \fB10\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_grow_retry\fR (int)
.ad
.RS 12n
If set to a non-zero value, it will replace the arc_grow_retry value with this value.
The arc_grow_retry value (default 5) is the number of seconds the ARC will wait before
trying to resume growth after a memory pressure event.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_lotsfree_percent\fR (int)
.ad
.RS 12n
Throttle I/O when free system memory drops below this percentage of total
system memory. Setting this value to 0 will disable the throttle.
.sp
Default value: \fB10\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_arc_max\fR (ulong)
.ad
.RS 12n
Maximum size of the ARC in bytes. If set to 0 then the limit defaults to 1/2 of system
RAM. This value must be at least 67108864 (64 megabytes).
.sp
This value can be changed dynamically with some caveats. It cannot be set back
to 0 while running and reducing it below the current ARC size will not cause
the ARC to shrink without memory pressure to induce shrinking.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_meta_adjust_restarts\fR (ulong)
.ad
.RS 12n
The number of restart passes to make while scanning the ARC, attempting
to free buffers in order to stay below the \fBzfs_arc_meta_limit\fR.
This value should not need to be tuned but is available to facilitate
performance analysis.
.sp
Default value: \fB4096\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_meta_limit\fR (ulong)
.ad
.RS 12n
The maximum allowed size in bytes that meta data buffers are allowed to
consume in the ARC. When this limit is reached meta data buffers will
be reclaimed even if the overall arc_c_max has not been reached. This
value defaults to 0, which indicates that a percentage of the ARC, based on
\fBzfs_arc_meta_limit_percent\fR, may be used for meta data.
.sp
This value may be changed dynamically, except that it cannot be set back to 0
for a specific percent of the ARC; it must be set to an explicit value.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_meta_limit_percent\fR (ulong)
.ad
.RS 12n
Percentage of ARC buffers that can be used for meta data.
See also \fBzfs_arc_meta_limit\fR which serves a similar purpose but has a
higher priority if set to nonzero value.
.sp
Default value: \fB75\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_arc_meta_min\fR (ulong)
.ad
.RS 12n
The minimum allowed size in bytes that meta data buffers may consume in
the ARC. This value defaults to 0 which disables a floor on the amount
of the ARC devoted to meta data.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_meta_prune\fR (int)
.ad
.RS 12n
The number of dentries and inodes to be scanned looking for entries
which can be dropped. This may be required when the ARC reaches the
\fBzfs_arc_meta_limit\fR because dentries and inodes can pin buffers
in the ARC. Increasing this value will cause the dentry and inode caches
to be pruned more aggressively. Setting this value to 0 will disable
pruning the inode and dentry caches.
.sp
Default value: \fB10,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_meta_strategy\fR (int)
.ad
.RS 12n
Define the strategy for ARC meta data buffer eviction (meta reclaim strategy).
A value of 0 (META_ONLY) will evict only the ARC meta data buffers.
A value of 1 (BALANCED) indicates that additional data buffers may be evicted
if that is required in order to evict the required number of meta data buffers.
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_min\fR (ulong)
.ad
.RS 12n
Minimum size of the ARC in bytes. If set to 0 then arc_c_min will default to
consuming the larger of 32M or 1/32 of total system memory.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_min_prefetch_ms\fR (int)
.ad
.RS 12n
Minimum time prefetched blocks are locked in the ARC, specified in ms.
A value of \fB0\fR will default to 1000 ms.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_min_prescient_prefetch_ms\fR (int)
.ad
.RS 12n
Minimum time "prescient prefetched" blocks are locked in the ARC, specified
in ms. These blocks are meant to be prefetched fairly aggressively ahead of
the code that may use them. A value of \fB0\fR will default to 6000 ms.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_multilist_num_sublists\fR (int)
.ad
.RS 12n
To allow more fine-grained locking, each ARC state contains a series
of lists for both data and meta data objects. Locking is performed at
the level of these "sub-lists". This parameter controls the number of
sub-lists per ARC state, and also applies to other uses of the
multilist data structure.
.sp
Default value: \fB4\fR or the number of online CPUs, whichever is greater
.RE
.sp
.ne 2
.na
\fBzfs_arc_overflow_shift\fR (int)
.ad
.RS 12n
The ARC size is considered to be overflowing if it exceeds the current
ARC target size (arc_c) by a threshold determined by this parameter.
The threshold is calculated as a fraction of arc_c using the formula
"arc_c >> \fBzfs_arc_overflow_shift\fR".
The default value of 8 causes the ARC to be considered to be overflowing
if it exceeds the target size by 1/256th (0.3%) of the target size.
When the ARC is overflowing, new buffer allocations are stalled until
the reclaim thread catches up and the overflow condition no longer exists.
.sp
Default value: \fB8\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_p_min_shift\fR (int)
.ad
.RS 12n
If set to a non-zero value, this will update arc_p_min_shift (default 4)
with the new value.
arc_p_min_shift is used as a shift of arc_c for calculating both the minimum
and maximum arc_p.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_p_dampener_disable\fR (int)
.ad
.RS 12n
Disable arc_p adapt dampener
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE
.sp
.ne 2
.na
\fBzfs_arc_shrink_shift\fR (int)
.ad
.RS 12n
If set to a non-zero value, this will update arc_shrink_shift (default 7)
with the new value.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_arc_pc_percent\fR (uint)
.ad
.RS 12n
Percent of pagecache to reclaim arc to.
This tunable allows ZFS arc to play more nicely with the kernel's LRU
pagecache. It can guarantee that the arc size won't collapse under scanning
pressure on the pagecache, yet still allows arc to be reclaimed down to
zfs_arc_min if necessary. This value is specified as percent of pagecache
size (as measured by NR_FILE_PAGES) where that percent may exceed 100. This
only operates during memory pressure/reclaim.
.sp
Default value: \fB0\fR% (disabled).
.RE
.sp
.ne 2
.na
\fBzfs_arc_sys_free\fR (ulong)
.ad
.RS 12n
The target number of bytes the ARC should leave as free memory on the system.
Defaults to the larger of 1/64 of physical memory or 512K. Setting this
option to a non-zero value will override the default.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_autoimport_disable\fR (int)
.ad
.RS 12n
Disable pool import at module load by ignoring the cache file (typically \fB/etc/zfs/zpool.cache\fR).
.sp
Use \fB1\fR for yes (default) and \fB0\fR for no.
.RE
.sp
.ne 2
.na
\fBzfs_checksums_per_second\fR (int)
.ad
.RS 12n
Rate limit checksum events to this many per second. Note that this should
not be set below the zed thresholds (currently 10 checksums over 10 sec)
or else zed may not trigger any action.
.sp
Default value: 20
.RE
.sp
.ne 2
.na
\fBzfs_commit_timeout_pct\fR (int)
.ad
.RS 12n
This controls the amount of time that a ZIL block (lwb) will remain "open"
when it isn't "full", and it has a thread waiting for it to be committed to
stable storage. The timeout is scaled based on a percentage of the last lwb
latency to avoid significantly impacting the latency of each individual
transaction record (itx).
.sp
Default value: \fB5\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_dbgmsg_enable\fR (int)
.ad
.RS 12n
Internally ZFS keeps a small log to facilitate debugging. By default the log
is disabled, to enable it set this option to 1. The contents of the log can
be accessed by reading the /proc/spl/kstat/zfs/dbgmsg file. Writing 0 to
this proc file clears the log.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_dbgmsg_maxsize\fR (int)
.ad
.RS 12n
The maximum size in bytes of the internal ZFS debug log.
.sp
Default value: \fB4M\fR.
.RE
.sp
.ne 2
.na
\fBzfs_dbuf_state_index\fR (int)
.ad
.RS 12n
This feature is currently unused. It is normally used for controlling what
reporting is available under /proc/spl/kstat/zfs.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_deadman_enabled\fR (int)
.ad
.RS 12n
When a pool sync operation takes longer than \fBzfs_deadman_synctime_ms\fR
milliseconds, or when an individual I/O takes longer than
\fBzfs_deadman_ziotime_ms\fR milliseconds, then the operation is considered to
be "hung". If \fBzfs_deadman_enabled\fR is set then the deadman behavior is
invoked as described by the \fBzfs_deadman_failmode\fR module option.
By default the deadman is enabled and configured to \fBwait\fR which results
in "hung" I/Os only being logged. The deadman is automatically disabled
when a pool gets suspended.
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBzfs_deadman_failmode\fR (charp)
.ad
.RS 12n
Controls the failure behavior when the deadman detects a "hung" I/O. Valid
values are \fBwait\fR, \fBcontinue\fR, and \fBpanic\fR.
.sp
\fBwait\fR - Wait for a "hung" I/O to complete. For each "hung" I/O a
"deadman" event will be posted describing that I/O.
.sp
\fBcontinue\fR - Attempt to recover from a "hung" I/O by re-dispatching it
to the I/O pipeline if possible.
.sp
\fBpanic\fR - Panic the system. This can be used to facilitate an automatic
fail-over to a properly configured fail-over partner.
.sp
Default value: \fBwait\fR.
.RE
.sp
.ne 2
.na
\fBzfs_deadman_checktime_ms\fR (int)
.ad
.RS 12n
Check time in milliseconds. This defines the frequency at which we check
for hung I/O and potentially invoke the \fBzfs_deadman_failmode\fR behavior.
.sp
Default value: \fB60,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_deadman_synctime_ms\fR (ulong)
.ad
.RS 12n
Interval in milliseconds after which the deadman is triggered and also
the interval after which a pool sync operation is considered to be "hung".
Once this limit is exceeded the deadman will be invoked every
\fBzfs_deadman_checktime_ms\fR milliseconds until the pool sync completes.
.sp
Default value: \fB600,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_deadman_ziotime_ms\fR (ulong)
.ad
.RS 12n
Interval in milliseconds after which the deadman is triggered and an
individual IO operation is considered to be "hung". As long as the I/O
remains "hung" the deadman will be invoked every \fBzfs_deadman_checktime_ms\fR
milliseconds until the I/O completes.
.sp
Default value: \fB300,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_dedup_prefetch\fR (int)
.ad
.RS 12n
Enable prefetching dedup-ed blks
.sp
Use \fB1\fR for yes and \fB0\fR to disable (default).
.RE
.sp
.ne 2
.na
\fBzfs_delay_min_dirty_percent\fR (int)
.ad
.RS 12n
Start to delay each transaction once there is this amount of dirty data,
expressed as a percentage of \fBzfs_dirty_data_max\fR.
This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB60\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_delay_scale\fR (int)
.ad
.RS 12n
This controls how quickly the transaction delay approaches infinity.
Larger values cause longer delays for a given amount of dirty data.
.sp
For the smoothest delay, this value should be about 1 billion divided
by the maximum number of operations per second. This will smoothly
handle between 10x and 1/10th this number.
.sp
See the section "ZFS TRANSACTION DELAY".
.sp
Note: \fBzfs_delay_scale\fR * \fBzfs_dirty_data_max\fR must be < 2^64.
.sp
Default value: \fB500,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_delays_per_second\fR (int)
.ad
.RS 12n
Rate limit IO delay events to this many per second.
.sp
Default value: 20
.RE
.sp
.ne 2
.na
\fBzfs_delete_blocks\fR (ulong)
.ad
.RS 12n
This is used to define a large file for the purposes of delete. Files
containing more than \fBzfs_delete_blocks\fR blocks will be deleted asynchronously
while smaller files are deleted synchronously. Decreasing this value will
reduce the time spent in an unlink(2) system call at the expense of a longer
delay before the freed space is available.
.sp
Default value: \fB20,480\fR.
.RE
.sp
.ne 2
.na
\fBzfs_dirty_data_max\fR (int)
.ad
.RS 12n
Determines the dirty space limit in bytes. Once this limit is exceeded, new
writes are halted until space frees up. This parameter takes precedence
over \fBzfs_dirty_data_max_percent\fR.
See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB10\fR% of physical RAM, capped at \fBzfs_dirty_data_max_max\fR.
.RE
.sp
.ne 2
.na
\fBzfs_dirty_data_max_max\fR (int)
.ad
.RS 12n
Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed in bytes.
This limit is only enforced at module load time, and will be ignored if
\fBzfs_dirty_data_max\fR is later changed. This parameter takes
precedence over \fBzfs_dirty_data_max_max_percent\fR. See the section
"ZFS TRANSACTION DELAY".
.sp
Default value: \fB25\fR% of physical RAM.
.RE
.sp
.ne 2
.na
\fBzfs_dirty_data_max_max_percent\fR (int)
.ad
.RS 12n
Maximum allowable value of \fBzfs_dirty_data_max\fR, expressed as a
percentage of physical RAM. This limit is only enforced at module load
time, and will be ignored if \fBzfs_dirty_data_max\fR is later changed.
The parameter \fBzfs_dirty_data_max_max\fR takes precedence over this
one. See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB25\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_dirty_data_max_percent\fR (int)
.ad
.RS 12n
Determines the dirty space limit, expressed as a percentage of all
memory. Once this limit is exceeded, new writes are halted until space frees
up. The parameter \fBzfs_dirty_data_max\fR takes precedence over this
one. See the section "ZFS TRANSACTION DELAY".
.sp
Default value: \fB10\fR%, subject to \fBzfs_dirty_data_max_max\fR.
.RE
.sp
.ne 2
.na
\fBzfs_dirty_data_sync\fR (int)
.ad
.RS 12n
Start syncing out a transaction group if there is at least this much dirty data.
.sp
Default value: \fB67,108,864\fR.
.RE
.sp
.ne 2
.na
\fBzfs_fletcher_4_impl\fR (string)
.ad
.RS 12n
Select a fletcher 4 implementation.
.sp
Supported selectors are: \fBfastest\fR, \fBscalar\fR, \fBsse2\fR, \fBssse3\fR,
\fBavx2\fR, \fBavx512f\fR, and \fBaarch64_neon\fR.
All of the selectors except \fBfastest\fR and \fBscalar\fR require instruction
set extensions to be available and will only appear if ZFS detects that they are
present at runtime. If multiple implementations of fletcher 4 are available,
the \fBfastest\fR will be chosen using a micro benchmark. Selecting \fBscalar\fR
results in the original, CPU-based calculation being used. Selecting any option
other than \fBfastest\fR and \fBscalar\fR results in vector instructions from
the respective CPU instruction set being used.
.sp
Default value: \fBfastest\fR.
.RE
.sp
.ne 2
.na
\fBzfs_free_bpobj_enabled\fR (int)
.ad
.RS 12n
Enable/disable the processing of the free_bpobj object.
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
-\fBzfs_free_max_blocks\fR (ulong)
+\fBzfs_async_block_max_blocks\fR (ulong)
.ad
.RS 12n
Maximum number of blocks freed in a single txg.
.sp
Default value: \fB100,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_async_read_max_active\fR (int)
.ad
.RS 12n
Maximum asynchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB3\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_async_read_min_active\fR (int)
.ad
.RS 12n
Minimum asynchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_async_write_active_max_dirty_percent\fR (int)
.ad
.RS 12n
When the pool has more than
\fBzfs_vdev_async_write_active_max_dirty_percent\fR dirty data, use
\fBzfs_vdev_async_write_max_active\fR to limit active async writes. If
the dirty data is between min and max, the active I/O limit is linearly
interpolated. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB60\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_async_write_active_min_dirty_percent\fR (int)
.ad
.RS 12n
When the pool has less than
\fBzfs_vdev_async_write_active_min_dirty_percent\fR dirty data, use
\fBzfs_vdev_async_write_min_active\fR to limit active async writes. If
the dirty data is between min and max, the active I/O limit is linearly
interpolated. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB30\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_async_write_max_active\fR (int)
.ad
.RS 12n
Maximum asynchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_async_write_min_active\fR (int)
.ad
.RS 12n
Minimum asynchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Lower values are associated with better latency on rotational media but poorer
resilver performance. The default value of 2 was chosen as a compromise. A
value of 3 has been shown to improve resilver performance further at a cost of
further increasing latency.
.sp
Default value: \fB2\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_max_active\fR (int)
.ad
.RS 12n
The maximum number of I/Os active to each device. Ideally, this will be >=
the sum of each queue's max_active. It must be at least the sum of each
queue's min_active. See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_scrub_max_active\fR (int)
.ad
.RS 12n
Maximum scrub I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB2\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_scrub_min_active\fR (int)
.ad
.RS 12n
Minimum scrub I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_sync_read_max_active\fR (int)
.ad
.RS 12n
Maximum synchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_sync_read_min_active\fR (int)
.ad
.RS 12n
Minimum synchronous read I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_sync_write_max_active\fR (int)
.ad
.RS 12n
Maximum synchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_sync_write_min_active\fR (int)
.ad
.RS 12n
Minimum synchronous write I/Os active to each device.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB10\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_queue_depth_pct\fR (int)
.ad
.RS 12n
Maximum number of queued allocations per top-level vdev expressed as
a percentage of \fBzfs_vdev_async_write_max_active\fR, which allows the
system to detect devices that are more capable of handling allocations
and to allocate more blocks to those devices. This allows for dynamic
allocation distribution when devices are imbalanced, as fuller devices
tend to be slower than empty devices.
See also \fBzio_dva_throttle_enabled\fR.
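.sp
As an illustration, with the default value of 1000% and the default
\fBzfs_vdev_async_write_max_active\fR of 10, up to 1000% * 10 = 100
allocations may be queued per top-level vdev.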
.sp
Default value: \fB1000\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_expire_snapshot\fR (int)
.ad
.RS 12n
Seconds to expire .zfs/snapshot
.sp
Default value: \fB300\fR.
.RE
.sp
.ne 2
.na
\fBzfs_admin_snapshot\fR (int)
.ad
.RS 12n
Allow the creation, removal, or renaming of entries in the .zfs/snapshot
directory to cause the creation, destruction, or renaming of snapshots.
When enabled this functionality works both locally and over NFS exports
which have the 'no_root_squash' option set. This functionality is disabled
by default.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzfs_flags\fR (int)
.ad
.RS 12n
Set additional debugging flags. The following flags may be bitwise-or'd
together.
.sp
.TS
box;
rB lB
lB lB
r l.
Value Symbolic Name
Description
_
1 ZFS_DEBUG_DPRINTF
Enable dprintf entries in the debug log.
_
2 ZFS_DEBUG_DBUF_VERIFY *
Enable extra dbuf verifications.
_
4 ZFS_DEBUG_DNODE_VERIFY *
Enable extra dnode verifications.
_
8 ZFS_DEBUG_SNAPNAMES
Enable snapshot name verification.
_
16 ZFS_DEBUG_MODIFY
Check for illegally modified ARC buffers.
_
32 ZFS_DEBUG_SPA
Enable spa_dbgmsg entries in the debug log.
_
64 ZFS_DEBUG_ZIO_FREE
Enable verification of block frees.
_
128 ZFS_DEBUG_HISTOGRAM_VERIFY
Enable extra spacemap histogram verifications.
_
256 ZFS_DEBUG_METASLAB_VERIFY
Verify space accounting on disk matches in-core range_trees.
_
512 ZFS_DEBUG_SET_ERROR
Enable SET_ERROR and dprintf entries in the debug log.
.TE
.sp
* Requires debug build.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_free_leak_on_eio\fR (int)
.ad
.RS 12n
If destroy encounters an EIO while reading metadata (e.g. indirect
blocks), space referenced by the missing metadata can not be freed.
Normally this causes the background destroy to become "stalled", as
it is unable to make forward progress. While in this stalled state,
all remaining space to free from the error-encountering filesystem is
"temporarily leaked". Set this flag to cause it to ignore the EIO,
permanently leak the space from indirect blocks that can not be read,
and continue to free everything else that it can.
The default, "stalling" behavior is useful if the storage partially
fails (i.e. some but not all i/os fail), and then later recovers. In
this case, we will be able to continue pool operations while it is
partially failed, and when it recovers, we can continue to free the
space, with no leaks. However, note that this case is actually
fairly rare.
Typically pools either (a) fail completely (but perhaps temporarily,
e.g. a top-level vdev going offline), or (b) have localized,
permanent errors (e.g. disk returns the wrong data due to bit flip or
firmware bug). In case (a), this setting does not matter because the
pool will be suspended and the sync thread will not be able to make
forward progress regardless. In case (b), because the error is
permanent, the best we can do is leak the minimum amount of space,
which is what setting this flag will do. Therefore, it is reasonable
for this flag to normally be set, but we chose the more conservative
approach of not setting it, so that there is no possibility of
leaking space in the "partial temporary" failure case.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_free_min_time_ms\fR (int)
.ad
.RS 12n
During a \fBzfs destroy\fR operation using \fBfeature@async_destroy\fR a minimum
of this much time will be spent working on freeing blocks per txg.
.sp
Default value: \fB1,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_immediate_write_sz\fR (long)
.ad
.RS 12n
Largest data block to write to zil. Larger blocks will be treated as if the
dataset being written to had the property setting \fBlogbias=throughput\fR.
.sp
Default value: \fB32,768\fR.
.RE
.sp
.ne 2
.na
\fBzfs_max_recordsize\fR (int)
.ad
.RS 12n
We currently support block sizes from 512 bytes to 16MB. The benefits of
larger blocks, and thus larger IO, need to be weighed against the cost of
COWing a giant block to modify one byte. Additionally, very large blocks
can have an impact on i/o latency, and also potentially on the memory
allocator. Therefore, we do not allow the recordsize to be set larger than
zfs_max_recordsize (default 1MB). Larger blocks can be created by changing
this tunable, and pools with larger blocks can always be imported and used,
regardless of this setting.
.sp
Default value: \fB1,048,576\fR.
.RE
.sp
.ne 2
.na
\fBzfs_metaslab_fragmentation_threshold\fR (int)
.ad
.RS 12n
Allow metaslabs to keep their active state as long as their fragmentation
percentage is less than or equal to this value. An active metaslab that
exceeds this threshold will no longer keep its active status allowing
better metaslabs to be selected.
.sp
Default value: \fB70\fR.
.RE
.sp
.ne 2
.na
\fBzfs_mg_fragmentation_threshold\fR (int)
.ad
.RS 12n
Metaslab groups are considered eligible for allocations if their
fragmentation metric (measured as a percentage) is less than or equal to
this value. If a metaslab group exceeds this threshold then it will be
skipped unless all metaslab groups within the metaslab class have also
crossed this threshold.
.sp
Default value: \fB85\fR.
.RE
.sp
.ne 2
.na
\fBzfs_mg_noalloc_threshold\fR (int)
.ad
.RS 12n
Defines a threshold at which metaslab groups should be eligible for
allocations. The value is expressed as a percentage of free space
beyond which a metaslab group is always eligible for allocations.
If a metaslab group's free space is less than or equal to the
threshold, the allocator will avoid allocating to that group
unless all groups in the pool have reached the threshold. Once all
groups have reached the threshold, all groups are allowed to accept
allocations. The default value of 0 disables the feature and causes
all metaslab groups to be eligible for allocations.
This parameter allows one to deal with pools having heavily imbalanced
vdevs such as would be the case when a new vdev has been added.
Setting the threshold to a non-zero percentage will stop allocations
from being made to vdevs that aren't filled to the specified percentage
and allow lesser filled vdevs to acquire more allocations than they
otherwise would under the old \fBzfs_mg_alloc_failures\fR facility.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_multihost_history\fR (int)
.ad
.RS 12n
Historical statistics for the last N multihost updates will be available in
\fB/proc/spl/kstat/zfs/<pool>/multihost\fR
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_multihost_interval\fR (ulong)
.ad
.RS 12n
Used to control the frequency of multihost writes which are performed when the
\fBmultihost\fR pool property is on. This is one factor used to determine
the length of the activity check during import.
.sp
The multihost write period is \fBzfs_multihost_interval / leaf-vdevs\fR milliseconds.
This means that on average a multihost write will be issued for each leaf vdev every
\fBzfs_multihost_interval\fR milliseconds. In practice, the observed period can
vary with the I/O load and this observed value is the delay which is stored in
the uberblock.
.sp
On import the activity check waits a minimum amount of time determined by
\fBzfs_multihost_interval * zfs_multihost_import_intervals\fR. The activity
check time may be further extended if the value of mmp delay found in the best
uberblock indicates actual multihost updates happened at longer intervals than
\fBzfs_multihost_interval\fR. A minimum value of \fB100ms\fR is enforced.
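.sp
As a worked example (assuming a hypothetical pool with 8 leaf vdevs and the
default settings):
.nf
	multihost write period = 1000ms / 8 leaf vdevs = 125ms
	minimum activity check = 1000ms * 10 intervals = 10,000ms
.fi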
.sp
Default value: \fB1000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_multihost_import_intervals\fR (uint)
.ad
.RS 12n
Used to control the duration of the activity test on import. Smaller values of
\fBzfs_multihost_import_intervals\fR will reduce the import time but increase
the risk of failing to detect an active pool. The total activity check time is
never allowed to drop below one second. A value of 0 is ignored and treated as
if it were set to \fB1\fR.
.sp
Default value: \fB10\fR.
.RE
.sp
.ne 2
.na
\fBzfs_multihost_fail_intervals\fR (uint)
.ad
.RS 12n
Controls the behavior of the pool when multihost write failures are detected.
.sp
When \fBzfs_multihost_fail_intervals = 0\fR then multihost write failures are ignored.
The failures will still be reported to the ZED which, depending on its
configuration, may take action such as suspending the pool or offlining a
device.
.sp
When \fBzfs_multihost_fail_intervals > 0\fR then sequential multihost write failures
will cause the pool to be suspended. This occurs when
\fBzfs_multihost_fail_intervals * zfs_multihost_interval\fR milliseconds have
passed since the last successful multihost write. This guarantees the activity test
will see multihost writes if the pool is imported.
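.sp
With the defaults (\fBzfs_multihost_fail_intervals\fR = 5,
\fBzfs_multihost_interval\fR = 1000ms), the pool is suspended once
5 * 1000ms = 5 seconds pass without a successful multihost write.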
.sp
Default value: \fB5\fR.
.RE
.sp
.ne 2
.na
\fBzfs_no_scrub_io\fR (int)
.ad
.RS 12n
Set for no scrub I/O. This results in scrubs not actually scrubbing data and
simply doing a metadata crawl of the pool instead.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzfs_no_scrub_prefetch\fR (int)
.ad
.RS 12n
Set to disable block prefetching for scrubs.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzfs_nocacheflush\fR (int)
.ad
.RS 12n
Disable cache flush operations on disks when writing. Beware, this may cause
corruption if disks re-order writes.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzfs_nopwrite_enabled\fR (int)
.ad
.RS 12n
Enable NOP writes
.sp
Use \fB1\fR for yes (default) and \fB0\fR to disable.
.RE
.sp
.ne 2
.na
\fBzfs_dmu_offset_next_sync\fR (int)
.ad
.RS 12n
Enable forcing txg sync to find holes. When enabled, this forces ZFS to act
like prior versions when SEEK_HOLE or SEEK_DATA flags are used: when a dnode
is dirty, txgs are synced so that the dirty data can be found.
.sp
Use \fB1\fR for yes and \fB0\fR to disable (default).
.RE
.sp
.ne 2
.na
\fBzfs_pd_bytes_max\fR (int)
.ad
.RS 12n
The number of bytes which should be prefetched during a pool traversal
(e.g. \fBzfs send\fR or other data crawling operations).
.sp
Default value: \fB52,428,800\fR.
.RE
.sp
.ne 2
.na
\fBzfs_per_txg_dirty_frees_percent\fR (ulong)
.ad
.RS 12n
Tunable to control percentage of dirtied blocks from frees in one TXG.
After this threshold is crossed, additional dirty blocks from frees
wait until the next TXG.
A value of zero will disable this throttle.
.sp
Default value: \fB30\fR.
.RE
.sp
.ne 2
.na
\fBzfs_prefetch_disable\fR (int)
.ad
.RS 12n
This tunable disables predictive prefetch. Note that it leaves "prescient"
prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
prescient prefetch never issues i/os that end up not being needed, so it
can't hurt performance.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzfs_read_chunk_size\fR (long)
.ad
.RS 12n
Bytes to read per chunk
.sp
Default value: \fB1,048,576\fR.
.RE
.sp
.ne 2
.na
\fBzfs_read_history\fR (int)
.ad
.RS 12n
Historical statistics for the last N reads will be available in
\fB/proc/spl/kstat/zfs/<pool>/reads\fR
.sp
Default value: \fB0\fR (no data is kept).
.RE
.sp
.ne 2
.na
\fBzfs_read_history_hits\fR (int)
.ad
.RS 12n
Include cache hits in read history
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzfs_recover\fR (int)
.ad
.RS 12n
Set to attempt to recover from fatal errors. This should only be used as a
last resort, as it typically results in leaked space, or worse.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzfs_resilver_min_time_ms\fR (int)
.ad
.RS 12n
Resilvers are processed by the sync thread. While resilvering it will spend
at least this much time working on a resilver between txg flushes.
.sp
Default value: \fB3,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_scan_ignore_errors\fR (int)
.ad
.RS 12n
If set to a nonzero value, remove the DTL (dirty time list) upon
completion of a pool scan (scrub) even if there were unrepairable
errors. It is intended to be used during pool repair or recovery to
stop resilvering when the pool is next imported.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_scrub_min_time_ms\fR (int)
.ad
.RS 12n
Scrubs are processed by the sync thread. While scrubbing it will spend
at least this much time working on a scrub between txg flushes.
.sp
Default value: \fB1,000\fR.
.RE
.sp
.ne 2
.na
\fBzfs_scan_checkpoint_intval\fR (int)
.ad
.RS 12n
To preserve progress across reboots the sequential scan algorithm periodically
needs to stop metadata scanning and issue all the verification I/Os to disk.
The frequency of this flushing is determined by the
\fBzfs_scan_checkpoint_intval\fR tunable.
.sp
Default value: \fB7200\fR seconds (every 2 hours).
.RE
.sp
.ne 2
.na
\fBzfs_scan_fill_weight\fR (int)
.ad
.RS 12n
This tunable affects how scrub and resilver I/O segments are ordered. A higher
number indicates that we care more about how filled in a segment is, while a
lower number indicates we care more about the size of the extent without
considering the gaps within a segment. This value is only tunable upon module
insertion. Changing the value afterwards will have no effect on scrub or
resilver performance.
.sp
Default value: \fB3\fR.
.RE
.sp
.ne 2
.na
\fBzfs_scan_issue_strategy\fR (int)
.ad
.RS 12n
Determines the order that data will be verified while scrubbing or resilvering.
If set to \fB1\fR, data will be verified as sequentially as possible, given the
amount of memory reserved for scrubbing (see \fBzfs_scan_mem_lim_fact\fR). This
may improve scrub performance if the pool's data is very fragmented. If set to
\fB2\fR, the largest mostly-contiguous chunk of found data will be verified
first. By deferring scrubbing of small segments, we may later find adjacent data
to coalesce and increase the segment size. If set to \fB0\fR, zfs will use
strategy \fB1\fR during normal verification and strategy \fB2\fR while taking a
checkpoint.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_scan_legacy\fR (int)
.ad
.RS 12n
A value of 0 indicates that scrubs and resilvers will gather metadata in
memory before issuing sequential I/O. A value of 1 indicates that the legacy
algorithm will be used where I/O is initiated as soon as it is discovered.
Changing this value to 0 will not affect scrubs or resilvers that are already
in progress.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_scan_max_ext_gap\fR (int)
.ad
.RS 12n
Indicates the largest gap in bytes between scrub / resilver I/Os that will still
be considered sequential for sorting purposes. Changing this value will not
affect scrubs or resilvers that are already in progress.
.sp
Default value: \fB2097152 (2 MB)\fR.
.RE
.sp
.ne 2
.na
\fBzfs_scan_mem_lim_fact\fR (int)
.ad
.RS 12n
Maximum fraction of RAM used for I/O sorting by sequential scan algorithm.
This tunable determines the hard limit for I/O sorting memory usage.
When the hard limit is reached we stop scanning metadata and start issuing
data verification I/O. This is done until we get below the soft limit.
.sp
Default value: \fB20\fR which is 5% of RAM (1/20).
.RE
.sp
.ne 2
.na
\fBzfs_scan_mem_lim_soft_fact\fR (int)
.ad
.RS 12n
The fraction of the hard limit used to determine the soft limit for I/O sorting
by the sequential scan algorithm. When we cross this limit from below no action
is taken. When we cross this limit from above it is because we are issuing
verification I/O. In this case (unless the metadata scan is done) we stop
issuing verification I/O and start scanning metadata again until we get to the
hard limit.
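.sp
As a worked example (assuming a hypothetical system with 64GB of RAM and the
default value of 20 for both this tunable and \fBzfs_scan_mem_lim_fact\fR):
.nf
	hard limit = 64GB / 20  = 3.2GB   (5% of RAM)
	soft limit = 3.2GB / 20 = ~164MB  (0.25% of RAM)
.fi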
.sp
Default value: \fB20\fR which is 5% of the hard limit (1/20).
.RE
.sp
.ne 2
.na
\fBzfs_scan_vdev_limit\fR (int)
.ad
.RS 12n
Maximum amount of data that can be concurrently issued for scrubs and
resilvers per leaf device, given in bytes.
.sp
Default value: \fB41943040\fR.
.RE
.sp
.ne 2
.na
\fBzfs_send_corrupt_data\fR (int)
.ad
.RS 12n
Allow sending of corrupt data (ignore read/checksum errors when sending data)
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzfs_send_queue_length\fR (int)
.ad
.RS 12n
The maximum number of bytes allowed in the \fBzfs send\fR queue. This value
must be at least twice the maximum block size in use.
.sp
Default value: \fB16,777,216\fR.
.RE
.sp
.ne 2
.na
\fBzfs_recv_queue_length\fR (int)
.ad
.RS 12n
The maximum number of bytes allowed in the \fBzfs receive\fR queue. This value
must be at least twice the maximum block size in use.
.sp
Default value: \fB16,777,216\fR.
.RE
.sp
.ne 2
.na
\fBzfs_sync_pass_deferred_free\fR (int)
.ad
.RS 12n
Flushing of data to disk is done in passes. Defer frees starting in this pass
.sp
Default value: \fB2\fR.
.RE
.sp
.ne 2
.na
\fBzfs_sync_pass_dont_compress\fR (int)
.ad
.RS 12n
Don't compress starting in this pass
.sp
Default value: \fB5\fR.
.RE
.sp
.ne 2
.na
\fBzfs_sync_pass_rewrite\fR (int)
.ad
.RS 12n
Rewrite new block pointers starting in this pass
.sp
Default value: \fB2\fR.
.RE
.sp
.ne 2
.na
\fBzfs_sync_taskq_batch_pct\fR (int)
.ad
.RS 12n
This controls the number of threads used by the dp_sync_taskq. The default
value of 75% will create a maximum of one thread per cpu.
.sp
Default value: \fB75\fR%.
.RE
.sp
.ne 2
.na
\fBzfs_txg_history\fR (int)
.ad
.RS 12n
Historical statistics for the last N txgs will be available in
\fB/proc/spl/kstat/zfs/<pool>/txgs\fR
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_txg_timeout\fR (int)
.ad
.RS 12n
Flush dirty data to disk at least every N seconds (maximum txg duration)
.sp
Default value: \fB5\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_aggregation_limit\fR (int)
.ad
.RS 12n
Max vdev I/O aggregation size
.sp
Default value: \fB131,072\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_cache_bshift\fR (int)
.ad
.RS 12n
Shift size to inflate reads to
.sp
Default value: \fB16\fR (effectively 65536).
.RE
.sp
.ne 2
.na
\fBzfs_vdev_cache_max\fR (int)
.ad
.RS 12n
Inflate reads smaller than this value to meet the \fBzfs_vdev_cache_bshift\fR
size (default 64k).
.sp
Default value: \fB16384\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_cache_size\fR (int)
.ad
.RS 12n
Total size of the per-disk cache in bytes.
.sp
Currently this feature is disabled as it has been found to not be helpful
for performance and in some cases harmful.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_mirror_rotating_inc\fR (int)
.ad
.RS 12n
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O immediately
follows its predecessor on rotational vdevs.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_mirror_rotating_seek_inc\fR (int)
.ad
.RS 12n
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O lacks
locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
this window that do not immediately follow the previous I/O are incremented by
half of this value.
.sp
Default value: \fB5\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_mirror_rotating_seek_offset\fR (int)
.ad
.RS 12n
The maximum distance from the last queued I/O within which the balancing
algorithm considers an I/O to have locality.
See the section "ZFS I/O SCHEDULER".
.sp
Default value: \fB1048576\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_mirror_non_rotating_inc\fR (int)
.ad
.RS 12n
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member on non-rotational vdevs
when I/Os do not immediately follow one another.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_mirror_non_rotating_seek_inc\fR (int)
.ad
.RS 12n
A number by which the balancing algorithm increments the load calculation for
the purpose of selecting the least busy mirror member when an I/O lacks
locality as defined by \fBzfs_vdev_mirror_rotating_seek_offset\fR. I/Os within
this window that do not immediately follow the previous I/O are incremented by
half of this value.
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_read_gap_limit\fR (int)
.ad
.RS 12n
Aggregate read I/O operations if the gap on-disk between them is within this
threshold.
.sp
Default value: \fB32,768\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_scheduler\fR (charp)
.ad
.RS 12n
Set the Linux I/O scheduler on whole disk vdevs to this scheduler. Valid options
are \fBnoop\fR, \fBcfq\fR, \fBbfq\fR and \fBdeadline\fR.
.sp
Default value: \fBnoop\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_write_gap_limit\fR (int)
.ad
.RS 12n
Aggregate write I/O over gap
.sp
Default value: \fB4,096\fR.
.RE
.sp
.ne 2
.na
\fBzfs_vdev_raidz_impl\fR (string)
.ad
.RS 12n
Parameter for selecting raidz parity implementation to use.
Options marked (always) below may be selected on module load as they are
supported on all systems.
The remaining options may only be set after the module is loaded, as they
are available only if the implementations are compiled in and supported
on the running system.
Once the module is loaded, the content of
/sys/module/zfs/parameters/zfs_vdev_raidz_impl will show available options
with the currently selected one enclosed in [].
Possible options are:
.nf
  fastest - (always) implementation selected using built-in benchmark
  original - (always) original raidz implementation
  scalar - (always) scalar raidz implementation
  sse2 - implementation using SSE2 instruction set (64bit x86 only)
  ssse3 - implementation using SSSE3 instruction set (64bit x86 only)
  avx2 - implementation using AVX2 instruction set (64bit x86 only)
  avx512f - implementation using AVX512F instruction set (64bit x86 only)
  avx512bw - implementation using AVX512F & AVX512BW instruction sets (64bit x86 only)
  aarch64_neon - implementation using NEON (Aarch64/64 bit ARMv8 only)
  aarch64_neonx2 - implementation using NEON with more unrolling (Aarch64/64 bit ARMv8 only)
.fi
.sp
Default value: \fBfastest\fR.
.RE
.sp
.ne 2
.na
\fBzfs_zevent_cols\fR (int)
.ad
.RS 12n
When zevents are logged to the console use this as the word wrap width.
.sp
Default value: \fB80\fR.
.RE
.sp
.ne 2
.na
\fBzfs_zevent_console\fR (int)
.ad
.RS 12n
Log events to the console
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzfs_zevent_len_max\fR (int)
.ad
.RS 12n
Max event queue length. A value of 0 will result in a calculated value which
increases with the number of CPUs in the system (minimum 64 events). Events
in the queue can be viewed with the \fBzpool events\fR command.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzfs_zil_clean_taskq_maxalloc\fR (int)
.ad
.RS 12n
The maximum number of taskq entries that are allowed to be cached. When this
limit is exceeded transaction records (itxs) will be cleaned synchronously.
.sp
Default value: \fB1048576\fR.
.RE
.sp
.ne 2
.na
\fBzfs_zil_clean_taskq_minalloc\fR (int)
.ad
.RS 12n
The number of taskq entries that are pre-populated when the taskq is first
created and are immediately available for use.
.sp
Default value: \fB1024\fR.
.RE
.sp
.ne 2
.na
\fBzfs_zil_clean_taskq_nthr_pct\fR (int)
.ad
.RS 12n
This controls the number of threads used by the dp_zil_clean_taskq. The default
value of 100% will create a maximum of one thread per cpu.
.sp
Default value: \fB100\fR%.
.RE
.sp
.ne 2
.na
\fBzil_replay_disable\fR (int)
.ad
.RS 12n
Disable intent logging replay. Replay can be disabled to recover from a
corrupted ZIL.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzil_slog_bulk\fR (ulong)
.ad
.RS 12n
Limit SLOG write size per commit executed with synchronous priority.
Any writes above that will be executed with lower (asynchronous) priority
to limit potential SLOG device abuse by a single active ZIL writer.
.sp
Default value: \fB786,432\fR.
.RE
.sp
.ne 2
.na
\fBzio_delay_max\fR (int)
.ad
.RS 12n
A zevent will be logged if a ZIO operation takes more than N milliseconds to
complete. Note that this is only a logging facility, not a timeout on
operations.
.sp
Default value: \fB30,000\fR.
.RE
.sp
.ne 2
.na
\fBzio_dva_throttle_enabled\fR (int)
.ad
.RS 12n
Throttle block allocations in the ZIO pipeline. This allows for
dynamic allocation distribution when devices are imbalanced.
When enabled, the maximum number of pending allocations per top-level vdev
is limited by \fBzfs_vdev_queue_depth_pct\fR.
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBzio_requeue_io_start_cut_in_line\fR (int)
.ad
.RS 12n
Prioritize requeued I/O
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzio_taskq_batch_pct\fR (uint)
.ad
.RS 12n
Percentage of online CPUs (or CPU cores, etc) which will run a worker thread
for IO. These workers are responsible for IO work such as compression and
checksum calculations. A fractional number of CPUs will be rounded down.
.sp
The default value of 75 was chosen to avoid using all CPUs which can result in
latency issues and inconsistent application performance, especially when high
compression is enabled.
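.sp
For example, on a hypothetical 8-CPU system the default of 75 results in
8 * 75% = 6 worker threads, while on a 6-CPU system 6 * 75% = 4.5 is rounded
down to 4 threads.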
.sp
Default value: \fB75\fR.
.RE
.sp
.ne 2
.na
\fBzvol_inhibit_dev\fR (uint)
.ad
.RS 12n
Do not create zvol device nodes. This may slightly improve startup time on
systems with a very large number of zvols.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.sp
.ne 2
.na
\fBzvol_major\fR (uint)
.ad
.RS 12n
Major number for zvol block devices
.sp
Default value: \fB230\fR.
.RE
.sp
.ne 2
.na
\fBzvol_max_discard_blocks\fR (ulong)
.ad
.RS 12n
Discard (aka TRIM) operations done on zvols will be done in batches of this
many blocks, where block size is determined by the \fBvolblocksize\fR property
of a zvol.
.sp
Default value: \fB16,384\fR.
.RE
.sp
.ne 2
.na
\fBzvol_prefetch_bytes\fR (uint)
.ad
.RS 12n
When adding a zvol to the system prefetch \fBzvol_prefetch_bytes\fR
from the start and end of the volume. Prefetching these regions
of the volume is desirable because they are likely to be accessed
immediately by \fBblkid(8)\fR or by the kernel scanning for a partition
table.
.sp
Default value: \fB131,072\fR.
.RE
.sp
.ne 2
.na
\fBzvol_request_sync\fR (uint)
.ad
.RS 12n
When processing I/O requests for a zvol submit them synchronously. This
effectively limits the queue depth to 1 for each I/O submitter. When set
to 0 requests are handled asynchronously by a thread pool. The number of
requests which can be handled concurrently is controlled by \fBzvol_threads\fR.
.sp
Default value: \fB0\fR.
.RE
.sp
.ne 2
.na
\fBzvol_threads\fR (uint)
.ad
.RS 12n
Max number of threads which can handle zvol I/O requests concurrently.
.sp
Default value: \fB32\fR.
.RE
.sp
.ne 2
.na
\fBzvol_volmode\fR (uint)
.ad
.RS 12n
Defines the behaviour of zvol block devices when \fBvolmode\fR is set to \fBdefault\fR.
Valid values are \fB1\fR (full), \fB2\fR (dev) and \fB3\fR (none).
.sp
Default value: \fB1\fR.
.RE
.sp
.ne 2
.na
\fBzfs_qat_disable\fR (int)
.ad
.RS 12n
This tunable disables qat hardware acceleration for gzip compression and
AES-GCM encryption. It is available only if qat acceleration is compiled in
and the qat driver is present.
.sp
Use \fB1\fR for yes and \fB0\fR for no (default).
.RE
.SH ZFS I/O SCHEDULER
ZFS issues I/O operations to leaf vdevs to satisfy and complete I/Os.
The I/O scheduler determines when and in what order those operations are
issued. The I/O scheduler divides operations into five I/O classes
prioritized in the following order: sync read, sync write, async read,
async write, and scrub/resilver. Each queue defines the minimum and
maximum number of concurrent operations that may be issued to the
device. In addition, the device has an aggregate maximum,
\fBzfs_vdev_max_active\fR. Note that the sum of the per-queue minimums
must not exceed the aggregate maximum. If the sum of the per-queue
maximums exceeds the aggregate maximum, then the number of active I/Os
may reach \fBzfs_vdev_max_active\fR, in which case no further I/Os will
be issued regardless of whether all per-queue minimums have been met.
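.sp
With the default settings documented above, the per-queue minimums sum to
10 + 10 + 1 + 2 + 1 = 24 and the per-queue maximums sum to
10 + 10 + 3 + 10 + 2 = 35, both well below the default
\fBzfs_vdev_max_active\fR of 1,000.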
.sp
For many physical devices, throughput increases with the number of
concurrent operations, but latency typically suffers. Further, physical
devices typically have a limit at which more concurrent operations have no
effect on throughput or can actually cause it to decrease.
.sp
The scheduler selects the next operation to issue by first looking for an
I/O class whose minimum has not been satisfied. Once all are satisfied and
the aggregate maximum has not been hit, the scheduler looks for classes
whose maximum has not been satisfied. Iteration through the I/O classes is
done in the order specified above. No further operations are issued if the
aggregate maximum number of concurrent operations has been hit or if there
are no operations queued for an I/O class that has not hit its maximum.
Every time an I/O is queued or an operation completes, the I/O scheduler
looks for new operations to issue.
.sp
In general, smaller max_active's will lead to lower latency of synchronous
operations. Larger max_active's may lead to higher overall throughput,
depending on underlying storage.
.sp
The ratio of the queues' max_actives determines the balance of performance
between reads, writes, and scrubs. E.g., increasing
\fBzfs_vdev_scrub_max_active\fR will cause the scrub or resilver to complete
more quickly, but will cause reads and writes to have higher latency and lower throughput.
.sp
All I/O classes have a fixed maximum number of outstanding operations
except for the async write class. Asynchronous writes represent the data
that is committed to stable storage during the syncing stage for
transaction groups. Transaction groups enter the syncing state
periodically so the number of queued async writes will quickly burst up
and then bleed down to zero. Rather than servicing them as quickly as
possible, the I/O scheduler changes the maximum number of active async
write I/Os according to the amount of dirty data in the pool. Since
both throughput and latency typically increase with the number of
concurrent operations issued to physical devices, reducing the
burstiness in the number of concurrent operations also stabilizes the
response time of operations from other -- and in particular synchronous
-- queues. In broad strokes, the I/O scheduler will issue more
concurrent operations from the async write queue as there's more dirty
data in the pool.
.sp
Async Writes
.sp
The number of concurrent operations issued for the async write I/O class
follows a piece-wise linear function defined by a few adjustable points.
.nf
| o---------| <-- zfs_vdev_async_write_max_active
^ | /^ |
| | / | |
active | / | |
I/O | / | |
count | / | |
| / | |
|-------o | | <-- zfs_vdev_async_write_min_active
0|_______^______|_________|
0% | | 100% of zfs_dirty_data_max
| |
| `-- zfs_vdev_async_write_active_max_dirty_percent
`--------- zfs_vdev_async_write_active_min_dirty_percent
.fi
Until the amount of dirty data exceeds a minimum percentage of the dirty
data allowed in the pool, the I/O scheduler will limit the number of
concurrent operations to the minimum. As that threshold is crossed, the
number of concurrent operations issued increases linearly to the maximum at
the specified maximum percentage of the dirty data allowed in the pool.
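.sp
The following minimal C sketch is illustrative only (the function name and
arguments are hypothetical, not the in-tree implementation); it shows the
piece-wise linear interpolation described above:
.nf
#include <stdint.h>

static int
async_write_max_active(uint64_t dirty, uint64_t dirty_max,
    int min_active, int max_active, int min_pct, int max_pct)
{
        uint64_t lo = dirty_max * min_pct / 100;
        uint64_t hi = dirty_max * max_pct / 100;

        if (dirty <= lo)
                return (min_active);
        if (dirty >= hi)
                return (max_active);
        /* Scale linearly between the two break points. */
        return (min_active + (int)((dirty - lo) *
            (uint64_t)(max_active - min_active) / (hi - lo)));
}
.fi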
.sp
Ideally, the amount of dirty data on a busy pool will stay in the sloped
part of the function between \fBzfs_vdev_async_write_active_min_dirty_percent\fR
and \fBzfs_vdev_async_write_active_max_dirty_percent\fR. If it exceeds the
maximum percentage, this indicates that the rate of incoming data is
greater than the rate that the backend storage can handle. In this case, we
must further throttle incoming writes, as described in the next section.
.SH ZFS TRANSACTION DELAY
We delay transactions when we've determined that the backend storage
isn't able to accommodate the rate of incoming writes.
.sp
If there is already a transaction waiting, we delay relative to when
that transaction will finish waiting. This way the calculated delay time
is independent of the number of threads concurrently executing
transactions.
.sp
If we are the only waiter, wait relative to when the transaction
started, rather than the current time. This credits the transaction for
"time already served", e.g. reading indirect blocks.
.sp
The minimum time for a transaction to take is calculated as:
.nf
min_time = zfs_delay_scale * (dirty - min) / (max - dirty)
min_time is then capped at 100 milliseconds.
.fi
.sp
The delay has two degrees of freedom that can be adjusted via tunables. The
percentage of dirty data at which we start to delay is defined by
\fBzfs_delay_min_dirty_percent\fR. This should typically be at or above
\fBzfs_vdev_async_write_active_max_dirty_percent\fR so that we only start to
delay after writing at full speed has failed to keep up with the incoming write
rate. The scale of the curve is defined by \fBzfs_delay_scale\fR. Roughly speaking,
this variable determines the amount of delay at the midpoint of the curve.
.sp
.nf
delay
10ms +-------------------------------------------------------------*+
| *|
9ms + *+
| *|
8ms + *+
| * |
7ms + * +
| * |
6ms + * +
| * |
5ms + * +
| * |
4ms + * +
| * |
3ms + * +
| * |
2ms + (midpoint) * +
| | ** |
1ms + v *** +
| zfs_delay_scale ----------> ******** |
0 +-------------------------------------*********----------------+
0% <- zfs_dirty_data_max -> 100%
.fi
.sp
Note that since the delay is added to the outstanding time remaining on the
most recent transaction, the delay is effectively the inverse of IOPS.
Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
was chosen such that small changes in the amount of accumulated dirty data
in the first 3/4 of the curve yield relatively small differences in the
amount of delay.
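.sp
As a worked example with the default \fBzfs_delay_scale\fR of 500,000: when
the amount of dirty data sits exactly halfway between the point at which
delays begin and \fBzfs_dirty_data_max\fR, (dirty - min) equals (max - dirty),
so
.nf
	min_time = 500,000 * (dirty - min) / (max - dirty) = 500,000
.fi
which corresponds to the 500us midpoint shown in the curves above, or roughly
2000 IOPS.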
.sp
The effects can be easier to understand when the amount of delay is
represented on a log scale:
.sp
.nf
delay
100ms +-------------------------------------------------------------++
+ +
| |
+ *+
10ms + *+
+ ** +
| (midpoint) ** |
+ | ** +
1ms + v **** +
+ zfs_delay_scale ----------> ***** +
| **** |
+ **** +
100us + ** +
+ * +
| * |
+ * +
10us + * +
+ +
| |
+ +
+--------------------------------------------------------------+
0% <- zfs_dirty_data_max -> 100%
.fi
.sp
Note here that only as the amount of dirty data approaches its limit does
the delay start to increase rapidly. The goal of a properly tuned system
should be to keep the amount of dirty data out of that range by first
ensuring that the appropriate limits are set for the I/O scheduler to reach
optimal throughput on the backend storage, and then by changing the value
of \fBzfs_delay_scale\fR to increase the steepness of the curve.
diff --git a/man/man5/zpool-features.5 b/man/man5/zpool-features.5
index 523fd1fd09cc..ce34a05a22fa 100644
--- a/man/man5/zpool-features.5
+++ b/man/man5/zpool-features.5
@@ -1,678 +1,720 @@
'\" te
-.\" Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+.\" Copyright (c) 2013, 2016 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" The contents of this file are subject to the terms of the Common Development
.\" and Distribution License (the "License"). You may not use this file except
.\" in compliance with the License. You can obtain a copy of the license at
.\" usr/src/OPENSOLARIS.LICENSE or http://www.opensolaris.org/os/licensing.
.\"
.\" See the License for the specific language governing permissions and
.\" limitations under the License. When distributing Covered Code, include this
.\" CDDL HEADER in each file and include the License file at
.\" usr/src/OPENSOLARIS.LICENSE. If applicable, add the following below this
.\" CDDL HEADER, with the fields enclosed by brackets "[]" replaced with your
.\" own identifying information:
.\" Portions Copyright [yyyy] [name of copyright owner]
.TH ZPOOL-FEATURES 5 "Aug 27, 2013"
.SH NAME
zpool\-features \- ZFS pool feature descriptions
.SH DESCRIPTION
.sp
.LP
ZFS pool on\-disk format versions are specified via "features" which replace
the old on\-disk format numbers (the last supported on\-disk format number is
28). To enable a feature on a pool use the \fBupgrade\fR subcommand of the
\fBzpool\fR(8) command, or set the \fBfeature@\fR\fIfeature_name\fR property
to \fBenabled\fR.
.sp
.LP
The pool format does not affect file system version compatibility or the ability
to send file systems between pools.
.sp
.LP
Since most features can be enabled independently of each other the on\-disk
format of the pool is specified by the set of all features marked as
\fBactive\fR on the pool. If the pool was created by another software version
this set may include unsupported features.
.SS "Identifying features"
.sp
.LP
Every feature has a guid of the form \fIcom.example:feature_name\fR. The reverse
DNS name ensures that the feature's guid is unique across all ZFS
implementations. When unsupported features are encountered on a pool they will
be identified by their guids. Refer to the documentation for the ZFS
implementation that created the pool for information about those features.
.sp
.LP
Each supported feature also has a short name. By convention a feature's short
name is the portion of its guid which follows the ':' (e.g.
\fIcom.example:feature_name\fR would have the short name \fIfeature_name\fR),
however a feature's short name may differ across ZFS implementations if
following the convention would result in name conflicts.
.SS "Feature states"
.sp
.LP
Features can be in one of three states:
.sp
.ne 2
.na
\fB\fBactive\fR\fR
.ad
.RS 12n
This feature's on\-disk format changes are in effect on the pool. Support for
this feature is required to import the pool in read\-write mode. If this
feature is not read-only compatible, support is also required to import the pool
in read\-only mode (see "Read\-only compatibility").
.RE
.sp
.ne 2
.na
\fB\fBenabled\fR\fR
.ad
.RS 12n
An administrator has marked this feature as enabled on the pool, but the
feature's on\-disk format changes have not been made yet. The pool can still be
imported by software that does not support this feature, but changes may be made
to the on\-disk format at any time which will move the feature to the
\fBactive\fR state. Some features may support returning to the \fBenabled\fR
state after becoming \fBactive\fR. See feature\-specific documentation for
details.
.RE
.sp
.ne 2
.na
\fBdisabled\fR
.ad
.RS 12n
This feature's on\-disk format changes have not been made and will not be made
unless an administrator moves the feature to the \fBenabled\fR state. Features
cannot be disabled once they have been enabled.
.RE
.sp
.LP
The state of supported features is exposed through pool properties of the form
\fIfeature@short_name\fR.
.SS "Read\-only compatibility"
.sp
.LP
Some features may make on\-disk format changes that do not interfere with other
software's ability to read from the pool. These features are referred to as
"read\-only compatible". If all unsupported features on a pool are read\-only
compatible, the pool can be imported in read\-only mode by setting the
\fBreadonly\fR property during import (see \fBzpool\fR(8) for details on
importing pools).
.SS "Unsupported features"
.sp
.LP
For each unsupported feature enabled on an imported pool a pool property
named \fIunsupported@feature_guid\fR will indicate why the import was allowed
despite the unsupported feature. Possible values for this property are:
.sp
.ne 2
.na
\fB\fBinactive\fR\fR
.ad
.RS 12n
The feature is in the \fBenabled\fR state and therefore the pool's on\-disk
format is still compatible with software that does not support this feature.
.RE
.sp
.ne 2
.na
\fB\fBreadonly\fR\fR
.ad
.RS 12n
The feature is read\-only compatible and the pool has been imported in
read\-only mode.
.RE
.SS "Feature dependencies"
.sp
.LP
Some features depend on other features being enabled in order to function
properly. Enabling a feature will automatically enable any features it
depends on.
.SH FEATURES
.sp
.LP
The following features are supported on this system:
.sp
.ne 2
.na
\fB\fBasync_destroy\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.delphix:async_destroy
READ\-ONLY COMPATIBLE yes
DEPENDENCIES none
.TE
Destroying a file system requires traversing all of its data in order to
return its used space to the pool. Without \fBasync_destroy\fR the file system
is not fully removed until all space has been reclaimed. If the destroy
operation is interrupted by a reboot or power outage the next attempt to open
the pool will need to complete the destroy operation synchronously.
When \fBasync_destroy\fR is enabled the file system's data will be reclaimed
by a background process, allowing the destroy operation to complete without
traversing the entire file system. The background process is able to resume
interrupted destroys after the pool has been opened, eliminating the need
to finish interrupted destroys as part of the open operation. The amount
of space remaining to be reclaimed by the background process is available
through the \fBfreeing\fR property.
This feature is only \fBactive\fR while \fBfreeing\fR is non\-zero.
.RE
.sp
.ne 2
.na
\fB\fBempty_bpobj\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.delphix:empty_bpobj
READ\-ONLY COMPATIBLE yes
DEPENDENCIES none
.TE
This feature increases the performance of creating and using a large
number of snapshots of a single filesystem or volume, and also reduces
the disk space required.
When there are many snapshots, each snapshot uses many Block Pointer
Objects (bpobj's) to track blocks associated with that snapshot.
However, in common use cases, most of these bpobj's are empty. This
feature allows us to create each bpobj on-demand, thus eliminating the
empty bpobjs.
This feature is \fBactive\fR while there are any filesystems, volumes,
or snapshots which were created after enabling this feature.
.RE
.sp
.ne 2
.na
\fB\fBfilesystem_limits\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.joyent:filesystem_limits
READ\-ONLY COMPATIBLE yes
DEPENDENCIES extensible_dataset
.TE
This feature enables filesystem and snapshot limits. These limits can be used
to control how many filesystems and/or snapshots can be created at the point in
the tree on which the limits are set.
This feature is \fBactive\fR once either of the limit properties has been
set on a dataset. Once activated the feature is never deactivated.
.RE
.sp
.ne 2
.na
\fB\fBlz4_compress\fR\fR
.ad
.RS 4n
.TS
l l .
GUID org.illumos:lz4_compress
READ\-ONLY COMPATIBLE no
DEPENDENCIES none
.TE
\fBlz4\fR is a high-performance real-time compression algorithm that
features significantly faster compression and decompression as well as a
higher compression ratio than the older \fBlzjb\fR compression.
Typically, \fBlz4\fR compression is approximately 50% faster on
compressible data and 200% faster on incompressible data than
\fBlzjb\fR. It is also approximately 80% faster on decompression, while
giving approximately 10% better compression ratio.
When the \fBlz4_compress\fR feature is set to \fBenabled\fR, the
administrator can turn on \fBlz4\fR compression on any dataset on the
pool using the \fBzfs\fR(8) command. Please note that doing so will
immediately activate the \fBlz4_compress\fR feature on the underlying
pool. Also, all newly written metadata
will be compressed with the \fBlz4\fR algorithm. Since this feature is not
read-only compatible, this operation will render the pool unimportable
on systems without support for the \fBlz4_compress\fR feature. Booting
off of \fBlz4\fR-compressed root pools is supported.
This feature becomes \fBactive\fR as soon as it is enabled and will
never return to being \fBenabled\fR.
.RE
.sp
.ne 2
.na
\fB\fBspacemap_histogram\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.delphix:spacemap_histogram
READ\-ONLY COMPATIBLE yes
DEPENDENCIES none
.TE
This feature allows ZFS to maintain more information about how free space
is organized within the pool. If this feature is \fBenabled\fR, ZFS will
set this feature to \fBactive\fR when a new space map object is created or
an existing space map is upgraded to the new format. Once the feature is
\fBactive\fR, it will remain in that state until the pool is destroyed.
.RE
.sp
.ne 2
.na
\fB\fBmulti_vdev_crash_dump\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.joyent:multi_vdev_crash_dump
READ\-ONLY COMPATIBLE no
DEPENDENCIES none
.TE
This feature allows a dump device to be configured with a pool comprised
of multiple vdevs. Those vdevs may be arranged in any mirrored or raidz
configuration.
When the \fBmulti_vdev_crash_dump\fR feature is set to \fBenabled\fR,
the administrator can use the \fBdumpadm\fR(1M) command to configure a
dump device on a pool comprised of multiple vdevs.
Under Linux this feature is registered for compatibility but not used.
New pools created under Linux will have the feature \fBenabled\fR but
will never transition to \fBactive\fR. This functionality is not
required in order to support crash dumps under Linux. Existing pools
where this feature is \fBactive\fR can be imported.
.RE
.sp
.ne 2
.na
\fB\fBextensible_dataset\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.delphix:extensible_dataset
READ\-ONLY COMPATIBLE no
DEPENDENCIES none
.TE
This feature allows more flexible use of internal ZFS data structures,
and exists for other features to depend on.
This feature will be \fBactive\fR when the first dependent feature uses it,
and will be returned to the \fBenabled\fR state when all datasets that use
this feature are destroyed.
.RE
.sp
.ne 2
.na
\fB\fBbookmarks\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.delphix:bookmarks
READ\-ONLY COMPATIBLE yes
DEPENDENCIES extensible_dataset
.TE
This feature enables use of the \fBzfs bookmark\fR subcommand.
This feature is \fBactive\fR while any bookmarks exist in the pool.
All bookmarks in the pool can be listed by running
\fBzfs list -t bookmark -r \fIpoolname\fR\fR.
.RE
.sp
.ne 2
.na
\fB\fBenabled_txg\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.delphix:enabled_txg
READ\-ONLY COMPATIBLE yes
DEPENDENCIES none
.TE
Once this feature is enabled ZFS records the transaction group number
in which new features are enabled. This has no user-visible impact,
but other features may depend on this feature.
This feature becomes \fBactive\fR as soon as it is enabled and will
never return to being \fBenabled\fR.
.RE
.sp
.ne 2
.na
\fB\fBhole_birth\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.delphix:hole_birth
READ\-ONLY COMPATIBLE no
DEPENDENCIES enabled_txg
.TE
This feature improves performance of incremental sends ("zfs send -i")
and receives for objects with many holes. The most common case of
hole-filled objects is zvols.
An incremental send stream from snapshot \fBA\fR to snapshot \fBB\fR
contains information about every block that changed between \fBA\fR and
\fBB\fR. Blocks which did not change between those snapshots can be
identified and omitted from the stream using a piece of metadata called
the 'block birth time', but birth times are not recorded for holes (blocks
filled only with zeroes). Since holes created after \fBA\fR cannot be
distinguished from holes created before \fBA\fR, information about every
hole in the entire filesystem or zvol is included in the send stream.
For workloads where holes are rare this is not a problem. However, when
incrementally replicating filesystems or zvols with many holes (for
example a zvol formatted with another filesystem) a lot of time will
be spent sending and receiving unnecessary information about holes that
already exist on the receiving side.
Once the \fBhole_birth\fR feature has been enabled the block birth times
of all new holes will be recorded. Incremental sends between snapshots
created after this feature is enabled will use this new metadata to avoid
sending information about holes that already exist on the receiving side.
This feature becomes \fBactive\fR as soon as it is enabled and will
never return to being \fBenabled\fR.
.RE
.sp
.ne 2
.na
\fB\fBembedded_data\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.delphix:embedded_data
READ\-ONLY COMPATIBLE no
DEPENDENCIES none
.TE
This feature improves the performance and compression ratio of
highly-compressible blocks. Blocks whose contents can compress to 112 bytes
or smaller can take advantage of this feature.
When this feature is enabled, the contents of highly-compressible blocks are
stored in the block "pointer" itself (a misnomer in this case, as it contains
the compressed data, rather than a pointer to its location on disk). Thus
the space of the block (one sector, typically 512 bytes or 4KB) is saved,
and no additional i/o is needed to read and write the data block.
This feature becomes \fBactive\fR as soon as it is enabled and will
never return to being \fBenabled\fR.
.RE
+.sp
+.ne 2
+.na
+\fB\fBdevice_removal\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:device_removal
+READ\-ONLY COMPATIBLE no
+DEPENDENCIES none
+.TE
+
+This feature enables the "zpool remove" subcommand to remove top-level
+vdevs, evacuating them to reduce the total size of the pool.
+
+This feature becomes \fBactive\fR when the "zpool remove" command is used
+on a top-level vdev, and will never return to being \fBenabled\fR.
+
+.RE
+.sp
+.ne 2
+.na
+\fB\fBobsolete_counts\fR\fR
+.ad
+.RS 4n
+.TS
+l l .
+GUID com.delphix:obsolete_counts
+READ\-ONLY COMPATIBLE yes
+DEPENDENCIES device_removal
+.TE
+This feature is an enhancement of device_removal, which will over time
+reduce the memory used to track removed devices. When indirect blocks
+are freed or remapped, we note that their part of the indirect mapping
+is "obsolete", i.e. no longer needed. See also the \fBzfs remap\fR
+subcommand in \fBzfs\fR(1M).
+
+This feature becomes \fBactive\fR when the "zpool remove" command is
+used on a top-level vdev, and will never return to being \fBenabled\fR.
+
+.RE
.sp
.ne 2
.na
\fB\fBlarge_blocks\fR\fR
.ad
.RS 4n
.TS
l l .
GUID org.open-zfs:large_block
READ\-ONLY COMPATIBLE no
DEPENDENCIES extensible_dataset
.TE
The \fBlarge_block\fR feature allows the record size on a dataset to be
set larger than 128KB.
This feature becomes \fBactive\fR once a dataset contains a file with
a block size larger than 128KB, and will return to being \fBenabled\fR once all
filesystems that have ever had their recordsize larger than 128KB are destroyed.
.RE
.sp
.ne 2
.na
\fB\fBlarge_dnode\fR\fR
.ad
.RS 4n
.TS
l l .
GUID org.zfsonlinux:large_dnode
READ\-ONLY COMPATIBLE no
DEPENDENCIES extensible_dataset
.TE
The \fBlarge_dnode\fR feature allows the size of dnodes in a dataset to be
set larger than 512B.
This feature becomes \fBactive\fR once a dataset contains an object with
a dnode larger than 512B, which occurs as a result of setting the
\fBdnodesize\fR dataset property to a value other than \fBlegacy\fR. The
feature will return to being \fBenabled\fR once all filesystems that
have ever contained a dnode larger than 512B are destroyed. Large dnodes
allow more data to be stored in the bonus buffer, thus potentially
improving performance by avoiding the use of spill blocks.
.RE
.sp
.ne 2
.na
\fB\fBsha512\fR\fR
.ad
.RS 4n
.TS
l l .
GUID org.illumos:sha512
READ\-ONLY COMPATIBLE no
DEPENDENCIES extensible_dataset
.TE
This feature enables the use of the SHA-512/256 truncated hash algorithm
(FIPS 180-4) for checksum and dedup. The native 64-bit arithmetic of
SHA-512 provides an approximate 50% performance boost over SHA-256 on
64-bit hardware and is thus a good minimum-change replacement candidate
for systems where hash performance is important, but these systems
cannot for whatever reason utilize the faster \fBskein\fR and
\fBedonr\fR algorithms.
When the \fBsha512\fR feature is set to \fBenabled\fR, the administrator
can turn on the \fBsha512\fR checksum on any dataset using the
\fBzfs set checksum=sha512\fR(1M) command. This feature becomes
\fBactive\fR once a \fBchecksum\fR property has been set to \fBsha512\fR,
and will return to being \fBenabled\fR once all filesystems that have
ever had their checksum set to \fBsha512\fR are destroyed.
Booting off of pools utilizing SHA-512/256 is supported (provided that
the updated GRUB stage2 module is installed).
.RE
.sp
.ne 2
.na
\fB\fBskein\fR\fR
.ad
.RS 4n
.TS
l l .
GUID org.illumos:skein
READ\-ONLY COMPATIBLE no
DEPENDENCIES extensible_dataset
.TE
This feature enables the use of the Skein hash algorithm for checksum
and dedup. Skein is a high-performance secure hash algorithm that was a
finalist in the NIST SHA-3 competition. It provides a very high security
margin and high performance on 64-bit hardware (80% faster than
SHA-256). This implementation also utilizes the new salted checksumming
functionality in ZFS, which means that the checksum is pre-seeded with a
secret 256-bit random key (stored on the pool) before being fed the data
block to be checksummed. Thus the produced checksums are unique to a
given pool, preventing hash collision attacks on systems with dedup.
When the \fBskein\fR feature is set to \fBenabled\fR, the administrator
can turn on the \fBskein\fR checksum on any dataset using the
\fBzfs set checksum=skein\fR(1M) command. This feature becomes
\fBactive\fR once a \fBchecksum\fR property has been set to \fBskein\fR,
and will return to being \fBenabled\fR once all filesystems that have
ever had their checksum set to \fBskein\fR are destroyed.
Booting off of pools using \fBskein\fR is \fBNOT\fR supported
-- any attempt to enable \fBskein\fR on a root pool will fail with an
error.
.RE
.sp
.ne 2
.na
\fB\fBedonr\fR\fR
.ad
.RS 4n
.TS
l l .
GUID org.illumos:edonr
READ\-ONLY COMPATIBLE no
DEPENDENCIES extensible_dataset
.TE
This feature enables the use of the Edon-R hash algorithm for checksum,
including for nopwrite (if compression is also enabled, an overwrite of
a block whose checksum matches the data being written will be ignored).
In an abundance of caution, Edon-R cannot be used with dedup
(without verification).
Edon-R is a very high-performance hash algorithm that was part
of the NIST SHA-3 competition. It provides extremely high hash
performance (over 350% faster than SHA-256), but was not selected
because of its unsuitability as a general purpose secure hash algorithm.
This implementation utilizes the new salted checksumming functionality
in ZFS, which means that the checksum is pre-seeded with a secret
256-bit random key (stored on the pool) before being fed the data block
to be checksummed. Thus the produced checksums are unique to a given
pool.
When the \fBedonr\fR feature is set to \fBenabled\fR, the administrator
can turn on the \fBedonr\fR checksum on any dataset using the
\fBzfs set checksum=edonr\fR(1M) command. This feature becomes
\fBactive\fR once a \fBchecksum\fR property has been set to \fBedonr\fR,
and will return to being \fBenabled\fR once all filesystems that have
ever had their checksum set to \fBedonr\fR are destroyed.
Booting off of pools using \fBedonr\fR is \fBNOT\fR supported
-- any attempt to enable \fBedonr\fR on a root pool will fail with an
error.
.RE
.sp
.ne 2
.na
\fB\fBuserobj_accounting\fR\fR
.ad
.RS 4n
.TS
l l .
GUID org.zfsonlinux:userobj_accounting
READ\-ONLY COMPATIBLE yes
DEPENDENCIES extensible_dataset
.TE
This feature allows administrators to account for object usage by user
and group.
This feature becomes \fBactive\fR as soon as it is enabled and will never
return to being \fBenabled\fR. Each filesystem will be upgraded automatically
when remounted, or when new files are created under that filesystem.
The upgrade can also be started manually on filesystems by running
`zfs set version=current <pool/fs>`. The upgrade process runs in the background
and may take a while to complete for filesystems containing a large number of
files.
.RE
.sp
.ne 2
.na
\fB\fBencryption\fR\fR
.ad
.RS 4n
.TS
l l .
GUID com.datto:encryption
READ\-ONLY COMPATIBLE no
DEPENDENCIES extensible_dataset
.TE
This feature enables the creation and management of natively encrypted datasets.
This feature becomes \fBactive\fR when an encrypted dataset is created and will
be returned to the \fBenabled\fR state when all datasets that use this feature
are destroyed.
.RE
.sp
.ne 2
.na
\fB\fBproject_quota\fR\fR
.ad
.RS 4n
.TS
l l .
GUID org.zfsonlinux:project_quota
READ\-ONLY COMPATIBLE yes
DEPENDENCIES extensible_dataset
.TE
This feature allows administrators to account for space and object usage
against the project identifier (ID).
The project ID is a new object-based attribute. When upgrading an existing
filesystem, objects without a project ID attribute will be assigned a zero
project ID. After this feature is enabled, newly created objects will inherit
their parent directory's project ID if the parent's inherit flag is set (via
\fBchattr +/-P\fR or \fBzfs project [-s|-C]\fR). Otherwise, the new object's
project ID will be set to zero. An object's project ID can be changed at
any time by the owner (or privileged user) via \fBchattr -p $prjid\fR or
\fBzfs project -p $prjid\fR.
This feature will become \fBactive\fR as soon as it is enabled and will never
return to being \fBenabled\fR. Each filesystem will be upgraded automatically
when remounted or when a new file is created under that filesystem. The upgrade
can also be triggered on filesystems via `zfs set version=current <pool/fs>`.
The upgrade process runs in the background and may take a while to complete
for filesystems containing a large number of files.
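.sp
For example, assuming a mounted filesystem at \fB/tank/fs\fR and project ID
1001 (both purely illustrative), a directory can be assigned to a project and
then inspected as follows:
.sp
.nf
# zfs project -s -p 1001 /tank/fs/dir
# zfs project -d /tank/fs/dir
.fi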
.RE
.SH "SEE ALSO"
\fBzpool\fR(8)
diff --git a/man/man8/zfs.8 b/man/man8/zfs.8
index a7edc8bccbf9..db09028b214a 100644
--- a/man/man8/zfs.8
+++ b/man/man8/zfs.8
@@ -1,4944 +1,4957 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\"
.\" Copyright (c) 2009 Sun Microsystems, Inc. All Rights Reserved.
.\" Copyright 2011 Joshua M. Clulow <josh@sysmgr.org>
.\" Copyright (c) 2011, 2017 by Delphix. All rights reserved.
.\" Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
.\" Copyright (c) 2014, Joyent, Inc. All rights reserved.
.\" Copyright (c) 2014 by Adam Stevko. All rights reserved.
.\" Copyright (c) 2014 Integros [integros.com]
.\" Copyright 2016 Richard Laager. All rights reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright 2018 Joyent, Inc.
.\"
.Dd January 10, 2018
.Dt ZFS 8 SMM
.Os Linux
.Sh NAME
.Nm zfs
.Nd configures ZFS file systems
.Sh SYNOPSIS
.Nm
.Fl ?
.Nm
.Cm create
.Op Fl p
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Ar filesystem
.Nm
.Cm create
.Op Fl ps
.Op Fl b Ar blocksize
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Fl V Ar size Ar volume
.Nm
.Cm destroy
.Op Fl Rfnprv
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm destroy
.Op Fl Rdnprv
.Ar filesystem Ns | Ns Ar volume Ns @ Ns Ar snap Ns
.Oo % Ns Ar snap Ns Oo , Ns Ar snap Ns Oo % Ns Ar snap Oc Oc Oc Ns ...
.Nm
.Cm destroy
.Ar filesystem Ns | Ns Ar volume Ns # Ns Ar bookmark
.Nm
.Cm snapshot
.Op Fl r
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Ar filesystem Ns @ Ns Ar snapname Ns | Ns Ar volume Ns @ Ns Ar snapname Ns ...
.Nm
.Cm rollback
.Op Fl Rfr
.Ar snapshot
.Nm
.Cm clone
.Op Fl p
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Ar snapshot Ar filesystem Ns | Ns Ar volume
.Nm
.Cm promote
.Ar clone-filesystem
.Nm
.Cm rename
.Op Fl f
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Nm
.Cm rename
.Op Fl fp
.Ar filesystem Ns | Ns Ar volume
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm rename
.Fl r
.Ar snapshot Ar snapshot
.Nm
.Cm list
.Op Fl r Ns | Ns Fl d Ar depth
.Op Fl Hp
.Oo Fl o Ar property Ns Oo , Ns Ar property Oc Ns ... Oc
.Oo Fl s Ar property Oc Ns ...
.Oo Fl S Ar property Oc Ns ...
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
.Oo Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Oc Ns ...
.Nm
+.Cm remap
+.Ar filesystem Ns | Ns Ar volume
+.Nm
.Cm set
.Ar property Ns = Ns Ar value Oo Ar property Ns = Ns Ar value Oc Ns ...
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns ...
.Nm
.Cm get
.Op Fl r Ns | Ns Fl d Ar depth
.Op Fl Hp
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
.Oo Fl s Ar source Ns Oo , Ns Ar source Oc Ns ... Oc
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
.Cm all | Ar property Ns Oo , Ns Ar property Oc Ns ...
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns | Ns Ar bookmark Ns ...
.Nm
.Cm inherit
.Op Fl rS
.Ar property Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns ...
.Nm
.Cm upgrade
.Nm
.Cm upgrade
.Fl v
.Nm
.Cm upgrade
.Op Fl r
.Op Fl V Ar version
.Fl a | Ar filesystem
.Nm
.Cm userspace
.Op Fl Hinp
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
.Oo Fl s Ar field Oc Ns ...
.Oo Fl S Ar field Oc Ns ...
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar snapshot
.Nm
.Cm groupspace
.Op Fl Hinp
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
.Oo Fl s Ar field Oc Ns ...
.Oo Fl S Ar field Oc Ns ...
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar snapshot
.Nm
.Cm projectspace
.Op Fl Hp
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
.Oo Fl s Ar field Oc Ns ...
.Oo Fl S Ar field Oc Ns ...
.Ar filesystem Ns | Ns Ar snapshot
.Nm
.Cm project
.Oo Fl d Ns | Ns Fl r Ns Oc
.Ar file Ns | Ns Ar directory Ns ...
.Nm
.Cm project
.Fl C
.Oo Fl kr Ns Oc
.Ar file Ns | Ns Ar directory Ns ...
.Nm
.Cm project
.Fl c
.Oo Fl 0 Ns Oc
.Oo Fl d Ns | Ns Fl r Ns Oc
.Op Fl p Ar id
.Ar file Ns | Ns Ar directory Ns ...
.Nm
.Cm project
.Op Fl p Ar id
.Oo Fl rs Ns Oc
.Ar file Ns | Ns Ar directory Ns ...
.Nm
.Cm mount
.Nm
.Cm mount
.Op Fl Olv
.Op Fl o Ar options
.Fl a | Ar filesystem
.Nm
.Cm unmount
.Op Fl f
.Fl a | Ar filesystem Ns | Ns Ar mountpoint
.Nm
.Cm share
.Fl a | Ar filesystem
.Nm
.Cm unshare
.Fl a | Ar filesystem Ns | Ns Ar mountpoint
.Nm
.Cm bookmark
.Ar snapshot bookmark
.Nm
.Cm send
.Op Fl DLPRbcenpvw
.Op Oo Fl I Ns | Ns Fl i Oc Ar snapshot
.Ar snapshot
.Nm
.Cm send
.Op Fl LPcenvw
.Op Fl i Ar snapshot Ns | Ns Ar bookmark
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Nm
.Cm send
.Op Fl Penv
.Fl t Ar receive_resume_token
.Nm
.Cm receive
.Op Fl Fnsuv
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Nm
.Cm receive
.Op Fl Fnsuv
.Op Fl d Ns | Ns Fl e
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
.Ar filesystem
.Nm
.Cm receive
.Fl A
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm allow
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm allow
.Op Fl dglu
.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ...
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm allow
.Op Fl dl
.Fl e Ns | Ns Sy everyone
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ...
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm allow
.Fl c
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ...
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm allow
.Fl s No @ Ns Ar setname
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ...
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm unallow
.Op Fl dglru
.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm unallow
.Op Fl dlr
.Fl e Ns | Ns Sy everyone
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm unallow
.Op Fl r
.Fl c
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm unallow
.Op Fl r
.Fl s @ Ns Ar setname
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar volume
.Nm
.Cm hold
.Op Fl r
.Ar tag Ar snapshot Ns ...
.Nm
.Cm holds
.Op Fl r
.Ar snapshot Ns ...
.Nm
.Cm release
.Op Fl r
.Ar tag Ar snapshot Ns ...
.Nm
.Cm diff
.Op Fl FHt
.Ar snapshot Ar snapshot Ns | Ns Ar filesystem
.Nm
.Cm program
.Op Fl jn
.Op Fl t Ar timeout
.Op Fl m Ar memory_limit
.Ar pool script
.Op Ar arg1 No ...
.Nm
.Cm load-key
.Op Fl nr
.Op Fl L Ar keylocation
.Fl a | Ar filesystem
.Nm
.Cm unload-key
.Op Fl r
.Fl a | Ar filesystem
.Nm
.Cm change-key
.Op Fl l
.Op Fl o Ar keylocation Ns = Ns Ar value
.Op Fl o Ar keyformat Ns = Ns Ar value
.Op Fl o Ar pbkdf2iters Ns = Ns Ar value
.Ar filesystem
.Nm
.Cm change-key
.Fl i
.Op Fl l
.Ar filesystem
.Sh DESCRIPTION
The
.Nm
command configures ZFS datasets within a ZFS storage pool, as described in
.Xr zpool 8 .
A dataset is identified by a unique path within the ZFS namespace.
For example:
.Bd -literal
pool/{filesystem,volume,snapshot}
.Ed
.Pp
where the maximum length of a dataset name is
.Dv MAXNAMELEN
.Pq 256 bytes .
.Pp
A dataset can be one of the following:
.Bl -tag -width "file system"
.It Sy file system
A ZFS dataset of type
.Sy filesystem
can be mounted within the standard system namespace and behaves like other file
systems.
While ZFS file systems are designed to be POSIX compliant, known issues exist
that prevent compliance in some cases.
Applications that depend on standards conformance might fail due to non-standard
behavior when checking file system free space.
.It Sy volume
A logical volume exported as a raw or block device.
This type of dataset should only be used under special circumstances.
File systems are typically used in most environments.
.It Sy snapshot
A read-only version of a file system or volume at a given point in time.
It is specified as
.Ar filesystem Ns @ Ns Ar name
or
.Ar volume Ns @ Ns Ar name .
.It Sy bookmark
Much like a
.Sy snapshot ,
but without the hold on on-disk data. It can be used as the source of a send
(but not for a receive). It is specified as
.Ar filesystem Ns # Ns Ar name
or
.Ar volume Ns # Ns Ar name .
.El
.Ss ZFS File System Hierarchy
A ZFS storage pool is a logical collection of devices that provide space for
datasets.
A storage pool is also the root of the ZFS file system hierarchy.
.Pp
The root of the pool can be accessed as a file system, such as mounting and
unmounting, taking snapshots, and setting properties.
The physical storage characteristics, however, are managed by the
.Xr zpool 8
command.
.Pp
See
.Xr zpool 8
for more information on creating and administering pools.
.Ss Snapshots
A snapshot is a read-only copy of a file system or volume.
Snapshots can be created extremely quickly, and initially consume no additional
space within the pool.
As data within the active dataset changes, the snapshot consumes more data than
would otherwise be shared with the active dataset.
.Pp
Snapshots can have arbitrary names.
Snapshots of volumes can be cloned or rolled back.
Visibility is determined by the
.Sy snapdev
property of the parent volume.
.Pp
File system snapshots can be accessed under the
.Pa .zfs/snapshot
directory in the root of the file system.
Snapshots are automatically mounted on demand and may be unmounted at regular
intervals.
The visibility of the
.Pa .zfs
directory can be controlled by the
.Sy snapdir
property.
.Ss Bookmarks
A bookmark is like a snapshot, a read-only copy of a file system or volume.
Bookmarks can be created extremely quickly, compared to snapshots, and they
consume no additional space within the pool. Bookmarks can also have arbitrary
names, much like snapshots.
.Pp
Unlike snapshots, bookmarks cannot be accessed through the filesystem in any
way. From a storage standpoint a bookmark just provides a way to reference
when a snapshot was created as a distinct object. Bookmarks are initially
tied to a snapshot, not the filesystem or volume, and they will survive if the
snapshot itself is destroyed. Since they are very lightweight there is little
incentive to destroy them.
.Ss Clones
A clone is a writable volume or file system whose initial contents are the same
as another dataset.
As with snapshots, creating a clone is nearly instantaneous, and initially
consumes no additional space.
.Pp
Clones can only be created from a snapshot.
When a snapshot is cloned, it creates an implicit dependency between the parent
and child.
Even though the clone is created somewhere else in the dataset hierarchy, the
original snapshot cannot be destroyed as long as a clone exists.
The
.Sy origin
property exposes this dependency, and the
.Cm destroy
command lists any such dependencies, if they exist.
.Pp
The clone parent-child dependency relationship can be reversed by using the
.Cm promote
subcommand.
This causes the
.Qq origin
file system to become a clone of the specified file system, which makes it
possible to destroy the file system that the clone was created from.
.Ss "Mount Points"
Creating a ZFS file system is a simple operation, so the number of file systems
per system is likely to be numerous.
To cope with this, ZFS automatically manages mounting and unmounting file
systems without the need to edit the
.Pa /etc/fstab
file.
All automatically managed file systems are mounted by ZFS at boot time.
.Pp
By default, file systems are mounted under
.Pa /path ,
where
.Ar path
is the name of the file system in the ZFS namespace.
Directories are created and destroyed as needed.
.Pp
A file system can also have a mount point set in the
.Sy mountpoint
property.
This directory is created as needed, and ZFS automatically mounts the file
system when the
.Nm zfs Cm mount Fl a
command is invoked
.Po without editing
.Pa /etc/fstab
.Pc .
The
.Sy mountpoint
property can be inherited, so if
.Em pool/home
has a mount point of
.Pa /export/stuff ,
then
.Em pool/home/user
automatically inherits a mount point of
.Pa /export/stuff/user .
.Pp
A file system
.Sy mountpoint
property of
.Sy none
prevents the file system from being mounted.
.Pp
If needed, ZFS file systems can also be managed with traditional tools
.Po
.Nm mount ,
.Nm umount ,
.Pa /etc/fstab
.Pc .
If a file system's mount point is set to
.Sy legacy ,
ZFS makes no attempt to manage the file system, and the administrator is
responsible for mounting and unmounting the file system. Because pools must
be imported before a legacy mount can succeed, administrators should ensure
that legacy mounts are only attempted after the zpool import process
finishes at boot time. For example, on machines using systemd, the mount
option
.Pp
.Nm x-systemd.requires=zfs-import.target
.Pp
will ensure that zfs-import completes before systemd attempts mounting
the filesystem. See systemd.mount(5) for details.
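.Pp
For example, a legacy-mounted file system entry in
.Pa /etc/fstab
.Pq the dataset name and mount point below are hypothetical
might look like:
.Bd -literal
pool/data  /mnt/data  zfs  defaults,x-systemd.requires=zfs-import.target  0 0
.Ed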
.Ss Deduplication
Deduplication is the process of removing redundant data at the block level,
reducing the total amount of data stored. If a file system has the
.Sy dedup
property enabled, duplicate data blocks are removed synchronously. The result
is that only unique data is stored and common components are shared among files.
.Pp
Deduplicating data is a very resource-intensive operation. It is generally
recommended that you have at least 1.25 GiB of RAM per 1 TiB of storage when
you enable deduplication. Calculating the exact requirement depends heavily
on the type of data stored in the pool.
.Pp
Enabling deduplication on an improperly-designed system can result in
performance issues (slow IO and administrative operations). It can potentially
lead to problems importing a pool due to memory exhaustion. Deduplication
can consume significant processing power (CPU) and memory as well as generate
additional disk IO.
.Pp
Before creating a pool with deduplication enabled, ensure that you have planned
your hardware requirements appropriately and implemented appropriate recovery
practices, such as regular backups. Consider using
.Sy compression=on
as a less resource-intensive alternative to deduplication.
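.Pp
For example, assuming a hypothetical dataset
.Em pool/home ,
either property can be set per dataset:
.Bd -literal
# zfs set dedup=on pool/home
# zfs set compression=on pool/home
.Ed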
.Ss Native Properties
Properties are divided into two types, native properties and user-defined
.Po or
.Qq user
.Pc
properties.
Native properties either export internal statistics or control ZFS behavior.
In addition, native properties are either editable or read-only.
User properties have no effect on ZFS behavior, but you can use them to annotate
datasets in a way that is meaningful in your environment.
For more information about user properties, see the
.Sx User Properties
section, below.
.Pp
Every dataset has a set of properties that export statistics about the dataset
as well as control various behaviors.
Properties are inherited from the parent unless overridden by the child.
Some properties apply only to certain types of datasets
.Pq file systems, volumes, or snapshots .
.Pp
The values of numeric properties can be specified using human-readable suffixes
.Po for example,
.Sy k ,
.Sy KB ,
.Sy M ,
.Sy Gb ,
and so forth, up to
.Sy Z
for zettabyte
.Pc .
The following are all valid
.Pq and equal
specifications:
.Li 1536M, 1.5g, 1.50GB .
.Pp
The values of non-numeric properties are case sensitive and must be lowercase,
except for
.Sy mountpoint ,
.Sy sharenfs ,
and
.Sy sharesmb .
.Pp
The following native properties consist of read-only statistics about the
dataset.
These properties can be neither set, nor inherited.
Native properties apply to all dataset types unless otherwise noted.
.Bl -tag -width "usedbyrefreservation"
.It Sy available
The amount of space available to the dataset and all its children, assuming that
there is no other activity in the pool.
Because space is shared within a pool, availability can be limited by any number
of factors, including physical pool size, quotas, reservations, or other
datasets within the pool.
.Pp
This property can also be referred to by its shortened column name,
.Sy avail .
.It Sy compressratio
For non-snapshots, the compression ratio achieved for the
.Sy used
space of this dataset, expressed as a multiplier.
The
.Sy used
property includes descendant datasets, and, for clones, does not include the
space shared with the origin snapshot.
For snapshots, the
.Sy compressratio
is the same as the
.Sy refcompressratio
property.
Compression can be turned on by running:
.Nm zfs Cm set Sy compression Ns = Ns Sy on Ar dataset .
The default value is
.Sy off .
.It Sy createtxg
The transaction group (txg) in which the dataset was created. Bookmarks have
the same
.Sy createtxg
as the snapshot they are initially tied to. This property is suitable for
ordering a list of snapshots, e.g. for incremental send and receive.
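.Pp
For example, snapshots of a hypothetical filesystem
.Em pool/fs
can be listed in creation order with:
.Bd -literal
# zfs list -d 1 -t snapshot -o name,createtxg -s createtxg pool/fs
.Ed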
.It Sy creation
The time this dataset was created.
.It Sy clones
For snapshots, this property is a comma-separated list of filesystems or volumes
which are clones of this snapshot.
The clones'
.Sy origin
property is this snapshot.
If the
.Sy clones
property is not empty, then this snapshot can not be destroyed
.Po even with the
.Fl r
or
.Fl f
options
.Pc .
The roles of origin and clone can be swapped by promoting the clone with the
.Nm zfs Cm promote
command.
.It Sy defer_destroy
This property is
.Sy on
if the snapshot has been marked for deferred destroy by using the
.Nm zfs Cm destroy Fl d
command.
Otherwise, the property is
.Sy off .
.It Sy encryptionroot
For encrypted datasets, indicates where the dataset is currently inheriting its
encryption key from. Loading or unloading a key for the
.Sy encryptionroot
will implicitly load / unload the key for any inheriting datasets (see
.Nm zfs Cm load-key
and
.Nm zfs Cm unload-key
for details).
Clones will always share an
encryption key with their origin. See the
.Sx Encryption
section for details.
.It Sy filesystem_count
The total number of filesystems and volumes that exist under this location in
the dataset tree.
This value is only available when a
.Sy filesystem_limit
has been set somewhere in the tree under which the dataset resides.
.It Sy keystatus
Indicates if an encryption key is currently loaded into ZFS. The possible
values are
.Sy none ,
.Sy available ,
and
.Sy unavailable .
See
.Nm zfs Cm load-key
and
.Nm zfs Cm unload-key .
.It Sy guid
The 64 bit GUID of this dataset or bookmark which does not change over its
entire lifetime. When a snapshot is sent to another pool, the received
snapshot has the same GUID. Thus, the
.Sy guid
is suitable to identify a snapshot across pools.
.It Sy logicalreferenced
The amount of space that is
.Qq logically
accessible by this dataset.
See the
.Sy referenced
property.
The logical space ignores the effect of the
.Sy compression
and
.Sy copies
properties, giving a quantity closer to the amount of data that applications
see.
However, it does include space consumed by metadata.
.Pp
This property can also be referred to by its shortened column name,
.Sy lrefer .
.It Sy logicalused
The amount of space that is
.Qq logically
consumed by this dataset and all its descendents.
See the
.Sy used
property.
The logical space ignores the effect of the
.Sy compression
and
.Sy copies
properties, giving a quantity closer to the amount of data that applications
see.
However, it does include space consumed by metadata.
.Pp
This property can also be referred to by its shortened column name,
.Sy lused .
.It Sy mounted
For file systems, indicates whether the file system is currently mounted.
This property can be either
.Sy yes
or
.Sy no .
.It Sy origin
For cloned file systems or volumes, the snapshot from which the clone was
created.
See also the
.Sy clones
property.
.It Sy receive_resume_token
For filesystems or volumes which have saved partially-completed state from
.Sy zfs receive -s ,
this opaque token can be provided to
.Sy zfs send -t
to resume and complete the
.Sy zfs receive .
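.Pp
For example, assuming a hypothetical destination
.Em pool/dst ,
an interrupted receive might be resumed with:
.Bd -literal
# TOKEN=$(zfs get -H -o value receive_resume_token pool/dst)
# zfs send -t $TOKEN | zfs receive -s pool/dst
.Ed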
.It Sy referenced
The amount of data that is accessible by this dataset, which may or may not be
shared with other datasets in the pool.
When a snapshot or clone is created, it initially references the same amount of
space as the file system or snapshot it was created from, since its contents are
identical.
.Pp
This property can also be referred to by its shortened column name,
.Sy refer .
.It Sy refcompressratio
The compression ratio achieved for the
.Sy referenced
space of this dataset, expressed as a multiplier.
See also the
.Sy compressratio
property.
.It Sy snapshot_count
The total number of snapshots that exist under this location in the dataset
tree.
This value is only available when a
.Sy snapshot_limit
has been set somewhere in the tree under which the dataset resides.
.It Sy type
The type of dataset:
.Sy filesystem ,
.Sy volume ,
or
.Sy snapshot .
.It Sy used
The amount of space consumed by this dataset and all its descendents.
This is the value that is checked against this dataset's quota and reservation.
The space used does not include this dataset's reservation, but does take into
account the reservations of any descendent datasets.
The amount of space that a dataset consumes from its parent, as well as the
amount of space that is freed if this dataset is recursively destroyed, is the
greater of its space used and its reservation.
.Pp
The used space of a snapshot
.Po see the
.Sx Snapshots
section
.Pc
is space that is referenced exclusively by this snapshot.
If this snapshot is destroyed, the amount of
.Sy used
space will be freed.
Space that is shared by multiple snapshots isn't accounted for in this metric.
When a snapshot is destroyed, space that was previously shared with this
snapshot can become unique to snapshots adjacent to it, thus changing the used
space of those snapshots.
The used space of the latest snapshot can also be affected by changes in the
file system.
Note that the
.Sy used
space of a snapshot is a subset of the
.Sy written
space of the snapshot.
.Pp
The amount of space used, available, or referenced does not take into account
pending changes.
Pending changes are generally accounted for within a few seconds.
Committing a change to a disk using
.Xr fsync 2
or
.Dv O_SYNC
does not necessarily guarantee that the space usage information is updated
immediately.
.It Sy usedby*
The
.Sy usedby*
properties decompose the
.Sy used
properties into the various reasons that space is used.
Specifically,
.Sy used No =
.Sy usedbychildren No +
.Sy usedbydataset No +
.Sy usedbyrefreservation No +
.Sy usedbysnapshots .
These properties are only available for datasets created on
.Nm zpool
.Qo version 13 Qc
pools.
.It Sy usedbychildren
The amount of space used by children of this dataset, which would be freed if
all the dataset's children were destroyed.
.It Sy usedbydataset
The amount of space used by this dataset itself, which would be freed if the
dataset were destroyed
.Po after first removing any
.Sy refreservation
and destroying any necessary snapshots or descendents
.Pc .
.It Sy usedbyrefreservation
The amount of space used by a
.Sy refreservation
set on this dataset, which would be freed if the
.Sy refreservation
was removed.
.It Sy usedbysnapshots
The amount of space consumed by snapshots of this dataset.
In particular, it is the amount of space that would be freed if all of this
dataset's snapshots were destroyed.
Note that this is not simply the sum of the snapshots'
.Sy used
properties because space can be shared by multiple snapshots.
.It Sy userused Ns @ Ns Em user
The amount of space consumed by the specified user in this dataset.
Space is charged to the owner of each file, as displayed by
.Nm ls Fl l .
The amount of space charged is displayed by
.Nm du
and
.Nm ls Fl s .
See the
.Nm zfs Cm userspace
subcommand for more information.
.Pp
Unprivileged users can access only their own space usage.
The root user, or a user who has been granted the
.Sy userused
privilege with
.Nm zfs Cm allow ,
can access everyone's usage.
.Pp
The
.Sy userused Ns @ Ns Em ...
properties are not displayed by
.Nm zfs Cm get Sy all .
The user's name must be appended after the @ symbol, using one of the following
forms:
.Bl -bullet -width ""
.It
.Em POSIX name
.Po for example,
.Sy joe
.Pc
.It
.Em POSIX numeric ID
.Po for example,
.Sy 789
.Pc
.It
.Em SID name
.Po for example,
.Sy joe.smith@mydomain
.Pc
.It
.Em SID numeric ID
.Po for example,
.Sy S-1-123-456-789
.Pc
.El
.Pp
Files created on Linux always have POSIX owners.
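.Pp
For example, the usage of a hypothetical user
.Em joe
on a hypothetical dataset can be queried with:
.Bd -literal
# zfs get userused@joe pool/home
.Ed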
.It Sy userobjused Ns @ Ns Em user
The
.Sy userobjused
property is similar to
.Sy userused
but instead it counts the number of objects consumed by a user. This property
counts all objects allocated on behalf of the user; it may differ from the
results of system tools such as
.Nm df Fl i .
.Pp
When the property
.Sy xattr=on
is set on a file system, additional objects will be created per-file to store
extended attributes. These additional objects are reflected in the
.Sy userobjused
value and are counted against the user's
.Sy userobjquota .
When a file system is configured to use
.Sy xattr=sa
no additional internal objects are normally required.
.It Sy userrefs
This property is set to the number of user holds on this snapshot.
User holds are set by using the
.Nm zfs Cm hold
command.
.It Sy groupused Ns @ Ns Em group
The amount of space consumed by the specified group in this dataset.
Space is charged to the group of each file, as displayed by
.Nm ls Fl l .
See the
.Sy userused Ns @ Ns Em user
property for more information.
.Pp
Unprivileged users can only access their own groups' space usage.
The root user, or a user who has been granted the
.Sy groupused
privilege with
.Nm zfs Cm allow ,
can access all groups' usage.
.It Sy groupobjused Ns @ Ns Em group
The number of objects consumed by the specified group in this dataset.
Multiple objects may be charged to the group for each file when extended
attributes are in use. See the
.Sy userobjused Ns @ Ns Em user
property for more information.
.Pp
Unprivileged users can only access their own groups' space usage.
The root user, or a user who has been granted the
.Sy groupobjused
privilege with
.Nm zfs Cm allow ,
can access all groups' usage.
.It Sy projectused Ns @ Ns Em project
The amount of space consumed by the specified project in this dataset. The
project is identified via the project identifier (ID), which is an
object-based numerical attribute. A newly created object inherits the
project ID of its parent object if the parent's project ID inherit flag is
set (the flag can be set and changed via
.Nm chattr Fl /+P
or
.Nm zfs project Fl s ) .
A privileged user can set and change an object's project ID via
.Nm chattr Fl p
or
.Nm zfs project Fl s
at any time. Space is charged to the project of each file, as displayed by
.Nm lsattr Fl p
or
.Nm zfs project .
See the
.Sy userused Ns @ Ns Em user
property for more information.
.Pp
The root user, or a user who has been granted the
.Sy projectused
privilege with
.Nm zfs allow ,
can access all projects' usage.
.It Sy projectobjused Ns @ Ns Em project
The
.Sy projectobjused
is similar to
.Sy projectused
but instead it counts the number of objects consumed by the project. When the
property
.Sy xattr=on
is set on a file system, ZFS will create additional objects per-file to store
extended attributes. These additional objects are reflected in the
.Sy projectobjused
value and are counted against the project's
.Sy projectobjquota .
When a filesystem is configured to use
.Sy xattr=sa
no additional internal objects are required. See the
.Sy userobjused Ns @ Ns Em user
property for more information.
.Pp
The root user, or a user who has been granted the
.Sy projectobjused
privilege with
.Nm zfs allow ,
can access all projects' objects usage.
.It Sy volblocksize
For volumes, specifies the block size of the volume.
The
.Sy blocksize
cannot be changed once the volume has been written, so it should be set at
volume creation time.
The default
.Sy blocksize
for volumes is 8 Kbytes.
Any power of 2 from 512 bytes to 128 Kbytes is valid.
.Pp
This property can also be referred to by its shortened column name,
.Sy volblock .
.It Sy written
The amount of space
.Sy referenced
by this dataset, that was written since the previous snapshot
.Pq i.e. that is not referenced by the previous snapshot .
.It Sy written Ns @ Ns Em snapshot
The amount of
.Sy referenced
space written to this dataset since the specified snapshot.
This is the space that is referenced by this dataset but was not referenced by
the specified snapshot.
.Pp
The
.Em snapshot
may be specified as a short snapshot name
.Po just the part after the
.Sy @
.Pc ,
in which case it will be interpreted as a snapshot in the same filesystem as
this dataset.
The
.Em snapshot
may be a full snapshot name
.Po Em filesystem Ns @ Ns Em snapshot Pc ,
which for clones may be a snapshot in the origin's filesystem
.Pq or the origin of the origin's filesystem, etc.
.El
.Pp
The following native properties can be used to change the behavior of a ZFS
dataset.
.Bl -tag -width ""
.It Xo
.Sy aclinherit Ns = Ns Sy discard Ns | Ns Sy noallow Ns | Ns
.Sy restricted Ns | Ns Sy passthrough Ns | Ns Sy passthrough-x
.Xc
Controls how ACEs are inherited when files and directories are created.
.Bl -tag -width "passthrough-x"
.It Sy discard
does not inherit any ACEs.
.It Sy noallow
only inherits inheritable ACEs that specify
.Qq deny
permissions.
.It Sy restricted
default, removes the
.Sy write_acl
and
.Sy write_owner
permissions when the ACE is inherited.
.It Sy passthrough
inherits all inheritable ACEs without any modifications.
.It Sy passthrough-x
same meaning as
.Sy passthrough ,
except that the
.Sy owner@ ,
.Sy group@ ,
and
.Sy everyone@
ACEs inherit the execute permission only if the file creation mode also requests
the execute bit.
.El
.Pp
When the property value is set to
.Sy passthrough ,
files are created with a mode determined by the inheritable ACEs.
If no inheritable ACEs exist that affect the mode, then the mode is set in
accordance to the requested mode from the application.
.Pp
The
.Sy aclinherit
property does not apply to posix ACLs.
.It Sy acltype Ns = Ns Sy off Ns | Ns Sy noacl Ns | Ns Sy posixacl
Controls whether ACLs are enabled and if so what type of ACL to use.
.Bl -tag -width "posixacl"
.It Sy off
default, when a file system has the
.Sy acltype
property set to off then ACLs are disabled.
.It Sy noacl
an alias for
.Sy off
.It Sy posixacl
indicates posix ACLs should be used. Posix ACLs are specific to Linux and are
not functional on other platforms. Posix ACLs are stored as an extended
attribute and therefore will not overwrite any existing NFSv4 ACLs which
may be set.
.El
.Pp
To obtain the best performance when setting
.Sy posixacl ,
users are strongly encouraged to set the
.Sy xattr=sa
property. This will result in the posix ACL being stored more efficiently on
disk. As a consequence, all new extended attributes will only be
accessible from OpenZFS implementations which support the
.Sy xattr=sa
property. See the
.Sy xattr
property for more details.
.It Sy atime Ns = Ns Sy on Ns | Ns Sy off
Controls whether the access time for files is updated when they are read.
Turning this property off avoids producing write traffic when reading files and
can result in significant performance gains, though it might confuse mailers
and other similar utilities. The values
.Sy on
and
.Sy off
are equivalent to the
.Sy atime
and
.Sy noatime
mount options. The default value is
.Sy on .
See also
.Sy relatime
below.
.It Sy canmount Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy noauto
If this property is set to
.Sy off ,
the file system cannot be mounted, and is ignored by
.Nm zfs Cm mount Fl a .
Setting this property to
.Sy off
is similar to setting the
.Sy mountpoint
property to
.Sy none ,
except that the dataset still has a normal
.Sy mountpoint
property, which can be inherited.
Setting this property to
.Sy off
allows datasets to be used solely as a mechanism to inherit properties.
One example of setting
.Sy canmount Ns = Ns Sy off
is to have two datasets with the same
.Sy mountpoint ,
so that the children of both datasets appear in the same directory, but might
have different inherited characteristics.
.Pp
When set to
.Sy noauto ,
a dataset can only be mounted and unmounted explicitly.
The dataset is not mounted automatically when the dataset is created or
imported, nor is it mounted by the
.Nm zfs Cm mount Fl a
command or unmounted by the
.Nm zfs Cm unmount Fl a
command.
.Pp
This property is not inherited.
.It Xo
.Sy checksum Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy fletcher2 Ns | Ns
.Sy fletcher4 Ns | Ns Sy sha256 Ns | Ns Sy noparity Ns | Ns
.Sy sha512 Ns | Ns Sy skein Ns | Ns Sy edonr
.Xc
Controls the checksum used to verify data integrity.
The default value is
.Sy on ,
which automatically selects an appropriate algorithm
.Po currently,
.Sy fletcher4 ,
but this may change in future releases
.Pc .
The value
.Sy off
disables integrity checking on user data.
The value
.Sy noparity
not only disables integrity but also disables maintaining parity for user data.
This setting is used internally by a dump device residing on a RAID-Z pool and
should not be used by any other dataset.
Disabling checksums is
.Sy NOT
a recommended practice.
.Pp
The
.Sy sha512 ,
.Sy skein ,
and
.Sy edonr
checksum algorithms require enabling the appropriate features on the pool.
Please see
.Xr zpool-features 5
for more information on these algorithms.
.Pp
Changing this property affects only newly-written data.
.Pp
Salted checksum algorithms
.Pq Cm edonr , skein
are currently not supported for any filesystem on the boot pools.
.It Xo
.Sy compression Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy gzip Ns | Ns
.Sy gzip- Ns Em N Ns | Ns Sy lz4 Ns | Ns Sy lzjb Ns | Ns Sy zle
.Xc
Controls the compression algorithm used for this dataset.
.Pp
Setting compression to
.Sy on
indicates that the current default compression algorithm should be used.
The default balances compression and decompression speed with compression
ratio, and is expected to work well on a wide variety of workloads.
Unlike all other settings for this property,
.Sy on
does not select a fixed compression type.
As new compression algorithms are added to ZFS and enabled on a pool, the
default compression algorithm may change.
The current default compression algorithm is either
.Sy lzjb
or, if the
.Sy lz4_compress
feature is enabled,
.Sy lz4 .
.Pp
The
.Sy lz4
compression algorithm is a high-performance replacement for the
.Sy lzjb
algorithm.
It features significantly faster compression and decompression, as well as a
moderately higher compression ratio than
.Sy lzjb ,
but can only be used on pools with the
.Sy lz4_compress
feature set to
.Sy enabled .
See
.Xr zpool-features 5
for details on ZFS feature flags and the
.Sy lz4_compress
feature.
.Pp
The
.Sy lzjb
compression algorithm is optimized for performance while providing decent data
compression.
.Pp
The
.Sy gzip
compression algorithm uses the same compression as the
.Xr gzip 1
command.
You can specify the
.Sy gzip
level by using the value
.Sy gzip- Ns Em N ,
where
.Em N
is an integer from 1
.Pq fastest
to 9
.Pq best compression ratio .
Currently,
.Sy gzip
is equivalent to
.Sy gzip-6
.Po which is also the default for
.Xr gzip 1
.Pc .
.Pp
The
.Sy zle
compression algorithm compresses runs of zeros.
.Pp
This property can also be referred to by its shortened column name
.Sy compress .
Changing this property affects only newly-written data.
.It Xo
.Sy context Ns = Ns Sy none Ns | Ns
.Em SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
.Xc
This flag sets the SELinux context for all files in the file system under
a mount point for that file system. See
.Xr selinux 8
for more information.
.It Xo
.Sy fscontext Ns = Ns Sy none Ns | Ns
.Em SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
.Xc
This flag sets the SELinux context for the file system being
mounted. See
.Xr selinux 8
for more information.
.It Xo
.Sy defcontext Ns = Ns Sy none Ns | Ns
.Em SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
.Xc
This flag sets the SELinux default context for unlabeled files. See
.Xr selinux 8
for more information.
.It Xo
.Sy rootcontext Ns = Ns Sy none Ns | Ns
.Em SELinux_User:SElinux_Role:Selinux_Type:Sensitivity_Level
.Xc
This flag sets the SELinux context for the root inode of the file system. See
.Xr selinux 8
for more information.
.It Sy copies Ns = Ns Sy 1 Ns | Ns Sy 2 Ns | Ns Sy 3
Controls the number of copies of data stored for this dataset.
These copies are in addition to any redundancy provided by the pool, for
example, mirroring or RAID-Z.
The copies are stored on different disks, if possible.
The space used by multiple copies is charged to the associated file and dataset,
changing the
.Sy used
property and counting against quotas and reservations.
.Pp
Changing this property only affects newly-written data.
Therefore, set this property at file system creation time by using the
.Fl o Sy copies Ns = Ns Ar N
option.
.Pp
Remember that ZFS will not import a pool with a missing top-level vdev. Do
.Sy NOT
create, for example, a two-disk striped pool and set
.Sy copies=2
on some datasets thinking you have set up redundancy for them. When a disk
fails, you will not be able to import the pool and will have lost all of your
data.
.It Sy devices Ns = Ns Sy on Ns | Ns Sy off
Controls whether device nodes can be opened on this file system.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy dev
and
.Sy nodev
mount options.
.It Xo
.Sy dedup Ns = Ns Sy off Ns | Ns Sy on Ns | Ns Sy verify Ns | Ns
.Sy sha256[,verify] Ns | Ns Sy sha512[,verify] Ns | Ns Sy skein[,verify] Ns | Ns
.Sy edonr,verify
.Xc
Configures deduplication for a dataset. The default value is
.Sy off .
The default deduplication checksum is
.Sy sha256
(this may change in the future). When
.Sy dedup
is enabled, the checksum defined here overrides the
.Sy checksum
property. Setting the value to
.Sy verify
has the same effect as the setting
.Sy sha256,verify .
.Pp
If set to
.Sy verify ,
ZFS will do a byte-to-byte comparison when two blocks have the same
signature to make sure the block contents are identical. Specifying
.Sy verify
is mandatory for the
.Sy edonr
algorithm.
.Pp
Unless necessary, deduplication should NOT be enabled on a system. See
.Sx Deduplication
above.
.It Xo
.Sy dnodesize Ns = Ns Sy legacy Ns | Ns Sy auto Ns | Ns Sy 1k Ns | Ns
.Sy 2k Ns | Ns Sy 4k Ns | Ns Sy 8k Ns | Ns Sy 16k
.Xc
Specifies a compatibility mode or literal value for the size of dnodes in the
file system. The default value is
.Sy legacy .
Setting this property to a value other than
.Sy legacy
requires the large_dnode pool feature to be enabled.
.Pp
Consider setting
.Sy dnodesize
to
.Sy auto
if the dataset uses the
.Sy xattr=sa
property setting and the workload makes heavy use of extended attributes. This
may be applicable to SELinux-enabled systems, Lustre servers, and Samba
servers, for example. Literal values are supported for cases where the optimal
size is known in advance and for performance testing.
.Pp
Leave
.Sy dnodesize
set to
.Sy legacy
if you need to receive a send stream of this dataset on a pool that doesn't
enable the large_dnode feature, or if you need to import this pool on a system
that doesn't support the large_dnode feature.
.Pp
This property can also be referred to by its shortened column name,
.Sy dnsize .
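.Pp
For example, on a hypothetical dataset the property might be set with:
.Bd -literal
# zfs set dnodesize=auto pool/fs
.Ed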
.It Xo
.Sy encryption Ns = Ns Sy off Ns | Ns Sy on Ns | Ns Sy aes-128-ccm Ns | Ns
.Sy aes-192-ccm Ns | Ns Sy aes-256-ccm Ns | Ns Sy aes-128-gcm Ns | Ns
.Sy aes-192-gcm Ns | Ns Sy aes-256-gcm
.Xc
Controls the encryption cipher suite (block cipher, key length, and mode) used
for this dataset. Requires the
.Sy encryption
feature to be enabled on the pool.
Requires a
.Sy keyformat
to be set at dataset creation time.
.Pp
Selecting
.Sy encryption Ns = Ns Sy on
when creating a dataset indicates that the default encryption suite will be
selected, which is currently
.Sy aes-256-ccm .
In order to provide consistent data protection, encryption must be specified at
dataset creation time and it cannot be changed afterwards.
.Pp
For more details and caveats about encryption see the
.Sy Encryption
section.
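.Pp
For example, an encrypted dataset protected by a passphrase
.Pq the dataset name is hypothetical
can be created with:
.Bd -literal
# zfs create -o encryption=on -o keyformat=passphrase pool/encrypted
.Ed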
.It Sy keyformat Ns = Ns Sy raw Ns | Ns Sy hex Ns | Ns Sy passphrase
Controls what format the user's encryption key will be provided as. This
property is only set when the dataset is encrypted.
.Pp
Raw keys and hex keys must be 32 bytes long (regardless of the chosen
encryption suite) and must be randomly generated. A raw key can be generated
with the following command:
.Bd -literal
# dd if=/dev/urandom of=/path/to/output/key bs=32 count=1
.Ed
.Pp
Passphrases must be between 8 and 512 bytes long and will be processed through
PBKDF2 before being used (see the
.Sy pbkdf2iters
property). Even though the
encryption suite cannot be changed after dataset creation, the keyformat can be
with
.Nm zfs Cm change-key .
.It Xo
.Sy keylocation Ns = Ns Sy prompt Ns | Ns Sy file:// Ns Em </absolute/file/path>
.Xc
Controls where the user's encryption key will be loaded from by default for
commands such as
.Nm zfs Cm load-key
and
.Nm zfs Cm mount Fl l .
This property is
only set for encrypted datasets which are encryption roots. If unspecified, the
default is
.Sy prompt .
.Pp
Even though the encryption suite cannot be changed after dataset creation, the
keylocation can be with either
.Nm zfs Cm set
or
.Nm zfs Cm change-key .
If
.Sy prompt
is selected ZFS will ask for the key at the command prompt when it is required
to access the encrypted data (see
.Nm zfs Cm load-key
for details). This setting will also allow the key to be passed in via STDIN,
but users should be careful not to place keys which should be kept secret on
the command line. If a file URI is selected, the key will be loaded from the
specified absolute file path.
.It Sy pbkdf2iters Ns = Ns Ar iterations
Controls the number of PBKDF2 iterations that a
.Sy passphrase
encryption key should be run through when processing it into an encryption key.
This property is only defined when encryption is enabled and a keyformat of
.Sy passphrase
is selected. The goal of PBKDF2 is to significantly increase the
computational difficulty needed to brute force a user's passphrase. This is
accomplished by forcing the attacker to run each passphrase through a
computationally expensive hashing function many times before they arrive at the
resulting key. A user who actually knows the passphrase will only have to pay
this cost once. As CPUs become better at processing, this number should be
raised to ensure that a brute force attack is still not possible. The current
default is
.Sy 350000
and the minimum is
.Sy 100000 .
This property may be changed with
.Nm zfs Cm change-key .
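.Pp
For example, the iteration count of a hypothetical encrypted dataset could be
raised with:
.Bd -literal
# zfs change-key -o pbkdf2iters=500000 pool/encrypted
.Ed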
.It Sy exec Ns = Ns Sy on Ns | Ns Sy off
Controls whether processes can be executed from within this file system.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy exec
and
.Sy noexec
mount options.
.It Sy filesystem_limit Ns = Ns Em count Ns | Ns Sy none
Limits the number of filesystems and volumes that can exist under this point in
the dataset tree.
The limit is not enforced if the user is allowed to change the limit.
Setting a
.Sy filesystem_limit
on a descendent of a filesystem that already has a
.Sy filesystem_limit
does not override the ancestor's
.Sy filesystem_limit ,
but rather imposes an additional limit.
This feature must be enabled to be used
.Po see
.Xr zpool-features 5
.Pc .
.It Sy mountpoint Ns = Ns Pa path Ns | Ns Sy none Ns | Ns Sy legacy
Controls the mount point used for this file system.
See the
.Sx Mount Points
section for more information on how this property is used.
.Pp
When the
.Sy mountpoint
property is changed for a file system, the file system and any children that
inherit the mount point are unmounted.
If the new value is
.Sy legacy ,
then they remain unmounted.
Otherwise, they are automatically remounted in the new location if the property
was previously
.Sy legacy
or
.Sy none ,
or if they were mounted before the property was changed.
In addition, any shared file systems are unshared and shared in the new
location.
.It Sy nbmand Ns = Ns Sy on Ns | Ns Sy off
Controls whether the file system should be mounted with
.Sy nbmand
.Pq Non Blocking mandatory locks .
This is used for SMB clients.
Changes to this property only take effect when the file system is umounted and
remounted.
See
.Xr mount 8
for more information on
.Sy nbmand
mounts. This property is not used on Linux.
.It Sy overlay Ns = Ns Sy off Ns | Ns Sy on
Allow mounting on a busy directory or a directory which already contains
files or directories. This is the default mount behavior for Linux file systems.
For consistency with OpenZFS on other platforms overlay mounts are
.Sy off
by default. Set to
.Sy on
to enable overlay mounts.
.It Sy primarycache Ns = Ns Sy all Ns | Ns Sy none Ns | Ns Sy metadata
Controls what is cached in the primary cache
.Pq ARC .
If this property is set to
.Sy all ,
then both user data and metadata is cached.
If this property is set to
.Sy none ,
then neither user data nor metadata is cached.
If this property is set to
.Sy metadata ,
then only metadata is cached.
The default value is
.Sy all .
.It Sy quota Ns = Ns Em size Ns | Ns Sy none
Limits the amount of space a dataset and its descendents can consume.
This property enforces a hard limit on the amount of space used.
This includes all space consumed by descendents, including file systems and
snapshots.
Setting a quota on a descendent of a dataset that already has a quota does not
override the ancestor's quota, but rather imposes an additional limit.
.Pp
Quotas cannot be set on volumes, as the
.Sy volsize
property acts as an implicit quota.
.It Sy snapshot_limit Ns = Ns Em count Ns | Ns Sy none
Limits the number of snapshots that can be created on a dataset and its
descendents.
Setting a
.Sy snapshot_limit
on a descendent of a dataset that already has a
.Sy snapshot_limit
does not override the ancestor's
.Sy snapshot_limit ,
but rather imposes an additional limit.
The limit is not enforced if the user is allowed to change the limit.
For example, this means that recursive snapshots taken from the global zone are
counted against each delegated dataset within a zone.
This feature must be enabled to be used
.Po see
.Xr zpool-features 5
.Pc .
.It Sy userquota@ Ns Em user Ns = Ns Em size Ns | Ns Sy none
Limits the amount of space consumed by the specified user.
User space consumption is identified by the
.Sy userspace@ Ns Em user
property.
.Pp
Enforcement of user quotas may be delayed by several seconds.
This delay means that a user might exceed their quota before the system notices
that they are over quota and begins to refuse additional writes with the
.Er EDQUOT
error message.
See the
.Nm zfs Cm userspace
subcommand for more information.
.Pp
Unprivileged users can only access their own space usage.
The root user, or a user who has been granted the
.Sy userquota
privilege with
.Nm zfs Cm allow ,
can get and set everyone's quota.
.Pp
This property is not available on volumes, on file systems before version 4, or
on pools before version 15.
The
.Sy userquota@ Ns Em ...
properties are not displayed by
.Nm zfs Cm get Sy all .
The user's name must be appended after the
.Sy @
symbol, using one of the following forms:
.Bl -bullet
.It
.Em POSIX name
.Po for example,
.Sy joe
.Pc
.It
.Em POSIX numeric ID
.Po for example,
.Sy 789
.Pc
.It
.Em SID name
.Po for example,
.Sy joe.smith@mydomain
.Pc
.It
.Em SID numeric ID
.Po for example,
.Sy S-1-123-456-789
.Pc
.El
.Pp
Files created on Linux always have POSIX owners.
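.Pp
For example, a quota for a hypothetical user
.Em joe
on a hypothetical dataset can be set with:
.Bd -literal
# zfs set userquota@joe=50G pool/home
.Ed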
.It Sy userobjquota@ Ns Em user Ns = Ns Em size Ns | Ns Sy none
The
.Sy userobjquota
is similar to
.Sy userquota
but it limits the number of objects a user can create. Please refer to
.Sy userobjused
for more information about how objects are counted.
.It Sy groupquota@ Ns Em group Ns = Ns Em size Ns | Ns Sy none
Limits the amount of space consumed by the specified group.
Group space consumption is identified by the
.Sy groupused@ Ns Em group
property.
.Pp
Unprivileged users can access only their own groups' space usage.
The root user, or a user who has been granted the
.Sy groupquota
privilege with
.Nm zfs Cm allow ,
can get and set all groups' quotas.
.It Sy groupobjquota@ Ns Em group Ns = Ns Em size Ns | Ns Sy none
The
.Sy groupobjquota
is similar to
.Sy groupquota
but it limits number of objects a group can consume. Please refer to
.Sy userobjused
for more information about how objects are counted.
.It Sy projectquota@ Ns Em project Ns = Ns Em size Ns | Ns Sy none
Limits the amount of space consumed by the specified project. Project
space consumption is identified by the
.Sy projectused@ Ns Em project
property. Please refer to
.Sy projectused
for more information about how project is identified and set/changed.
.Pp
The root user, or a user who has been granted the
.Sy projectquota
privilege with
.Nm zfs allow ,
can access all projects' quota.
.It Sy projectobjquota@ Ns Em project Ns = Ns Em size Ns | Ns Sy none
The
.Sy projectobjquota
is similar to
.Sy projectquota
but it limits number of objects a project can consume. Please refer to
.Sy userobjused
for more information about how objects are counted.
.It Sy readonly Ns = Ns Sy on Ns | Ns Sy off
Controls whether this dataset can be modified.
The default value is
.Sy off .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy ro
and
.Sy rw
mount options.
.Pp
This property can also be referred to by its shortened column name,
.Sy rdonly .
.It Sy recordsize Ns = Ns Em size
Specifies a suggested block size for files in the file system.
This property is designed solely for use with database workloads that access
files in fixed-size records.
ZFS automatically tunes block sizes according to internal algorithms optimized
for typical access patterns.
.Pp
For databases that create very large files but access them in small random
chunks, these algorithms may be suboptimal.
Specifying a
.Sy recordsize
greater than or equal to the record size of the database can result in
significant performance gains.
Use of this property for general purpose file systems is strongly discouraged,
and may adversely affect performance.
.Pp
The size specified must be a power of two greater than or equal to 512 and less
than or equal to 128 Kbytes.
If the
.Sy large_blocks
feature is enabled on the pool, the size may be up to 1 Mbyte.
See
.Xr zpool-features 5
for details on ZFS feature flags.
.Pp
Changing the file system's
.Sy recordsize
affects only files created afterward; existing files are unaffected.
.Pp
This property can also be referred to by its shortened column name,
.Sy recsize .
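.Pp
For example, a hypothetical file system holding a database that uses 16 KB
records could be tuned with:
.Bd -literal
# zfs set recordsize=16K pool/db
.Ed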
.It Sy redundant_metadata Ns = Ns Sy all Ns | Ns Sy most
Controls what types of metadata are stored redundantly.
ZFS stores an extra copy of metadata, so that if a single block is corrupted,
the amount of user data lost is limited.
This extra copy is in addition to any redundancy provided at the pool level
.Pq e.g. by mirroring or RAID-Z ,
and is in addition to an extra copy specified by the
.Sy copies
property
.Pq up to a total of 3 copies .
For example if the pool is mirrored,
.Sy copies Ns = Ns 2 ,
and
.Sy redundant_metadata Ns = Ns Sy most ,
then ZFS stores 6 copies of most metadata, and 4 copies of data and some
metadata.
.Pp
When set to
.Sy all ,
ZFS stores an extra copy of all metadata.
If a single on-disk block is corrupt, at worst a single block of user data
.Po which is
.Sy recordsize
bytes long
.Pc
can be lost.
.Pp
When set to
.Sy most ,
ZFS stores an extra copy of most types of metadata.
This can improve performance of random writes, because less metadata must be
written.
In practice, at worst about 100 blocks
.Po of
.Sy recordsize
bytes each
.Pc
of user data can be lost if a single on-disk block is corrupt.
The exact behavior of which metadata blocks are stored redundantly may change in
future releases.
.Pp
The default value is
.Sy all .
.It Sy refquota Ns = Ns Em size Ns | Ns Sy none
Limits the amount of space a dataset can consume.
This property enforces a hard limit on the amount of space used.
This hard limit does not include space used by descendents, including file
systems and snapshots.
.It Sy refreservation Ns = Ns Em size Ns | Ns Sy none Ns | Ns Sy auto
The minimum amount of space guaranteed to a dataset, not including its
descendents.
When the amount of space used is below this value, the dataset is treated as if
it were taking up the amount of space specified by
.Sy refreservation .
The
.Sy refreservation
reservation is accounted for in the parent datasets' space used, and counts
against the parent datasets' quotas and reservations.
.Pp
If
.Sy refreservation
is set, a snapshot is only allowed if there is enough free pool space outside of
this reservation to accommodate the current number of
.Qq referenced
bytes in the dataset.
.Pp
If
.Sy refreservation
is set to
.Sy auto ,
a volume is thick provisioned
.Po or
.Qq not sparse
.Pc .
.Sy refreservation Ns = Ns Sy auto
is only supported on volumes.
See
.Sy volsize
in the
.Sx Native Properties
section for more information about sparse volumes.
.Pp
This property can also be referred to by its shortened column name,
.Sy refreserv .
.It Sy relatime Ns = Ns Sy on Ns | Ns Sy off
Controls the manner in which the access time is updated when
.Sy atime=on
is set. Turning this property on causes the access time to be updated relative
to the modify or change time. Access time is only updated if the previous
access time was earlier than the current modify or change time or if the
existing access time hasn't been updated within the past 24 hours. The default
value is
.Sy off .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy relatime
and
.Sy norelatime
mount options.
.It Sy reservation Ns = Ns Em size Ns | Ns Sy none
The minimum amount of space guaranteed to a dataset and its descendants.
When the amount of space used is below this value, the dataset is treated as if
it were taking up the amount of space specified by its reservation.
Reservations are accounted for in the parent datasets' space used, and count
against the parent datasets' quotas and reservations.
.Pp
This property can also be referred to by its shortened column name,
.Sy reserv .
.It Sy secondarycache Ns = Ns Sy all Ns | Ns Sy none Ns | Ns Sy metadata
Controls what is cached in the secondary cache
.Pq L2ARC .
If this property is set to
.Sy all ,
then both user data and metadata are cached.
If this property is set to
.Sy none ,
then neither user data nor metadata is cached.
If this property is set to
.Sy metadata ,
then only metadata is cached.
The default value is
.Sy all .
.It Sy setuid Ns = Ns Sy on Ns | Ns Sy off
Controls whether the setuid bit is respected for the file system.
The default value is
.Sy on .
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy suid
and
.Sy nosuid
mount options.
.It Sy sharesmb Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Em opts
Controls whether the file system is shared by using
.Sy Samba USERSHARES
and what options are to be used. Otherwise, the file system is automatically
shared and unshared with the
.Nm zfs Cm share
and
.Nm zfs Cm unshare
commands. If the property is set to on, the
.Xr net 8
command is invoked to create a
.Sy USERSHARE .
.Pp
Because SMB shares require a resource name, a unique resource name is
constructed from the dataset name. The constructed name is a copy of the
dataset name except that any characters in the dataset name that would be
invalid in the resource name are replaced with underscore (_) characters.
Linux does not currently support additional options which might be available
on Solaris.
.Pp
If the
.Sy sharesmb
property is set to
.Sy off ,
the file systems are unshared.
.Pp
The share is created with the ACL (Access Control List) "Everyone:F" ("F"
stands for "full permissions", i.e. read and write permissions) and no guest
access (which means Samba must be able to authenticate a real user via
system passwd/shadow, LDAP, or smbpasswd) by default. This means that any
additional access control (disallowing specific users specific access, etc.)
must be done on the underlying file system.
.It Sy sharenfs Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Em opts
Controls whether the file system is shared via NFS, and what options are to be
used.
A file system with a
.Sy sharenfs
property of
.Sy off
is managed with the
.Xr exportfs 8
command and entries in the
.Em /etc/exports
file.
Otherwise, the file system is automatically shared and unshared with the
.Nm zfs Cm share
and
.Nm zfs Cm unshare
commands.
If the property is set to
.Sy on ,
the dataset is shared using the default options:
.Pp
.Em sec=sys,rw,crossmnt,no_subtree_check,no_root_squash
.Pp
See
.Xr exports 5
for the meaning of the default options. Otherwise, the
.Xr exportfs 8
command is invoked with options equivalent to the contents of this property.
.Pp
When the
.Sy sharenfs
property is changed for a dataset, the dataset and any children inheriting the
property are re-shared with the new options, only if the property was previously
.Sy off ,
or if they were shared before the property was changed.
If the new property is
.Sy off ,
the file systems are unshared.
.It Sy logbias Ns = Ns Sy latency Ns | Ns Sy throughput
Provide a hint to ZFS about handling of synchronous requests in this dataset.
If
.Sy logbias
is set to
.Sy latency
.Pq the default ,
ZFS will use pool log devices
.Pq if configured
to handle the requests at low latency.
If
.Sy logbias
is set to
.Sy throughput ,
ZFS will not use configured pool log devices.
ZFS will instead optimize synchronous operations for global pool throughput and
efficient use of resources.
.It Sy snapdev Ns = Ns Sy hidden Ns | Ns Sy visible
Controls whether the volume snapshot devices under
.Em /dev/zvol/<pool>
are hidden or visible. The default value is
.Sy hidden .
.It Sy snapdir Ns = Ns Sy hidden Ns | Ns Sy visible
Controls whether the
.Pa .zfs
directory is hidden or visible in the root of the file system as discussed in
the
.Sx Snapshots
section.
The default value is
.Sy hidden .
.It Sy sync Ns = Ns Sy standard Ns | Ns Sy always Ns | Ns Sy disabled
Controls the behavior of synchronous requests
.Pq e.g. fsync, O_DSYNC .
.Sy standard
is the
.Tn POSIX
specified behavior of ensuring all synchronous requests are written to stable
storage and all devices are flushed to ensure data is not cached by device
controllers
.Pq this is the default .
.Sy always
causes every file system transaction to be written and flushed before its
system call returns.
This has a large performance penalty.
.Sy disabled
disables synchronous requests.
File system transactions are only committed to stable storage periodically.
This option will give the highest performance.
However, it is very dangerous as ZFS would be ignoring the synchronous
transaction demands of applications such as databases or NFS.
Administrators should only use this option when the risks are understood.
.It Sy version Ns = Ns Em N Ns | Ns Sy current
The on-disk version of this file system, which is independent of the pool
version.
This property can only be set to later supported versions.
See the
.Nm zfs Cm upgrade
command.
.It Sy volsize Ns = Ns Em size
For volumes, specifies the logical size of the volume.
By default, creating a volume establishes a reservation of equal size.
For storage pools with a version number of 9 or higher, a
.Sy refreservation
is set instead.
Any changes to
.Sy volsize
are reflected in an equivalent change to the reservation
.Po or
.Sy refreservation
.Pc .
The
.Sy volsize
can only be set to a multiple of
.Sy volblocksize ,
and cannot be zero.
.Pp
The reservation is kept equal to the volume's logical size to prevent unexpected
behavior for consumers.
Without the reservation, the volume could run out of space, resulting in
undefined behavior or data corruption, depending on how the volume is used.
These effects can also occur when the volume size is changed while it is in use
.Pq particularly when shrinking the size .
Extreme care should be used when adjusting the volume size.
.Pp
Though not recommended, a
.Qq sparse volume
.Po also known as
.Qq thin provisioned
.Pc
can be created by specifying the
.Fl s
option to the
.Nm zfs Cm create Fl V
command, or by changing the value of the
.Sy refreservation
property
.Po or
.Sy reservation
property on pool version 8 or earlier
.Pc
after the volume has been created.
A
.Qq sparse volume
is a volume where the value of
.Sy refreservation
is less than the size of the volume plus the space required to store its
metadata.
Consequently, writes to a sparse volume can fail with
.Er ENOSPC
when the pool is low on space.
For a sparse volume, changes to
.Sy volsize
are not reflected in the
.Sy refreservation .
A volume that is not sparse is said to be
.Qq thick provisioned .
A sparse volume can become thick provisioned by setting
.Sy refreservation
to
.Sy auto .
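.Pp
For example, an existing sparse volume (name chosen for illustration) can be
converted to a thick provisioned volume as follows:
.Bd -literal
# zfs set refreservation=auto pool/vol
.Ed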
.It Sy volmode Ns = Ns Cm default | full | geom | dev | none
This property specifies how volumes should be exposed to the OS.
Setting it to
.Sy full
exposes volumes as fully fledged block devices, providing maximal
functionality. The value
.Sy geom
is just an alias for
.Sy full
and is kept for compatibility.
Setting it to
.Sy dev
hides the volumes' partitions.
Volumes with the property set to
.Sy none
are not exposed outside ZFS, but can still be snapshotted, cloned, replicated,
and so on, which can be suitable for backup purposes.
The value
.Sy default
means that volume exposure is controlled by the system-wide tunable
.Va zvol_volmode ,
where
.Sy full ,
.Sy dev
and
.Sy none
are encoded as 1, 2 and 3 respectively.
The default value is
.Sy full .
.It Sy vscan Ns = Ns Sy on Ns | Ns Sy off
Controls whether regular files should be scanned for viruses when a file is
opened and closed.
In addition to enabling this property, the virus scan service must also be
enabled for virus scanning to occur.
The default value is
.Sy off .
This property is not used on Linux.
.It Sy xattr Ns = Ns Sy on Ns | Ns Sy off Ns | Ns Sy sa
Controls whether extended attributes are enabled for this file system. Two
styles of extended attributes are supported: directory based and system
attribute based.
.Pp
The default value of
.Sy on
enables directory based extended attributes. This style of extended attribute
imposes no practical limit on either the size or number of attributes which
can be set on a file, although under Linux the
.Xr getxattr 2
and
.Xr setxattr 2
system calls limit the maximum size to 64K. This is the most compatible
style of extended attribute and is supported by all OpenZFS implementations.
.Pp
System attribute based xattrs can be enabled by setting the value to
.Sy sa .
The key advantage of this type of xattr is improved performance. Storing
extended attributes as system attributes significantly decreases the amount of
disk IO required. Up to 64K of data may be stored per-file in the space
reserved for system attributes. If there is not enough space available for
an extended attribute then it will be automatically written as a directory
based xattr. System attribute based extended attributes are not accessible
on platforms which do not support the
.Sy xattr=sa
feature.
.Pp
The use of system attribute based xattrs is strongly encouraged for users of
SELinux or POSIX ACLs. Both of these features heavily rely on extended
attributes and benefit significantly from the reduced access time.
.Pp
The values
.Sy on
and
.Sy off
are equivalent to the
.Sy xattr
and
.Sy noxattr
mount options.
.It Sy zoned Ns = Ns Sy on Ns | Ns Sy off
Controls whether the dataset is managed from a non-global zone. Zones are a
Solaris feature and are not relevant on Linux. The default value is
.Sy off .
.El
.Pp
The following three properties cannot be changed after the file system is
created, and therefore should be set when the file system is created.
If the properties are not set with the
.Nm zfs Cm create
or
.Nm zpool Cm create
commands, these properties are inherited from the parent dataset.
If the parent dataset lacks these properties due to having been created prior to
these features being supported, the new file system will have the default values
for these properties.
.Bl -tag -width ""
.It Xo
.Sy casesensitivity Ns = Ns Sy sensitive Ns | Ns
.Sy insensitive Ns | Ns Sy mixed
.Xc
Indicates whether the file name matching algorithm used by the file system
should be case-sensitive, case-insensitive, or allow a combination of both
styles of matching.
The default value for the
.Sy casesensitivity
property is
.Sy sensitive .
Traditionally,
.Ux
and
.Tn POSIX
file systems have case-sensitive file names.
.Pp
The
.Sy mixed
value for the
.Sy casesensitivity
property indicates that the file system can support requests for both
case-sensitive and case-insensitive matching behavior.
Currently, case-insensitive matching behavior on a file system that supports
mixed behavior is limited to the SMB server product.
For more information about the
.Sy mixed
value behavior, see the "ZFS Administration Guide".
.It Xo
.Sy normalization Ns = Ns Sy none Ns | Ns Sy formC Ns | Ns
.Sy formD Ns | Ns Sy formKC Ns | Ns Sy formKD
.Xc
Indicates whether the file system should perform a
.Sy unicode
normalization of file names whenever two file names are compared, and which
normalization algorithm should be used.
File names are always stored unmodified; names are normalized as part of any
comparison process.
If this property is set to a legal value other than
.Sy none ,
and the
.Sy utf8only
property was left unspecified, the
.Sy utf8only
property is automatically set to
.Sy on .
The default value of the
.Sy normalization
property is
.Sy none .
This property cannot be changed after the file system is created.
.It Sy utf8only Ns = Ns Sy on Ns | Ns Sy off
Indicates whether the file system should reject file names that include
characters that are not present in the
.Sy UTF-8
character code set.
If this property is explicitly set to
.Sy off ,
the normalization property must either not be explicitly set or be set to
.Sy none .
The default value for the
.Sy utf8only
property is
.Sy off .
This property cannot be changed after the file system is created.
.El
.Pp
The
.Sy casesensitivity ,
.Sy normalization ,
and
.Sy utf8only
properties are also new permissions that can be assigned to non-privileged users
by using the ZFS delegated administration feature.
.Ss "Temporary Mount Point Properties"
When a file system is mounted, either through
.Xr mount 8
for legacy mounts or the
.Nm zfs Cm mount
command for normal file systems, its mount options are set according to its
properties.
The correlation between properties and mount options is as follows:
.Bd -literal
PROPERTY MOUNT OPTION
atime atime/noatime
canmount auto/noauto
devices dev/nodev
exec exec/noexec
readonly ro/rw
relatime relatime/norelatime
setuid suid/nosuid
xattr xattr/noxattr
.Ed
.Pp
In addition, these options can be set on a per-mount basis using the
.Fl o
option, without affecting the property that is stored on disk.
The values specified on the command line override the values stored in the
dataset.
The
.Sy nosuid
option is an alias for
.Sy nodevices Ns \&, Ns Sy nosetuid .
These properties are reported as
.Qq temporary
by the
.Nm zfs Cm get
command.
If the properties are changed while the dataset is mounted, the new setting
overrides any temporary settings.
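.Pp
For example, a file system (name chosen for illustration) can be temporarily
mounted read-only without changing its stored
.Sy readonly
property:
.Bd -literal
# zfs mount -o ro pool/home
.Ed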
.Ss "User Properties"
In addition to the standard native properties, ZFS supports arbitrary user
properties.
User properties have no effect on ZFS behavior, but applications or
administrators can use them to annotate datasets
.Pq file systems, volumes, and snapshots .
.Pp
User property names must contain a colon
.Pq Qq Sy \&:
character to distinguish them from native properties.
They may contain lowercase letters, numbers, and the following punctuation
characters: colon
.Pq Qq Sy \&: ,
dash
.Pq Qq Sy - ,
period
.Pq Qq Sy \&. ,
and underscore
.Pq Qq Sy _ .
The expected convention is that the property name is divided into two portions
such as
.Em module Ns \&: Ns Em property ,
but this namespace is not enforced by ZFS.
User property names can be at most 256 characters, and cannot begin with a dash
.Pq Qq Sy - .
.Pp
When making programmatic use of user properties, it is strongly suggested to use
a reversed
.Sy DNS
domain name for the
.Em module
component of property names to reduce the chance that two
independently-developed packages use the same property name for different
purposes.
.Pp
The values of user properties are arbitrary strings, are always inherited, and
are never validated.
All of the commands that operate on properties
.Po Nm zfs Cm list ,
.Nm zfs Cm get ,
.Nm zfs Cm set ,
and so forth
.Pc
can be used to manipulate both native properties and user properties.
Use the
.Nm zfs Cm inherit
command to clear a user property.
If the property is not defined in any parent dataset, it is removed entirely.
Property values are limited to 8192 bytes.
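.Pp
For example, a user property using a hypothetical reversed DNS module name can
be set and later cleared as follows:
.Bd -literal
# zfs set com.example:department=12345 pool/accounting
# zfs inherit com.example:department pool/accounting
.Ed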
.Ss ZFS Volumes as Swap
ZFS volumes may be used as swap devices. After creating the volume with the
.Nm zfs Cm create Fl V
command, set up and enable the swap area using the
.Xr mkswap 8
and
.Xr swapon 8
commands. Do not swap to a file on a ZFS file system. A ZFS swap file
configuration is not supported.
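.Pp
For example, a swap area might be set up as follows; the volume name and size
are illustrative:
.Bd -literal
# zfs create -V 4G pool/swap
# mkswap /dev/zvol/pool/swap
# swapon /dev/zvol/pool/swap
.Ed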
.Ss Encryption
Enabling the
.Sy encryption
feature allows for the creation of encrypted filesystems and volumes.
.Nm
will encrypt all user data including file and zvol data, file attributes,
ACLs, permission bits, directory listings, FUID mappings, and userused /
groupused data.
.Nm
will not encrypt metadata related to the pool structure, including dataset
names, dataset hierarchy, file size, file holes, and dedup tables. Key rotation
is managed internally by the kernel module and changing the user's key does not
require re-encrypting the entire dataset. Datasets can be scrubbed, resilvered,
renamed, and deleted without the encryption keys being loaded (see the
.Nm zfs Cm load-key
subcommand for more info on key loading).
.Pp
Creating an encrypted dataset requires specifying the
.Sy encryption
and
.Sy keyformat
properties at creation time, along with an optional
.Sy keylocation
and
.Sy pbkdf2iters .
After entering an encryption key, the
created dataset will become an encryption root. Any descendant datasets will
inherit their encryption key from the encryption root by default, meaning that
loading, unloading, or changing the key for the encryption root will implicitly
do the same for all inheriting datasets. If this inheritance is not desired,
simply supply a
.Sy keyformat
when creating the child dataset or use
.Nm zfs Cm change-key
to break an existing relationship, creating a new encryption root on the child.
Note that the child's
.Sy keyformat
may match that of the parent while still creating a new encryption root, and
that changing the
.Sy encryption
property alone does not create a new encryption root; this would simply use a
different cipher suite with the same key as its encryption root. The one
exception is that clones will always use their origin's encryption key.
As a result of this exception, some encryption-related properties (namely
.Sy keystatus ,
.Sy keyformat ,
.Sy keylocation ,
and
.Sy pbkdf2iters )
do not inherit like other ZFS properties and instead use the value determined
by their encryption root. Encryption root inheritance can be tracked via the
read-only
.Sy encryptionroot
property.
.Pp
Encryption changes the behavior of a few
.Nm
operations. Encryption is applied after compression so compression ratios are
preserved. Normally checksums in ZFS are 256 bits long, but for encrypted data
the checksum is 128 bits of the user-chosen checksum and 128 bits of MAC from
the encryption suite, which provides additional protection against maliciously
altered data. Deduplication is still possible with encryption enabled but for
security, datasets will only dedup against themselves, their snapshots, and
their clones.
.Pp
There are a few limitations on encrypted datasets. Encrypted data cannot be
embedded via the
.Sy embedded_data
feature. Encrypted datasets may not have
.Sy copies Ns = Ns Em 3
since the implementation stores some encryption metadata where the third copy
would normally be. Since compression is applied before encryption, datasets may
be vulnerable to a CRIME-like attack if applications accessing the data allow
for it. Deduplication with encryption will leak information about which blocks
are equivalent in a dataset and will incur an extra CPU cost per block written.
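.Pp
For example, assuming the pool's
.Sy encryption
feature is enabled and a passphrase key format is desired, an encrypted file
system might be created as follows; the dataset name is illustrative:
.Bd -literal
# zfs create -o encryption=on -o keyformat=passphrase pool/secure
.Ed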
.Sh SUBCOMMANDS
All subcommands that modify state are logged persistently to the pool in their
original form.
.Bl -tag -width ""
.It Nm Fl ?
Displays a help message.
.It Xo
.Nm
.Cm create
.Op Fl p
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Ar filesystem
.Xc
Creates a new ZFS file system.
The file system is automatically mounted according to the
.Sy mountpoint
property inherited from the parent.
.Bl -tag -width "-o"
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property as if the command
.Nm zfs Cm set Ar property Ns = Ns Ar value
was invoked at the same time the dataset was created.
Any editable ZFS property can also be set at creation time.
Multiple
.Fl o
options can be specified.
An error results if the same property is specified in multiple
.Fl o
options.
.It Fl p
Creates all the non-existing parent datasets.
Datasets created in this manner are automatically mounted according to the
.Sy mountpoint
property inherited from their parent.
Any property specified on the command line using the
.Fl o
option is ignored.
If the target filesystem already exists, the operation completes successfully.
.El
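.Pp
For example, the following creates a hypothetical file system along with any
missing parent datasets:
.Bd -literal
# zfs create -p pool/home/user/docs
.Ed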
.It Xo
.Nm
.Cm create
.Op Fl ps
.Op Fl b Ar blocksize
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Fl V Ar size Ar volume
.Xc
Creates a volume of the given size.
The volume is exported as a block device in
.Pa /dev/zvol/path ,
where
.Em path
is the name of the volume in the ZFS namespace.
The size represents the logical size as exported by the device.
By default, a reservation of equal size is created.
.Pp
.Ar size
is automatically rounded up to the nearest 128 Kbytes to ensure that the volume
has an integral number of blocks regardless of
.Sy blocksize .
.Bl -tag -width "-b"
.It Fl b Ar blocksize
Equivalent to
.Fl o Sy volblocksize Ns = Ns Ar blocksize .
If this option is specified in conjunction with
.Fl o Sy volblocksize ,
the resulting behavior is undefined.
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property as if the
.Nm zfs Cm set Ar property Ns = Ns Ar value
command was invoked at the same time the dataset was created.
Any editable ZFS property can also be set at creation time.
Multiple
.Fl o
options can be specified.
An error results if the same property is specified in multiple
.Fl o
options.
.It Fl p
Creates all the non-existing parent datasets.
Datasets created in this manner are automatically mounted according to the
.Sy mountpoint
property inherited from their parent.
Any property specified on the command line using the
.Fl o
option is ignored.
If the target filesystem already exists, the operation completes successfully.
.It Fl s
Creates a sparse volume with no reservation.
See
.Sy volsize
in the
.Sx Native Properties
section for more information about sparse volumes.
.El
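.Pp
For example, the following creates a hypothetical 100 Gbyte sparse volume:
.Bd -literal
# zfs create -s -V 100G pool/vol
.Ed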
.It Xo
.Nm
.Cm destroy
.Op Fl Rfnprv
.Ar filesystem Ns | Ns Ar volume
.Xc
Destroys the given dataset.
By default, the command unshares any file systems that are currently shared,
unmounts any file systems that are currently mounted, and refuses to destroy a
dataset that has active dependents
.Pq children or clones .
.Bl -tag -width "-R"
.It Fl R
Recursively destroy all dependents, including cloned file systems outside the
target hierarchy.
.It Fl f
Force an unmount of any file systems using the
.Nm unmount Fl f
command.
This option has no effect on non-file systems or unmounted file systems.
.It Fl n
Do a dry-run
.Pq Qq No-op
deletion.
No data will be deleted.
This is useful in conjunction with the
.Fl v
or
.Fl p
flags to determine what data would be deleted.
.It Fl p
Print machine-parsable verbose information about the deleted data.
.It Fl r
Recursively destroy all children.
.It Fl v
Print verbose information about the deleted data.
.El
.Pp
Extreme care should be taken when applying either the
.Fl r
or the
.Fl R
options, as they can destroy large portions of a pool and cause unexpected
behavior for mounted file systems in use.
.It Xo
.Nm
.Cm destroy
.Op Fl Rdnprv
.Ar filesystem Ns | Ns Ar volume Ns @ Ns Ar snap Ns
.Oo % Ns Ar snap Ns Oo , Ns Ar snap Ns Oo % Ns Ar snap Oc Oc Oc Ns ...
.Xc
The given snapshots are destroyed immediately if and only if the
.Nm zfs Cm destroy
command without the
.Fl d
option would have destroyed them.
Such immediate destruction would occur, for example, if the snapshot had no
clones and the user-initiated reference count were zero.
.Pp
If a snapshot does not qualify for immediate destruction, it is marked for
deferred deletion.
In this state, it exists as a usable, visible snapshot until both of the
preconditions listed above are met, at which point it is destroyed.
.Pp
An inclusive range of snapshots may be specified by separating the first and
last snapshots with a percent sign.
The first and/or last snapshots may be left blank, in which case the
filesystem's oldest or newest snapshot will be implied.
.Pp
Multiple snapshots
.Pq or ranges of snapshots
of the same filesystem or volume may be specified in a comma-separated list of
snapshots.
Only the snapshot's short name
.Po the part after the
.Sy @
.Pc
should be specified when using a range or comma-separated list to identify
multiple snapshots.
.Bl -tag -width "-R"
.It Fl R
Recursively destroy all clones of these snapshots, including the clones,
snapshots, and children.
If this flag is specified, the
.Fl d
flag will have no effect.
.It Fl d
Destroy immediately. If a snapshot cannot be destroyed now, mark it for
deferred destruction.
.It Fl n
Do a dry-run
.Pq Qq No-op
deletion.
No data will be deleted.
This is useful in conjunction with the
.Fl p
or
.Fl v
flags to determine what data would be deleted.
.It Fl p
Print machine-parsable verbose information about the deleted data.
.It Fl r
Destroy
.Pq or mark for deferred deletion
all snapshots with this name in descendent file systems.
.It Fl v
Print verbose information about the deleted data.
.Pp
Extreme care should be taken when applying either the
.Fl r
or the
.Fl R
options, as they can destroy large portions of a pool and cause unexpected
behavior for mounted file systems in use.
.El
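.Pp
For example, the following dry run reports what destroying an inclusive range
of hypothetical snapshots would remove, without deleting any data:
.Bd -literal
# zfs destroy -nv pool/home@snap1%snap3
.Ed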
.It Xo
.Nm
.Cm destroy
.Ar filesystem Ns | Ns Ar volume Ns # Ns Ar bookmark
.Xc
The given bookmark is destroyed.
.It Xo
.Nm
.Cm snapshot
.Op Fl r
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Ar filesystem Ns @ Ns Ar snapname Ns | Ns Ar volume Ns @ Ns Ar snapname Ns ...
.Xc
Creates snapshots with the given names.
All previous modifications by successful system calls to the file system are
part of the snapshots.
Snapshots are taken atomically, so that all snapshots correspond to the same
moment in time.
.Nm zfs Cm snap
can be used as an alias for
.Nm zfs Cm snapshot .
See the
.Sx Snapshots
section for details.
.Bl -tag -width "-o"
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property; see
.Nm zfs Cm create
for details.
.It Fl r
Recursively create snapshots of all descendent datasets.
.El
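.Pp
For example, the following atomically snapshots a hypothetical file system and
all of its descendents:
.Bd -literal
# zfs snapshot -r pool/home@today
.Ed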
.It Xo
.Nm
.Cm rollback
.Op Fl Rfr
.Ar snapshot
.Xc
Roll back the given dataset to a previous snapshot.
When a dataset is rolled back, all data that has changed since the snapshot is
discarded, and the dataset reverts to the state at the time of the snapshot.
By default, the command refuses to roll back to a snapshot other than the most
recent one.
In order to do so, all intermediate snapshots and bookmarks must be destroyed by
specifying the
.Fl r
option.
.Pp
The
.Fl rR
options do not recursively destroy the child snapshots of a recursive snapshot.
Only direct snapshots of the specified filesystem are destroyed by either of
these options.
To completely roll back a recursive snapshot, you must roll back the individual
child snapshots.
.Bl -tag -width "-R"
.It Fl R
Destroy any more recent snapshots and bookmarks, as well as any clones of those
snapshots.
.It Fl f
Used with the
.Fl R
option to force an unmount of any clone file systems that are to be destroyed.
.It Fl r
Destroy any snapshots and bookmarks more recent than the one specified.
.El
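.Pp
For example, the following rolls a hypothetical file system back to an older
snapshot, destroying any more recent snapshots and bookmarks:
.Bd -literal
# zfs rollback -r pool/home/user@yesterday
.Ed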
.It Xo
.Nm
.Cm clone
.Op Fl p
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Ar snapshot Ar filesystem Ns | Ns Ar volume
.Xc
Creates a clone of the given snapshot.
See the
.Sx Clones
section for details.
The target dataset can be located anywhere in the ZFS hierarchy, and is created
as the same type as the original.
.Bl -tag -width "-o"
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property; see
.Nm zfs Cm create
for details.
.It Fl p
Creates all the non-existing parent datasets.
Datasets created in this manner are automatically mounted according to the
.Sy mountpoint
property inherited from their parent.
If the target filesystem or volume already exists, the operation completes
successfully.
.El
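.Pp
For example, the following clones a hypothetical snapshot into a new, writable
file system:
.Bd -literal
# zfs clone pool/home/user@today pool/clones/user
.Ed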
.It Xo
.Nm
.Cm promote
.Ar clone-filesystem
.Xc
Promotes a clone file system to no longer be dependent on its
.Qq origin
snapshot.
This makes it possible to destroy the file system that the clone was created
from.
The clone parent-child dependency relationship is reversed, so that the origin
file system becomes a clone of the specified file system.
.Pp
The snapshot that was cloned, and any snapshots previous to this snapshot, are
now owned by the promoted clone.
The space they use moves from the origin file system to the promoted clone, so
enough space must be available to accommodate these snapshots.
No new space is consumed by this operation, but the space accounting is
adjusted.
The promoted clone must not have any conflicting snapshot names of its own.
The
.Cm rename
subcommand can be used to rename any conflicting snapshots.
.It Xo
.Nm
.Cm rename
.Op Fl f
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Xc
.It Xo
.Nm
.Cm rename
.Op Fl fp
.Ar filesystem Ns | Ns Ar volume
.Ar filesystem Ns | Ns Ar volume
.Xc
Renames the given dataset.
The new target can be located anywhere in the ZFS hierarchy, with the exception
of snapshots.
Snapshots can only be renamed within the parent file system or volume.
When renaming a snapshot, the parent file system of the snapshot does not need
to be specified as part of the second argument.
Renamed file systems can inherit new mount points, in which case they are
unmounted and remounted at the new mount point.
.Bl -tag -width "-a"
.It Fl f
Force unmount any filesystems that need to be unmounted in the process.
.It Fl p
Creates all the nonexistent parent datasets.
Datasets created in this manner are automatically mounted according to the
.Sy mountpoint
property inherited from their parent.
.El
.It Xo
.Nm
.Cm rename
.Fl r
.Ar snapshot Ar snapshot
.Xc
Recursively rename the snapshots of all descendent datasets.
Snapshots are the only type of dataset that can be renamed recursively.
.It Xo
.Nm
.Cm list
.Op Fl r Ns | Ns Fl d Ar depth
.Op Fl Hp
.Oo Fl o Ar property Ns Oo , Ns Ar property Oc Ns ... Oc
.Oo Fl s Ar property Oc Ns ...
.Oo Fl S Ar property Oc Ns ...
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
.Oo Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Oc Ns ...
.Xc
Lists the property information for the given datasets in tabular form.
If specified, you can list property information by the absolute pathname or the
relative pathname.
By default, all file systems and volumes are displayed.
Snapshots are displayed if the
.Sy listsnaps
property is
.Sy on
.Po the default is
.Sy off
.Pc .
The following fields are displayed:
.Sy name Ns \&, Ns Sy used Ns \&, Ns Sy available Ns \&, Ns Sy referenced Ns \&, Ns
.Sy mountpoint .
.Bl -tag -width "-H"
.It Fl H
Used for scripting mode.
Do not print headers, and separate fields by a single tab instead of arbitrary
white space.
.It Fl S Ar property
Same as the
.Fl s
option, but sorts by property in descending order.
.It Fl d Ar depth
Recursively display any children of the dataset, limiting the recursion to
.Ar depth .
A
.Ar depth
of
.Sy 1
will display only the dataset and its direct children.
.It Fl o Ar property
A comma-separated list of properties to display.
The property must be:
.Bl -bullet
.It
One of the properties described in the
.Sx Native Properties
section
.It
A user property
.It
The value
.Sy name
to display the dataset name
.It
The value
.Sy space
to display space usage properties on file systems and volumes.
This is a shortcut for specifying
.Fl o Sy name Ns \&, Ns Sy avail Ns \&, Ns Sy used Ns \&, Ns Sy usedsnap Ns \&, Ns
.Sy usedds Ns \&, Ns Sy usedrefreserv Ns \&, Ns Sy usedchild Fl t
.Sy filesystem Ns \&, Ns Sy volume
syntax.
.El
.It Fl p
Display numbers in parsable
.Pq exact
values.
.It Fl r
Recursively display any children of the dataset on the command line.
.It Fl s Ar property
A property for sorting the output by column in ascending order based on the
value of the property.
The property must be one of the properties described in the
.Sx Properties
section, or the special value
.Sy name
to sort by the dataset name.
Multiple properties can be specified at one time using multiple
.Fl s
property options.
Multiple
.Fl s
options are evaluated from left to right in decreasing order of importance.
The following is a list of sorting criteria:
.Bl -bullet
.It
Numeric types sort in numeric order.
.It
String types sort in alphabetical order.
.It
Types inappropriate for a row sort that row to the literal bottom, regardless of
the specified ordering.
.El
.Pp
If no sorting options are specified, the existing behavior of
.Nm zfs Cm list
is preserved.
.It Fl t Ar type
A comma-separated list of types to display, where
.Ar type
is one of
.Sy filesystem ,
.Sy snapshot ,
.Sy volume ,
.Sy bookmark ,
or
.Sy all .
For example, specifying
.Fl t Sy snapshot
displays only snapshots.
.El
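.Pp
For example, the following lists the snapshots of a hypothetical file system
and its descendents, showing only their names and space used:
.Bd -literal
# zfs list -r -t snapshot -o name,used pool/home
.Ed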
.It Xo
.Nm
.Cm set
.Ar property Ns = Ns Ar value Oo Ar property Ns = Ns Ar value Oc Ns ...
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns ...
.Xc
Sets the property or list of properties to the given value(s) for each dataset.
Only some properties can be edited.
See the
.Sx Properties
section for more information on what properties can be set and acceptable
values.
Numeric values can be specified as exact values, or in a human-readable form
with a suffix of
.Sy B , K , M , G , T , P , E , Z
.Po for bytes, kilobytes, megabytes, gigabytes, terabytes, petabytes, exabytes,
or zettabytes, respectively
.Pc .
User properties can be set on snapshots.
For more information, see the
.Sx User Properties
section.
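.Pp
For example, several editable properties can be set on a hypothetical file
system in a single invocation:
.Bd -literal
# zfs set atime=off compression=on pool/home
.Ed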
.It Xo
.Nm
.Cm get
.Op Fl r Ns | Ns Fl d Ar depth
.Op Fl Hp
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
.Oo Fl s Ar source Ns Oo , Ns Ar source Oc Ns ... Oc
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
.Cm all | Ar property Ns Oo , Ns Ar property Oc Ns ...
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns | Ns Ar bookmark Ns ...
.Xc
Displays properties for the given datasets.
If no datasets are specified, then the command displays properties for all
datasets on the system.
For each property, the following columns are displayed:
.Bd -literal
name Dataset name
property Property name
value Property value
source Property source. Can either be local, default,
temporary, inherited, or none (-).
.Ed
.Pp
All columns are displayed by default, though this can be controlled by using the
.Fl o
option.
This command takes a comma-separated list of properties as described in the
.Sx Native Properties
and
.Sx User Properties
sections.
.Pp
The special value
.Sy all
can be used to display all properties that apply to the given dataset's type
.Pq filesystem, volume, snapshot, or bookmark .
.Bl -tag -width "-H"
.It Fl H
Display output in a form more easily parsed by scripts.
Any headers are omitted, and fields are explicitly separated by a single tab
instead of an arbitrary amount of space.
.It Fl d Ar depth
Recursively display any children of the dataset, limiting the recursion to
.Ar depth .
A depth of
.Sy 1
will display only the dataset and its direct children.
.It Fl o Ar field
A comma-separated list of columns to display.
.Sy name Ns \&, Ns Sy property Ns \&, Ns Sy value Ns \&, Ns Sy source
is the default value.
.It Fl p
Display numbers in parsable
.Pq exact
values.
.It Fl r
Recursively display properties for any children.
.It Fl s Ar source
A comma-separated list of sources to display.
Those properties coming from a source other than those in this list are ignored.
Each source must be one of the following:
.Sy local ,
.Sy default ,
.Sy inherited ,
.Sy temporary ,
and
.Sy none .
The default value is all sources.
.It Fl t Ar type
A comma-separated list of types to display, where
.Ar type
is one of
.Sy filesystem ,
.Sy snapshot ,
.Sy volume ,
.Sy bookmark ,
or
.Sy all .
.El
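.Pp
For example, the following prints just the value of a single property of a
hypothetical file system, in a form convenient for scripts:
.Bd -literal
# zfs get -H -o value compression pool/home
.Ed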
.It Xo
.Nm
.Cm inherit
.Op Fl rS
.Ar property Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot Ns ...
.Xc
Clears the specified property, causing it to be inherited from an ancestor,
restored to default if no ancestor has the property set, or with the
.Fl S
option reverted to the received value if one exists.
See the
.Sx Properties
section for a listing of default values, and details on which properties can be
inherited.
.Bl -tag -width "-r"
.It Fl r
Recursively inherit the given property for all children.
.It Fl S
Revert the property to the received value if one exists; otherwise operate as
if the
.Fl S
option was not specified.
.El
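.Pp
For example, the following clears a locally set property on a hypothetical file
system and all of its children:
.Bd -literal
# zfs inherit -r compression pool/home
.Ed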
.It Xo
.Nm
.Cm remap
.Ar filesystem Ns | Ns Ar volume
.Xc
Remap the indirect blocks in the given filesystem or volume so that they no
longer reference blocks on previously removed vdevs and we can eventually
shrink the size of the indirect mapping objects for the previously removed
vdevs. Note that remapping all blocks might not be possible and that
references from snapshots will still exist and cannot be remapped.
.It Xo
.Nm
.Cm upgrade
.Xc
Displays a list of file systems that are not the most recent version.
.It Xo
.Nm
.Cm upgrade
.Fl v
.Xc
Displays a list of currently supported file system versions.
.It Xo
.Nm
.Cm upgrade
.Op Fl r
.Op Fl V Ar version
.Fl a | Ar filesystem
.Xc
Upgrades file systems to a new on-disk version.
Once this is done, the file systems will no longer be accessible on systems
running older versions of the software.
.Nm zfs Cm send
streams generated from new snapshots of these file systems cannot be accessed on
systems running older versions of the software.
.Pp
In general, the file system version is independent of the pool version.
See
.Xr zpool 8
for information on the
.Nm zpool Cm upgrade
command.
.Pp
In some cases, the file system version and the pool version are interrelated and
the pool version must be upgraded before the file system version can be
upgraded.
.Bl -tag -width "-V"
.It Fl V Ar version
Upgrade to the specified
.Ar version .
If the
.Fl V
flag is not specified, this command upgrades to the most recent version.
This
option can only be used to increase the version number, and only up to the most
recent version supported by this software.
.It Fl a
Upgrade all file systems on all imported pools.
.It Ar filesystem
Upgrade the specified file system.
.It Fl r
Upgrade the specified file system and all descendent file systems.
.El
.It Xo
.Nm
.Cm userspace
.Op Fl Hinp
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
.Oo Fl s Ar field Oc Ns ...
.Oo Fl S Ar field Oc Ns ...
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar snapshot
.Xc
Displays space consumed by, and quotas on, each user in the specified filesystem
or snapshot.
This corresponds to the
.Sy userused@ Ns Em user ,
.Sy userobjused@ Ns Em user ,
.Sy userquota@ Ns Em user ,
and
.Sy userobjquota@ Ns Em user
properties.
.Bl -tag -width "-H"
.It Fl H
Do not print headers; use tab-delimited output.
.It Fl S Ar field
Sort by this field in reverse order.
See
.Fl s .
.It Fl i
Translate SID to POSIX ID.
The POSIX ID may be ephemeral if no mapping exists.
Normal POSIX interfaces
.Po for example,
.Xr stat 2 ,
.Nm ls Fl l
.Pc
perform this translation, so the
.Fl i
option allows the output from
.Nm zfs Cm userspace
to be compared directly with those utilities.
However,
.Fl i
may lead to confusion if some files were created by an SMB user before an
SMB-to-POSIX name mapping was established.
In such a case, some files will be owned by the SMB entity and some by the POSIX
entity.
However, the
.Fl i
option will report that the POSIX entity has the total usage and quota for both.
.It Fl n
Print numeric ID instead of user/group name.
.It Fl o Ar field Ns Oo , Ns Ar field Oc Ns ...
Display only the specified fields from the following set:
.Sy type ,
.Sy name ,
.Sy used ,
.Sy quota .
The default is to display all fields.
.It Fl p
Use exact
.Pq parsable
numeric output.
.It Fl s Ar field
Sort output by this field.
The
.Fl s
and
.Fl S
flags may be specified multiple times to sort first by one field, then by
another.
The default is
.Fl s Sy type Fl s Sy name .
.It Fl t Ar type Ns Oo , Ns Ar type Oc Ns ...
Print only the specified types from the following set:
.Sy all ,
.Sy posixuser ,
.Sy smbuser ,
.Sy posixgroup ,
.Sy smbgroup .
The default is
.Fl t Sy posixuser Ns \&, Ns Sy smbuser .
The default can be changed to include group types.
.El
.It Xo
.Nm
.Cm groupspace
.Op Fl Hinp
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
.Oo Fl s Ar field Oc Ns ...
.Oo Fl S Ar field Oc Ns ...
.Oo Fl t Ar type Ns Oo , Ns Ar type Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar snapshot
.Xc
Displays space consumed by, and quotas on, each group in the specified
filesystem or snapshot.
This subcommand is identical to
.Nm zfs Cm userspace ,
except that the default types to display are
.Fl t Sy posixgroup Ns \&, Ns Sy smbgroup .
.It Xo
.Nm
.Cm projectspace
.Op Fl Hp
.Oo Fl o Ar field Ns Oo , Ns Ar field Oc Ns ... Oc
.Oo Fl s Ar field Oc Ns ...
.Oo Fl S Ar field Oc Ns ...
.Ar filesystem Ns | Ns Ar snapshot
.Xc
Displays space consumed by, and quotas on, each project in the specified
filesystem or snapshot. This subcommand is identical to
.Nm zfs Cm userspace ,
except that the project identifier is numeric rather than a name, so there is
no need for the
.Fl i
option for SID to POSIX ID translation, the
.Fl n
option for numeric IDs, or the
.Fl t
option for types.
.It Xo
.Nm
.Cm project
.Oo Fl d Ns | Ns Fl r Ns Oc
.Ar file Ns | Ns Ar directory Ns ...
.Xc
List the project identifier (ID) and inherit flag of the given files or
directories.
.Bl -tag -width "-d"
.It Fl d
Show the directory's own project ID and inherit flag, not those of its
children. This overrides a previously specified
.Fl r
option.
.It Fl r
Show the project IDs and inherit flags of subdirectories recursively. This
overrides a previously specified
.Fl d
option.
.El
.It Xo
.Nm
.Cm project
.Fl C
.Oo Fl kr Ns Oc
.Ar file Ns | Ns Ar directory Ns ...
.Xc
Clear the project inherit flag and/or ID on the given files or directories.
.Bl -tag -width "-k"
.It Fl k
Keep the project ID unchanged. If not specified, the project ID will be reset
to zero.
.It Fl r
Clear on subdirectories recursively.
.El
.It Xo
.Nm
.Cm project
.Fl c
.Oo Fl 0 Ns Oc
.Oo Fl d Ns | Ns Fl r Ns Oc
.Op Fl p Ar id
.Ar file Ns | Ns Ar directory Ns ...
.Xc
Check the project ID and inherit flag on the given files or directories, and
report any entries that lack the project inherit flag or whose project ID
differs from the value specified via the
.Fl p
option, or from the target directory's project ID if no value is specified.
.Bl -tag -width "-0"
.It Fl 0
Print file names terminated by a NUL character instead of a newline (the
default), similar to "find -print0".
.It Fl d
Check the directory's own project ID and inherit flag, not those of its
children. This overrides a previously specified
.Fl r
option.
.It Fl p
Specify the reference ID against which the project IDs of the target files or
directories are compared. If not specified, the project ID of the target (top)
directory is used as the reference.
.It Fl r
Check subdirectories recursively. This overrides a previously specified
.Fl d
option.
.El
.It Xo
.Nm
.Cm project
.Op Fl p Ar id
.Oo Fl rs Ns Oc
.Ar file Ns | Ns Ar directory Ns ...
.Xc
Set the project ID and/or inherit flag on the given files or directories.
.Bl -tag -width "-p"
.It Fl p
Set the project ID of the files or directories to the given value.
.It Fl r
Set on subdirectories recursively.
.It Fl s
Set the project inherit flag on the given files or directories. This is
typically used to set up a tree quota on a directory target, together with the
.Fl r
option, as shown in the example below. When setting up a tree quota, by
default the directory's project ID is applied to all of its descendants unless
a project ID is specified explicitly via the
.Fl p
option.
.El
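.Pp
For example, assuming a hypothetical dataset tank mounted at /tank, a tree
quota might be set up by marking a directory and its contents with an
illustrative project ID and then limiting that project's space:
.Bd -literal
# zfs project -p 1001 -rs /tank/projdir
# zfs set projectquota@1001=10G tank
.Ed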
.It Xo
.Nm
.Cm mount
.Xc
Displays all ZFS file systems currently mounted.
.It Xo
.Nm
.Cm mount
.Op Fl Olv
.Op Fl o Ar options
.Fl a | Ar filesystem
.Xc
Mounts ZFS file systems.
.Bl -tag -width "-O"
.It Fl O
Perform an overlay mount.
See
.Xr mount 8
for more information.
.It Fl a
Mount all available ZFS file systems.
Invoked automatically as part of the boot process.
.It Ar filesystem
Mount the specified filesystem.
.It Fl o Ar options
An optional, comma-separated list of mount options to use temporarily for the
duration of the mount.
See the
.Sx Temporary Mount Point Properties
section for details.
.It Fl l
Load keys for encrypted filesystems as they are being mounted. This is
equivalent to executing
.Nm zfs Cm load-key
on each encryption root before mounting it. Note that if a filesystem has a
.Sy keylocation
of
.Sy prompt
this will cause the terminal to interactively block after asking for the key.
.It Fl v
Report mount progress.
.El
.It Xo
.Nm
.Cm unmount
.Op Fl f
.Fl a | Ar filesystem Ns | Ns Ar mountpoint
.Xc
Unmounts currently mounted ZFS file systems.
.Bl -tag -width "-a"
.It Fl a
Unmount all available ZFS file systems.
Invoked automatically as part of the shutdown process.
.It Ar filesystem Ns | Ns Ar mountpoint
Unmount the specified filesystem.
The command can also be given a path to a ZFS file system mount point on the
system.
.It Fl f
Forcefully unmount the file system, even if it is currently in use.
.El
.It Xo
.Nm
.Cm share
.Fl a | Ar filesystem
.Xc
Shares available ZFS file systems.
.Bl -tag -width "-a"
.It Fl a
Share all available ZFS file systems.
Invoked automatically as part of the boot process.
.It Ar filesystem
Share the specified filesystem according to the
.Sy sharenfs
and
.Sy sharesmb
properties.
File systems are shared when the
.Sy sharenfs
or
.Sy sharesmb
property is set.
.El
.It Xo
.Nm
.Cm unshare
.Fl a | Ar filesystem Ns | Ns Ar mountpoint
.Xc
Unshares currently shared ZFS file systems.
.Bl -tag -width "-a"
.It Fl a
Unshare all available ZFS file systems.
Invoked automatically as part of the shutdown process.
.It Ar filesystem Ns | Ns Ar mountpoint
Unshare the specified filesystem.
The command can also be given a path to a ZFS file system shared on the system.
.El
.It Xo
.Nm
.Cm bookmark
.Ar snapshot bookmark
.Xc
Creates a bookmark of the given snapshot.
Bookmarks mark the point in time when the snapshot was created, and can be used
as the incremental source for a
.Nm zfs Cm send
command.
.Pp
This feature must be enabled to be used.
See
.Xr zpool-features 5
for details on ZFS feature flags and the
.Sy bookmarks
feature.
.It Xo
.Nm
.Cm send
.Op Fl DLPRbcenpvw
.Op Oo Fl I Ns | Ns Fl i Oc Ar snapshot
.Ar snapshot
.Xc
Creates a stream representation of the second
.Ar snapshot ,
which is written to standard output.
The output can be redirected to a file or to a different system
.Po for example, using
.Xr ssh 1
.Pc .
By default, a full stream is generated.
.Bl -tag -width "-D"
.It Fl D, -dedup
Generate a deduplicated stream.
Blocks which would have been sent multiple times in the send stream will only be
sent once.
The receiving system must also support this feature to receive a deduplicated
stream.
This flag can be used regardless of the dataset's
.Sy dedup
property, but performance will be much better if the filesystem uses a
dedup-capable checksum
.Po for example,
.Sy sha256
.Pc .
.It Fl I Ar snapshot
Generate a stream package that sends all intermediary snapshots from the first
snapshot to the second snapshot.
For example,
.Fl I Em @a Em fs@d
is similar to
.Fl i Em @a Em fs@b Ns \&; Fl i Em @b Em fs@c Ns \&; Fl i Em @c Em fs@d .
The incremental source may be specified as with the
.Fl i
option.
.It Fl L, -large-block
Generate a stream which may contain blocks larger than 128KB.
This flag has no effect if the
.Sy large_blocks
pool feature is disabled, or if the
.Sy recordsize
property of this filesystem has never been set above 128KB.
The receiving system must have the
.Sy large_blocks
pool feature enabled as well.
See
.Xr zpool-features 5
for details on ZFS feature flags and the
.Sy large_blocks
feature.
.It Fl P, -parsable
Print machine-parsable verbose information about the stream package generated.
.It Fl R, -replicate
Generate a replication stream package, which will replicate the specified
file system, and all descendent file systems, up to the named snapshot.
When received, all properties, snapshots, descendent file systems, and clones
are preserved.
.Pp
If the
.Fl i
or
.Fl I
flags are used in conjunction with the
.Fl R
flag, an incremental replication stream is generated.
The current values of properties, and current snapshot and file system names are
set when the stream is received.
If the
.Fl F
flag is specified when this stream is received, snapshots and file systems that
do not exist on the sending side are destroyed.
.It Fl e, -embed
Generate a more compact stream by using
.Sy WRITE_EMBEDDED
records for blocks which are stored more compactly on disk by the
.Sy embedded_data
pool feature.
This flag has no effect if the
.Sy embedded_data
feature is disabled.
The receiving system must have the
.Sy embedded_data
feature enabled.
If the
.Sy lz4_compress
feature is active on the sending system, then the receiving system must have
that feature enabled as well. Datasets that are sent with this flag may not be
received as an encrypted dataset, since encrypted datasets cannot use the
.Sy embedded_data
feature.
See
.Xr zpool-features 5
for details on ZFS feature flags and the
.Sy embedded_data
feature.
.It Fl b, -backup
Sends only received property values whether or not they are overridden by local
settings, but only if the dataset has ever been received. Use this option when
you want
.Nm zfs Cm receive
to restore received properties backed up on the sent dataset and to avoid
sending local settings that may have nothing to do with the source dataset,
but only with how the data is backed up.
.It Fl c, -compressed
Generate a more compact stream by using compressed WRITE records for blocks
which are compressed on disk and in memory
.Po see the
.Sy compression
property for details
.Pc .
If the
.Sy lz4_compress
feature is active on the sending system, then the receiving system must have
that feature enabled as well.
If the
.Sy large_blocks
feature is enabled on the sending system but the
.Fl L
option is not supplied in conjunction with
.Fl c ,
then the data will be decompressed before sending so it can be split into
smaller block sizes.
.It Fl w, -raw
For encrypted datasets, send data exactly as it exists on disk. This allows
backups to be taken even if encryption keys are not currently loaded. The
backup may then be received on an untrusted machine since that machine will
not have the encryption keys to read the protected data or alter it without
being detected. Upon being received, the dataset will have the same encryption
keys as it did on the send side, although the
.Sy keylocation
property will be defaulted to
.Sy prompt
if not otherwise provided. For unencrypted datasets, this flag will be
equivalent to
.Fl Lec .
Note that if you do not use this flag for sending encrypted datasets, data will
be sent unencrypted and may be re-encrypted with a different encryption key on
the receiving system, which will disable the ability to do a raw send to that
system for incrementals.
.It Fl i Ar snapshot
Generate an incremental stream from the first
.Ar snapshot
.Pq the incremental source
to the second
.Ar snapshot
.Pq the incremental target .
The incremental source can be specified as the last component of the snapshot
name
.Po the
.Sy @
character and following
.Pc
and it is assumed to be from the same file system as the incremental target.
.Pp
If the destination is a clone, the source may be the origin snapshot, which must
be fully specified
.Po for example,
.Em pool/fs@origin ,
not just
.Em @origin
.Pc .
.It Fl n, -dryrun
Do a dry-run
.Pq Qq No-op
send.
Do not generate any actual send data.
This is useful in conjunction with the
.Fl v
or
.Fl P
flags to determine what data will be sent.
In this case, the verbose output will be written to standard output
.Po contrast with a non-dry-run, where the stream is written to standard output
and the verbose output goes to standard error
.Pc .
.It Fl p, -props
Include the dataset's properties in the stream.
This flag is implicit when
.Fl R
is specified.
The receiving system must also support this feature. Sends of encrypted datasets
must use
.Fl w
when using this flag.
.It Fl v, -verbose
Print verbose information about the stream package generated.
This information includes a per-second report of how much data has been sent.
.Pp
The format of the stream is committed.
You will be able to receive your streams on future versions of ZFS.
.El
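.Pp
For example, an incremental stream between two hypothetical snapshots can be
piped to another system, assuming the destination file system already exists
there and holds the earlier snapshot; host and dataset names are illustrative:
.Bd -literal
# zfs send -i @a pool/fs@b | ssh host zfs receive otherpool/fs
.Ed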
.It Xo
.Nm
.Cm send
.Op Fl LPcenvw
.Op Fl i Ar snapshot Ns | Ns Ar bookmark
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Xc
Generate a send stream, which may be of a filesystem, and may be incremental
from a bookmark.
If the destination is a filesystem or volume, the pool must be read-only, or the
filesystem must not be mounted.
When the stream generated from a filesystem or volume is received, the default
snapshot name will be
.Qq --head-- .
.Bl -tag -width "-L"
.It Fl L, -large-block
Generate a stream which may contain blocks larger than 128KB.
This flag has no effect if the
.Sy large_blocks
pool feature is disabled, or if the
.Sy recordsize
property of this filesystem has never been set above 128KB.
The receiving system must have the
.Sy large_blocks
pool feature enabled as well.
See
.Xr zpool-features 5
for details on ZFS feature flags and the
.Sy large_blocks
feature.
.It Fl P, -parsable
Print machine-parsable verbose information about the stream package generated.
.It Fl c, -compressed
Generate a more compact stream by using compressed WRITE records for blocks
which are compressed on disk and in memory
.Po see the
.Sy compression
property for details
.Pc .
If the
.Sy lz4_compress
feature is active on the sending system, then the receiving system must have
that feature enabled as well.
If the
.Sy large_blocks
feature is enabled on the sending system but the
.Fl L
option is not supplied in conjunction with
.Fl c ,
then the data will be decompressed before sending so it can be split into
smaller block sizes.
.It Fl w, -raw
For encrypted datasets, send data exactly as it exists on disk. This allows
backups to be taken even if encryption keys are not currently loaded. The
backup may then be received on an untrusted machine since that machine will
not have the encryption keys to read the protected data or alter it without
being detected. Upon being received, the dataset will have the same encryption
keys as it did on the send side, although the
.Sy keylocation
property will be defaulted to
.Sy prompt
if not otherwise provided. For unencrypted datasets, this flag will be
equivalent to
.Fl Lec .
Note that if you do not use this flag for sending encrypted datasets, data will
be sent unencrypted and may be re-encrypted with a different encryption key on
the receiving system, which will disable the ability to do a raw send to that
system for incrementals.
.It Fl e, -embed
Generate a more compact stream by using
.Sy WRITE_EMBEDDED
records for blocks which are stored more compactly on disk by the
.Sy embedded_data
pool feature.
This flag has no effect if the
.Sy embedded_data
feature is disabled.
The receiving system must have the
.Sy embedded_data
feature enabled.
If the
.Sy lz4_compress
feature is active on the sending system, then the receiving system must have
that feature enabled as well. Datasets that are sent with this flag may not be
received as encrypted datasets, since encrypted datasets cannot use the
.Sy embedded_data
feature.
See
.Xr zpool-features 5
for details on ZFS feature flags and the
.Sy embedded_data
feature.
.It Fl i Ar snapshot Ns | Ns Ar bookmark
Generate an incremental send stream.
The incremental source must be an earlier snapshot in the destination's history.
It will commonly be an earlier snapshot in the destination's file system, in
which case it can be specified as the last component of the name
.Po the
.Sy #
or
.Sy @
character and following
.Pc .
.Pp
If the incremental target is a clone, the incremental source can be the origin
snapshot, or an earlier snapshot in the origin's filesystem, or the origin's
origin, etc.
.It Fl n, -dryrun
Do a dry-run
.Pq Qq No-op
send.
Do not generate any actual send data.
This is useful in conjunction with the
.Fl v
or
.Fl P
flags to determine what data will be sent.
In this case, the verbose output will be written to standard output
.Po contrast with a non-dry-run, where the stream is written to standard output
and the verbose output goes to standard error
.Pc .
.It Fl v, -verbose
Print verbose information about the stream package generated.
This information includes a per-second report of how much data has been sent.
.El
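.Pp
For example, the following commands first perform a dry-run to report what
would be sent, and then send a raw stream of a hypothetical encrypted dataset
to another system without loading its keys; the dataset, host, and pool names
are placeholders:
.Bd -literal
# zfs send -nv pool/secure@tuesday
# zfs send -w pool/secure@tuesday | \e
    ssh backuphost zfs receive -u backuppool/secure
.Ed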
.It Xo
.Nm
.Cm send
.Op Fl Penv
.Fl t
.Ar receive_resume_token
.Xc
Creates a send stream which resumes an interrupted receive.
The
.Ar receive_resume_token
is the value of the
.Sy receive_resume_token
property on the filesystem or volume that was being
received into.
See the documentation for
.Sy zfs receive -s
for more details.
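.Pp
For example, assuming a receive into a hypothetical
.Em poolB/received/fs
was interrupted and saved with
.Nm zfs Cm receive Fl s ,
it can be resumed as follows when both pools are on the same system; the
dataset names are placeholders:
.Bd -literal
# TOKEN=$(zfs get -H -o value receive_resume_token poolB/received/fs)
# zfs send -t $TOKEN | zfs receive -s poolB/received/fs
.Ed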
.It Xo
.Nm
.Cm receive
.Op Fl Fnsuv
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
.Ar filesystem Ns | Ns Ar volume Ns | Ns Ar snapshot
.Xc
.It Xo
.Nm
.Cm receive
.Op Fl Fnsuv
.Op Fl d Ns | Ns Fl e
.Op Fl o Sy origin Ns = Ns Ar snapshot
.Op Fl o Ar property Ns = Ns Ar value
.Op Fl x Ar property
.Ar filesystem
.Xc
Creates a snapshot whose contents are as specified in the stream provided on
standard input.
If a full stream is received, then a new file system is created as well.
Streams are created using the
.Nm zfs Cm send
subcommand, which by default creates a full stream.
.Nm zfs Cm recv
can be used as an alias for
.Nm zfs Cm receive .
.Pp
If an incremental stream is received, then the destination file system must
already exist, and its most recent snapshot must match the incremental stream's
source.
For
.Sy zvols ,
the destination device link is destroyed and recreated, which means the
.Sy zvol
cannot be accessed during the
.Cm receive
operation.
.Pp
When a snapshot replication package stream that is generated by using the
.Nm zfs Cm send Fl R
command is received, any snapshots that do not exist on the sending location are
destroyed by using the
.Nm zfs Cm destroy Fl d
command.
.Pp
If
.Fl o Em property Ns = Ns Ar value
or
.Fl x Em property
is specified, it applies to the effective value of the property throughout
the entire subtree of replicated datasets. Effective property values will be
set
.Pq Fl o
or inherited
.Pq Fl x
on the topmost file system in the replicated subtree. In descendant datasets,
if the property is set by the send stream, it will be overridden by forcing the
property to be inherited from the topmost file system. Received properties
are retained in spite of being overridden and may be restored with
.Nm zfs Cm inherit Fl S .
Specifying
.Fl o Sy origin Ns = Ns Em snapshot
is a special case because, even though
.Sy origin
is a read-only property and cannot be set, it is possible to receive the send
stream as a clone of the given snapshot.
.Pp
Raw encrypted send streams (created with
.Nm zfs Cm send Fl w
) may only be received as is, and cannot be re-encrypted, decrypted, or
recompressed by the receive process. Unencrypted streams can be received as
encrypted datasets, either through inheritance or by specifying encryption
parameters with the
.Fl o
options.
.Pp
The name of the snapshot
.Pq and file system, if a full stream is received
that this subcommand creates depends on the argument type and the use of the
.Fl d
or
.Fl e
options.
.Pp
If the argument is a snapshot name, the specified
.Ar snapshot
is created.
If the argument is a file system or volume name, a snapshot with the same name
as the sent snapshot is created within the specified
.Ar filesystem
or
.Ar volume .
If neither the
.Fl d
nor the
.Fl e
option is specified, the provided target snapshot name is used exactly as
specified.
.Pp
The
.Fl d
and
.Fl e
options cause the file system name of the target snapshot to be determined by
appending a portion of the sent snapshot's name to the specified target
.Ar filesystem .
If the
.Fl d
option is specified, all but the first element of the sent snapshot's file
system path
.Pq usually the pool name
is used and any required intermediate file systems within the specified one are
created.
If the
.Fl e
option is specified, then only the last element of the sent snapshot's file
system name
.Pq i.e. the name of the source file system itself
is used as the target file system name.
.Bl -tag -width "-F"
.It Fl F
Force a rollback of the file system to the most recent snapshot before
performing the receive operation.
If receiving an incremental replication stream
.Po for example, one generated by
.Nm zfs Cm send Fl R Op Fl i Ns | Ns Fl I
.Pc ,
destroy snapshots and file systems that do not exist on the sending side.
.It Fl d
Discard the first element of the sent snapshot's file system name, using the
remaining elements to determine the name of the target file system for the new
snapshot as described in the paragraph above.
.It Fl e
Discard all but the last element of the sent snapshot's file system name, using
that element to determine the name of the target file system for the new
snapshot as described in the paragraph above.
.It Fl n
Do not actually receive the stream.
This can be useful in conjunction with the
.Fl v
option to verify the name the receive operation would use.
.It Fl o Sy origin Ns = Ns Ar snapshot
Forces the stream to be received as a clone of the given snapshot.
If the stream is a full send stream, this will create the filesystem
described by the stream as a clone of the specified snapshot.
The choice of snapshot does not affect the success or failure of the receive,
as long as the snapshot exists.
If the stream is an incremental send stream, all the normal verification will be
performed.
.It Fl o Em property Ns = Ns Ar value
Sets the specified property as if the command
.Nm zfs Cm set Em property Ns = Ns Ar value
was invoked immediately before the receive. When receiving a stream from
.Nm zfs Cm send Fl R ,
this option causes the property to be inherited by all descendant datasets, as
though
.Nm zfs Cm inherit Em property
was run on any descendant datasets that have this property set on the
sending system.
.Pp
Any editable property can be set at receive time. Set-once properties bound
to the received data, such as
.Sy normalization
and
.Sy casesensitivity ,
cannot be set at receive time even when the datasets are newly created by
.Nm zfs Cm receive .
Additionally, the settable properties
.Sy version
and
.Sy volsize
cannot be set at receive time.
.Pp
The
.Fl o
option may be specified multiple times, for different properties. An error
results if the same property is specified in multiple
.Fl o
or
.Fl x
options.
.It Fl s
If the receive is interrupted, save the partially received state, rather
than deleting it.
Interruption may be due to premature termination of the stream
.Po e.g. due to network failure or failure of the remote system
if the stream is being read over a network connection
.Pc ,
a checksum error in the stream, termination of the
.Nm zfs Cm receive
process, or unclean shutdown of the system.
.Pp
The receive can be resumed with a stream generated by
.Nm zfs Cm send Fl t Ar token ,
where the
.Ar token
is the value of the
.Sy receive_resume_token
property of the filesystem or volume which is received into.
.Pp
To use this flag, the storage pool must have the
.Sy extensible_dataset
feature enabled.
See
.Xr zpool-features 5
for details on ZFS feature flags.
.It Fl u
The file system that is associated with the received stream is not mounted.
.It Fl v
Print verbose information about the stream and the time required to perform the
receive operation.
.It Fl x Em property
Ensures that the effective value of the specified property after the
receive is unaffected by the value of that property in the send stream (if any),
as if the property had been excluded from the send stream.
.Pp
If the specified property is not present in the send stream, this option does
nothing.
.Pp
If a received property needs to be overridden, the effective value will be
set or inherited, depending on whether the property is inheritable or not.
.Pp
In the case of an incremental update,
.Fl x
leaves any existing local setting or explicit inheritance unchanged.
.Pp
All
.Fl o
restrictions on set-once and special properties apply equally to
.Fl x .
.El
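.Pp
For example, the following command receives a replicated stream while
overriding the
.Sy compression
property and excluding any received
.Sy mountpoint
property; the dataset names are placeholders:
.Bd -literal
# zfs send -R pool/users@today | \e
    zfs receive -o compression=on -x mountpoint poolB/backup/users
.Ed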
.It Xo
.Nm
.Cm receive
.Fl A
.Ar filesystem Ns | Ns Ar volume
.Xc
Abort an interrupted
.Nm zfs Cm receive Fl s ,
deleting its saved partially received state.
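.Pp
For example, the saved state of an interrupted receive into a hypothetical
.Em poolB/received/fs
can be discarded with:
.Bd -literal
# zfs receive -A poolB/received/fs
.Ed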
.It Xo
.Nm
.Cm allow
.Ar filesystem Ns | Ns Ar volume
.Xc
Displays permissions that have been delegated on the specified filesystem or
volume.
See the other forms of
.Nm zfs Cm allow
for more information.
.Pp
Delegations are supported under Linux with the exception of
.Sy mount ,
.Sy unmount ,
.Sy mountpoint ,
.Sy canmount ,
.Sy rename ,
and
.Sy share .
These permissions cannot be delegated because the Linux
.Xr mount 8
command restricts modifications of the global namespace to the root user.
.It Xo
.Nm
.Cm allow
.Op Fl dglu
.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ...
.Ar filesystem Ns | Ns Ar volume
.br
.Nm
.Cm allow
.Op Fl dl
.Fl e Ns | Ns Sy everyone
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ...
.Ar filesystem Ns | Ns Ar volume
.Xc
Delegates ZFS administration permission for the file systems to non-privileged
users.
.Bl -tag -width "-d"
.It Fl d
Allow only for the descendent file systems.
.It Fl e Ns | Ns Sy everyone
Specifies that the permissions be delegated to everyone.
.It Fl g Ar group Ns Oo , Ns Ar group Oc Ns ...
Explicitly specify that permissions are delegated to the group.
.It Fl l
Allow
.Qq locally
only for the specified file system.
.It Fl u Ar user Ns Oo , Ns Ar user Oc Ns ...
Explicitly specify that permissions are delegated to the user.
.It Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
Specifies to whom the permissions are delegated.
Multiple entities can be specified as a comma-separated list.
If neither of the
.Fl gu
options is specified, then the argument is interpreted preferentially as the
keyword
.Sy everyone ,
then as a user name, and lastly as a group name.
To specify a user or group named
.Qq everyone ,
use the
.Fl g
or
.Fl u
options.
To specify a group with the same name as a user, use the
.Fl g
option.
.It Xo
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ...
.Xc
The permissions to delegate.
Multiple permissions may be specified as a comma-separated list.
Permission names are the same as ZFS subcommand and property names.
See the property list below.
Property set names, which begin with
.Sy @ ,
may be specified.
See the
.Fl s
form below for details.
.El
.Pp
If neither of the
.Fl dl
options is specified, or both are, then the permissions are allowed for the
file system or volume, and all of its descendents.
.Pp
Permissions are generally the ability to use a ZFS subcommand or change a ZFS
property.
The following permissions are available:
.Bd -literal
NAME TYPE NOTES
allow subcommand Must also have the permission that is
being allowed
clone subcommand Must also have the 'create' ability and
'mount' ability in the origin file system
create subcommand Must also have the 'mount' ability
destroy subcommand Must also have the 'mount' ability
diff subcommand Allows lookup of paths within a dataset
given an object number, and the ability
to create snapshots necessary to
'zfs diff'.
load-key subcommand Allows loading and unloading of encryption key
(see 'zfs load-key' and 'zfs unload-key').
change-key subcommand Allows changing an encryption key via
'zfs change-key'.
mount subcommand Allows mount/umount of ZFS datasets
promote subcommand Must also have the 'mount' and 'promote'
ability in the origin file system
receive subcommand Must also have the 'mount' and 'create'
ability
rename subcommand Must also have the 'mount' and 'create'
ability in the new parent
rollback subcommand Must also have the 'mount' ability
send subcommand
share subcommand Allows sharing file systems over NFS
or SMB protocols
snapshot subcommand Must also have the 'mount' ability
groupquota other Allows accessing any groupquota@...
property
groupused other Allows reading any groupused@... property
userprop other Allows changing any user property
userquota other Allows accessing any userquota@...
property
userused other Allows reading any userused@... property
projectobjquota other Allows accessing any projectobjquota@...
property
projectquota other Allows accessing any projectquota@... property
projectobjused other Allows reading any projectobjused@... property
projectused other Allows reading any projectused@... property
aclinherit property
acltype property
atime property
canmount property
casesensitivity property
checksum property
compression property
copies property
devices property
exec property
filesystem_limit property
mountpoint property
nbmand property
normalization property
primarycache property
quota property
readonly property
recordsize property
refquota property
refreservation property
reservation property
secondarycache property
setuid property
sharenfs property
sharesmb property
snapdir property
snapshot_limit property
utf8only property
version property
volblocksize property
volsize property
vscan property
xattr property
zoned property
.Ed
.It Xo
.Nm
.Cm allow
.Fl c
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ...
.Ar filesystem Ns | Ns Ar volume
.Xc
Sets
.Qq create time
permissions.
These permissions are granted
.Pq locally
to the creator of any newly-created descendent file system.
.It Xo
.Nm
.Cm allow
.Fl s No @ Ns Ar setname
.Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ...
.Ar filesystem Ns | Ns Ar volume
.Xc
Defines or adds permissions to a permission set.
The set can be used by other
.Nm zfs Cm allow
commands for the specified file system and its descendents.
Sets are evaluated dynamically, so changes to a set are immediately reflected.
Permission sets follow the same naming restrictions as ZFS file systems, but the
name must begin with
.Sy @ ,
and can be no more than 64 characters long.
.It Xo
.Nm
.Cm unallow
.Op Fl dglru
.Ar user Ns | Ns Ar group Ns Oo , Ns Ar user Ns | Ns Ar group Oc Ns ...
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar volume
.br
.Nm
.Cm unallow
.Op Fl dlr
.Fl e Ns | Ns Sy everyone
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar volume
.br
.Nm
.Cm unallow
.Op Fl r
.Fl c
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar volume
.Xc
Removes permissions that were granted with the
.Nm zfs Cm allow
command.
No permissions are explicitly denied, so other permissions granted are still in
effect; for example, a permission that is also granted by an ancestor remains
in effect.
If no permissions are specified, then all permissions for the specified
.Ar user ,
.Ar group ,
or
.Sy everyone
are removed.
Specifying
.Sy everyone
.Po or using the
.Fl e
option
.Pc
only removes the permissions that were granted to everyone, not all permissions
for every user and group.
See the
.Nm zfs Cm allow
command for a description of the
.Fl ldugec
options.
.Bl -tag -width "-r"
.It Fl r
Recursively remove the permissions from this file system and all descendents.
.El
.It Xo
.Nm
.Cm unallow
.Op Fl r
.Fl s No @ Ns Ar setname
.Oo Ar perm Ns | Ns @ Ns Ar setname Ns Oo , Ns Ar perm Ns | Ns @ Ns
.Ar setname Oc Ns ... Oc
.Ar filesystem Ns | Ns Ar volume
.Xc
Removes permissions from a permission set.
If no permissions are specified, then all permissions are removed, thus removing
the set entirely.
.It Xo
.Nm
.Cm hold
.Op Fl r
.Ar tag Ar snapshot Ns ...
.Xc
Adds a single reference, named with the
.Ar tag
argument, to the specified snapshot or snapshots.
Each snapshot has its own tag namespace, and tags must be unique within that
space.
.Pp
If a hold exists on a snapshot, attempts to destroy that snapshot by using the
.Nm zfs Cm destroy
command return
.Er EBUSY .
.Bl -tag -width "-r"
.It Fl r
Specifies that a hold with the given tag is applied recursively to the snapshots
of all descendent file systems.
.El
.It Xo
.Nm
.Cm holds
.Op Fl r
.Ar snapshot Ns ...
.Xc
Lists all existing user references for the given snapshot or snapshots.
.Bl -tag -width "-r"
.It Fl r
Lists the holds that are set on the named descendent snapshots, in addition to
listing the holds on the named snapshot.
.El
.It Xo
.Nm
.Cm release
.Op Fl r
.Ar tag Ar snapshot Ns ...
.Xc
Removes a single reference, named with the
.Ar tag
argument, from the specified snapshot or snapshots.
The tag must already exist for each snapshot.
If a hold exists on a snapshot, attempts to destroy that snapshot by using the
.Nm zfs Cm destroy
command return
.Er EBUSY .
.Bl -tag -width "-r"
.It Fl r
Recursively releases a hold with the given tag on the snapshots of all
descendent file systems.
.El
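.Pp
For example, the following commands place a hold named
.Sy keep
on a snapshot and its descendent snapshots, list the holds, and later release
them; the dataset and tag names are placeholders:
.Bd -literal
# zfs hold -r keep pool/home@yesterday
# zfs holds pool/home@yesterday
# zfs release -r keep pool/home@yesterday
.Ed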
.It Xo
.Nm
.Cm diff
.Op Fl FHt
.Ar snapshot Ar snapshot Ns | Ns Ar filesystem
.Xc
Display the difference between a snapshot of a given filesystem and another
snapshot of that filesystem from a later time or the current contents of the
filesystem.
The first column is a character indicating the type of change; the other
columns indicate pathname, new pathname
.Pq in case of rename ,
change in link count, and optionally file type and/or change time.
The types of change are:
.Bd -literal
- The path has been removed
+ The path has been created
M The path has been modified
R The path has been renamed
.Ed
.Bl -tag -width "-F"
.It Fl F
Display an indication of the type of file, in a manner similar to the
.Fl F
option of
.Xr ls 1 .
.Bd -literal
B Block device
C Character device
/ Directory
> Door
| Named pipe
@ Symbolic link
P Event port
= Socket
F Regular file
.Ed
.It Fl H
Give more parsable tab-separated output, without header lines and without
arrows.
.It Fl t
Display the path's inode change time as the first column of output.
.El
.It Xo
.Nm
.Cm program
.Op Fl jn
.Op Fl t Ar timeout
.Op Fl m Ar memory_limit
.Ar pool script
.Op Ar arg1 No ...
.Xc
Executes
.Ar script
as a ZFS channel program on
.Ar pool .
The ZFS channel
program interface allows ZFS administrative operations to be run
programmatically via a Lua script.
The entire script is executed atomically, with no other administrative
operations taking effect concurrently.
A library of ZFS calls is made available to channel program scripts.
Channel programs may only be run with root privileges.
.Pp
For full documentation of the ZFS channel program interface, see the manual
page for
.Xr zfs-program 8 .
.Bl -tag -width ""
.It Fl j
Display channel program output in JSON format. When this flag is specified, an
empty standard output indicates that the channel program encountered an error.
The details of such an error will be printed to standard error in plain text.
.It Fl n
Executes a read-only channel program, which runs faster.
The program cannot change on-disk state by calling functions from
the zfs.sync submodule.
The program can be used to gather information such as property values and to
determine whether changes would succeed (zfs.check.*).
Without this flag, all pending changes must be synced to disk before
a channel program can complete.
.It Fl t Ar timeout
Execution time limit, in milliseconds.
If a channel program executes for longer than the provided timeout, it will
be stopped and an error will be returned.
The default timeout is 1000 ms, and can be set to a maximum of 10000 ms.
.It Fl m Ar memory_limit
Memory limit, in bytes.
If a channel program attempts to allocate more memory than the given limit,
it will be stopped and an error returned.
The default memory limit is 10 MB, and can be set to a maximum of 100 MB.
.Pp
All remaining argument strings are passed directly to the channel program as
arguments.
See
.Xr zfs-program 8
for more information.
.El
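.Pp
For example, the following command runs a hypothetical channel program in
read-only mode with a five second timeout, passing the dataset name as an
argument; the script path, pool, and dataset names are placeholders:
.Bd -literal
# zfs program -n -t 5000 tank /root/check.lua tank/home
.Ed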
.It Xo
.Nm
.Cm load-key
.Op Fl nr
.Op Fl L Ar keylocation
.Fl a | Ar filesystem
.Xc
Load the key for
.Ar filesystem ,
allowing it and all children that inherit the
.Sy keylocation
property to be accessed. The key will be expected in the format specified by the
.Sy keyformat
and location specified by the
.Sy keylocation
property. Note that if the
.Sy keylocation
is set to
.Sy prompt ,
the terminal will interactively wait for the key to be entered. Loading a key
will not automatically mount the dataset. If that functionality is desired,
.Nm zfs Cm mount Fl l
will ask for the key and mount the dataset. Once the key is loaded, the
.Sy keystatus
property will become
.Sy available .
.Bl -tag -width "-r"
.It Fl r
Recursively loads the keys for the specified filesystem and all descendent
encryption roots.
.It Fl a
Loads the keys for all encryption roots in all imported pools.
.It Fl n
Do a dry-run
.Pq Qq No-op
load-key. This will cause zfs to simply check that the
provided key is correct. This command may be run even if the key is already
loaded.
.It Fl L Ar keylocation
Use
.Ar keylocation
instead of the
.Sy keylocation
property. This will not change the value of the property on the dataset. Note
that if used with either
.Fl r
or
.Fl a ,
.Ar keylocation
may only be given as
.Sy prompt .
.El
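.Pp
For example, the following commands verify a key with a dry-run and then load
the keys for all encryption roots below a hypothetical pool
.Em tank ;
the names are placeholders:
.Bd -literal
# zfs load-key -n tank/secure
# zfs load-key -r tank
.Ed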
.It Xo
.Nm
.Cm unload-key
.Op Fl r
.Fl a | Ar filesystem
.Xc
Unloads a key from ZFS, removing the ability to access the dataset and all of
its children that inherit the
.Sy keylocation
property. This requires that the dataset is not currently open or mounted. Once
the key is unloaded, the
.Sy keystatus
property will become
.Sy unavailable .
.Bl -tag -width "-r"
.It Fl r
Recursively unloads the keys for the specified filesystem and all descendent
encryption roots.
.It Fl a
Unloads the keys for all encryption roots in all imported pools.
.El
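.Pp
For example, the following commands unmount a hypothetical encrypted dataset
and then unload its key, or unload every loaded key at once:
.Bd -literal
# zfs unmount tank/secure
# zfs unload-key tank/secure
# zfs unload-key -a
.Ed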
.It Xo
.Nm
.Cm change-key
.Op Fl l
.Op Fl o Ar keylocation Ns = Ns Ar value
.Op Fl o Ar keyformat Ns = Ns Ar value
.Op Fl o Ar pbkdf2iters Ns = Ns Ar value
.Ar filesystem
.Xc
.It Xo
.Nm
.Cm change-key
.Fl i
.Op Fl l
.Ar filesystem
.Xc
Allows a user to change the encryption key used to access a dataset. This
command requires that the existing key for the dataset is already loaded into
ZFS. This command may also be used to change the
.Sy keylocation ,
.Sy keyformat ,
and
.Sy pbkdf2iters
properties as needed. If the dataset was not previously an encryption root, it
will become one. Alternatively, the
.Fl i
flag may be provided to cause an encryption root to inherit the parent's key
instead.
.Bl -tag -width "-r"
.It Fl l
Ensures the key is loaded before attempting to change the key. This is
effectively equivalent to
.Qq Nm zfs Cm load-key Ar filesystem ; Nm zfs Cm change-key Ar filesystem
.It Fl o Ar property Ns = Ns Ar value
Allows the user to set encryption key properties
.Po
.Sy keyformat ,
.Sy keylocation ,
and
.Sy pbkdf2iters
.Pc
while changing the key. This is the only way to alter
.Sy keyformat
and
.Sy pbkdf2iters
after the dataset has been created.
.It Fl i
Indicates that zfs should make
.Ar filesystem
inherit the key of its parent. Note that this command can only be run on an
encryption root that has an encrypted parent.
.El
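.Pp
For example, the following commands change the key of a hypothetical
encryption root to a new passphrase read from the terminal, and make a child
encryption root inherit its parent's key; the dataset names are placeholders:
.Bd -literal
# zfs change-key -o keyformat=passphrase -o keylocation=prompt tank/secure
# zfs change-key -i tank/secure/project
.Ed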
.El
.Sh EXIT STATUS
The
.Nm
utility exits 0 on success, 1 if an error occurs, and 2 if invalid command line
options were specified.
.Sh EXAMPLES
.Bl -tag -width ""
.It Sy Example 1 No Creating a ZFS File System Hierarchy
The following commands create a file system named
.Em pool/home
and a file system named
.Em pool/home/bob .
The mount point
.Pa /export/home
is set for the parent file system, and is automatically inherited by the child
file system.
.Bd -literal
# zfs create pool/home
# zfs set mountpoint=/export/home pool/home
# zfs create pool/home/bob
.Ed
.It Sy Example 2 No Creating a ZFS Snapshot
The following command creates a snapshot named
.Sy yesterday .
This snapshot is mounted on demand in the
.Pa .zfs/snapshot
directory at the root of the
.Em pool/home/bob
file system.
.Bd -literal
# zfs snapshot pool/home/bob@yesterday
.Ed
.It Sy Example 3 No Creating and Destroying Multiple Snapshots
The following command creates snapshots named
.Sy yesterday
of
.Em pool/home
and all of its descendent file systems.
Each snapshot is mounted on demand in the
.Pa .zfs/snapshot
directory at the root of its file system.
The second command destroys the newly created snapshots.
.Bd -literal
# zfs snapshot -r pool/home@yesterday
# zfs destroy -r pool/home@yesterday
.Ed
.It Sy Example 4 No Disabling and Enabling File System Compression
The following command disables the
.Sy compression
property for all file systems under
.Em pool/home .
The next command explicitly enables
.Sy compression
for
.Em pool/home/anne .
.Bd -literal
# zfs set compression=off pool/home
# zfs set compression=on pool/home/anne
.Ed
.It Sy Example 5 No Listing ZFS Datasets
The following command lists all active file systems and volumes in the system.
Snapshots are displayed if the
.Sy listsnaps
property is
.Sy on .
The default is
.Sy off .
See
.Xr zpool 8
for more information on pool properties.
.Bd -literal
# zfs list
NAME USED AVAIL REFER MOUNTPOINT
pool 450K 457G 18K /pool
pool/home 315K 457G 21K /export/home
pool/home/anne 18K 457G 18K /export/home/anne
pool/home/bob 276K 457G 276K /export/home/bob
.Ed
.It Sy Example 6 No Setting a Quota on a ZFS File System
The following command sets a quota of 50 Gbytes for
.Em pool/home/bob .
.Bd -literal
# zfs set quota=50G pool/home/bob
.Ed
.It Sy Example 7 No Listing ZFS Properties
The following command lists all properties for
.Em pool/home/bob .
.Bd -literal
# zfs get all pool/home/bob
NAME PROPERTY VALUE SOURCE
pool/home/bob type filesystem -
pool/home/bob creation Tue Jul 21 15:53 2009 -
pool/home/bob used 21K -
pool/home/bob available 20.0G -
pool/home/bob referenced 21K -
pool/home/bob compressratio 1.00x -
pool/home/bob mounted yes -
pool/home/bob quota 20G local
pool/home/bob reservation none default
pool/home/bob recordsize 128K default
pool/home/bob mountpoint /pool/home/bob default
pool/home/bob sharenfs off default
pool/home/bob checksum on default
pool/home/bob compression on local
pool/home/bob atime on default
pool/home/bob devices on default
pool/home/bob exec on default
pool/home/bob setuid on default
pool/home/bob readonly off default
pool/home/bob zoned off default
pool/home/bob snapdir hidden default
pool/home/bob acltype off default
pool/home/bob aclinherit restricted default
pool/home/bob canmount on default
pool/home/bob xattr on default
pool/home/bob copies 1 default
pool/home/bob version 4 -
pool/home/bob utf8only off -
pool/home/bob normalization none -
pool/home/bob casesensitivity sensitive -
pool/home/bob vscan off default
pool/home/bob nbmand off default
pool/home/bob sharesmb off default
pool/home/bob refquota none default
pool/home/bob refreservation none default
pool/home/bob primarycache all default
pool/home/bob secondarycache all default
pool/home/bob usedbysnapshots 0 -
pool/home/bob usedbydataset 21K -
pool/home/bob usedbychildren 0 -
pool/home/bob usedbyrefreservation 0 -
.Ed
.Pp
The following command gets a single property value.
.Bd -literal
# zfs get -H -o value compression pool/home/bob
on
.Ed
.Pp
The following command lists all properties with local settings for
.Em pool/home/bob .
.Bd -literal
# zfs get -r -s local -o name,property,value all pool/home/bob
NAME PROPERTY VALUE
pool/home/bob quota 20G
pool/home/bob compression on
.Ed
.It Sy Example 8 No Rolling Back a ZFS File System
The following command reverts the contents of
.Em pool/home/anne
to the snapshot named
.Sy yesterday ,
deleting all intermediate snapshots.
.Bd -literal
# zfs rollback -r pool/home/anne@yesterday
.Ed
.It Sy Example 9 No Creating a ZFS Clone
The following command creates a writable file system whose initial contents are
the same as
.Em pool/home/bob@yesterday .
.Bd -literal
# zfs clone pool/home/bob@yesterday pool/clone
.Ed
.It Sy Example 10 No Promoting a ZFS Clone
The following commands illustrate how to test out changes to a file system, and
then replace the original file system with the changed one, using clones, clone
promotion, and renaming:
.Bd -literal
# zfs create pool/project/production
populate /pool/project/production with data
# zfs snapshot pool/project/production@today
# zfs clone pool/project/production@today pool/project/beta
make changes to /pool/project/beta and test them
# zfs promote pool/project/beta
# zfs rename pool/project/production pool/project/legacy
# zfs rename pool/project/beta pool/project/production
once the legacy version is no longer needed, it can be destroyed
# zfs destroy pool/project/legacy
.Ed
.It Sy Example 11 No Inheriting ZFS Properties
The following command causes
.Em pool/home/bob
and
.Em pool/home/anne
to inherit the
.Sy checksum
property from their parent.
.Bd -literal
# zfs inherit checksum pool/home/bob pool/home/anne
.Ed
.It Sy Example 12 No Remotely Replicating ZFS Data
The following commands send a full stream and then an incremental stream to a
remote machine, restoring them into
.Em poolB/received/fs@a
and
.Em poolB/received/fs@b ,
respectively.
.Em poolB
must contain the file system
.Em poolB/received ,
and must not initially contain
.Em poolB/received/fs .
.Bd -literal
# zfs send pool/fs@a | \e
ssh host zfs receive poolB/received/fs@a
# zfs send -i a pool/fs@b | \e
ssh host zfs receive poolB/received/fs
.Ed
.It Sy Example 13 No Using the zfs receive -d Option
The following command sends a full stream of
.Em poolA/fsA/fsB@snap
to a remote machine, receiving it into
.Em poolB/received/fsA/fsB@snap .
The
.Em fsA/fsB@snap
portion of the received snapshot's name is determined from the name of the sent
snapshot.
.Em poolB
must contain the file system
.Em poolB/received .
If
.Em poolB/received/fsA
does not exist, it is created as an empty file system.
.Bd -literal
# zfs send poolA/fsA/fsB@snap | \e
ssh host zfs receive -d poolB/received
.Ed
.It Sy Example 14 No Setting User Properties
The following example sets the user-defined
.Sy com.example:department
property for a dataset.
.Bd -literal
# zfs set com.example:department=12345 tank/accounting
.Ed
.It Sy Example 15 No Performing a Rolling Snapshot
The following example shows how to maintain a history of snapshots with a
consistent naming scheme.
To keep a week's worth of snapshots, the user destroys the oldest snapshot,
renames the remaining snapshots, and then creates a new snapshot, as follows:
.Bd -literal
# zfs destroy -r pool/users@7daysago
# zfs rename -r pool/users@6daysago @7daysago
# zfs rename -r pool/users@5daysago @6daysago
# zfs rename -r pool/users@4daysago @5daysago
# zfs rename -r pool/users@3daysago @4daysago
# zfs rename -r pool/users@2daysago @3daysago
# zfs rename -r pool/users@yesterday @2daysago
# zfs rename -r pool/users@today @yesterday
# zfs snapshot -r pool/users@today
.Ed
.It Sy Example 16 No Setting sharenfs Property Options on a ZFS File System
The following commands show how to set
.Sy sharenfs
property options to enable
.Sy rw
access for a set of
.Sy IP
addresses and to enable root access for system
.Sy neo
on the
.Em tank/home
file system.
.Bd -literal
# zfs set sharenfs='rw=@123.123.0.0/16,root=neo' tank/home
.Ed
.Pp
If you are using
.Sy DNS
for host name resolution, specify the fully qualified hostname.
.It Sy Example 17 No Delegating ZFS Administration Permissions on a ZFS Dataset
The following example shows how to set permissions so that user
.Sy cindys
can create, destroy, mount, and take snapshots on
.Em tank/cindys .
The permissions on
.Em tank/cindys
are also displayed.
.Bd -literal
# zfs allow cindys create,destroy,mount,snapshot tank/cindys
# zfs allow tank/cindys
---- Permissions on tank/cindys --------------------------------------
Local+Descendent permissions:
user cindys create,destroy,mount,snapshot
.Ed
.Pp
Because the
.Em tank/cindys
mount point permission is set to 755 by default, user
.Sy cindys
will be unable to mount file systems under
.Em tank/cindys .
Add an ACE similar to the following syntax to provide mount point access:
.Bd -literal
# chmod A+user:cindys:add_subdirectory:allow /tank/cindys
.Ed
.It Sy Example 18 No Delegating Create Time Permissions on a ZFS Dataset
The following example shows how to grant anyone in the group
.Sy staff
permission to create file systems in
.Em tank/users .
This syntax also allows staff members to destroy their own file systems, but not
destroy anyone else's file system.
The permissions on
.Em tank/users
are also displayed.
.Bd -literal
# zfs allow staff create,mount tank/users
# zfs allow -c destroy tank/users
# zfs allow tank/users
---- Permissions on tank/users ---------------------------------------
Permission sets:
destroy
Local+Descendent permissions:
group staff create,mount
.Ed
.It Sy Example 19 No Defining and Granting a Permission Set on a ZFS Dataset
The following example shows how to define and grant a permission set on the
.Em tank/users
file system.
The permissions on
.Em tank/users
are also displayed.
.Bd -literal
# zfs allow -s @pset create,destroy,snapshot,mount tank/users
# zfs allow staff @pset tank/users
# zfs allow tank/users
---- Permissions on tank/users ---------------------------------------
Permission sets:
@pset create,destroy,mount,snapshot
Local+Descendent permissions:
group staff @pset
.Ed
.It Sy Example 20 No Delegating Property Permissions on a ZFS Dataset
The following example shows how to grant the ability to set quotas and
reservations
on the
.Em users/home
file system.
The permissions on
.Em users/home
are also displayed.
.Bd -literal
# zfs allow cindys quota,reservation users/home
# zfs allow users/home
---- Permissions on users/home ---------------------------------------
Local+Descendent permissions:
user cindys quota,reservation
cindys% zfs set quota=10G users/home/marks
cindys% zfs get quota users/home/marks
NAME PROPERTY VALUE SOURCE
users/home/marks quota 10G local
.Ed
.It Sy Example 21 No Removing ZFS Delegated Permissions on a ZFS Dataset
The following example shows how to remove the snapshot permission from the
.Sy staff
group on the
.Em tank/users
file system.
The permissions on
.Em tank/users
are also displayed.
.Bd -literal
# zfs unallow staff snapshot tank/users
# zfs allow tank/users
---- Permissions on tank/users ---------------------------------------
Permission sets:
@pset create,destroy,mount,snapshot
Local+Descendent permissions:
group staff @pset
.Ed
.It Sy Example 22 No Showing the differences between a snapshot and a ZFS Dataset
The following example shows how to see what has changed between a prior
snapshot of a ZFS dataset and its current state.
The
.Fl F
option is used to indicate type information for the files affected.
.Bd -literal
# zfs diff -F tank/test@before tank/test
M / /tank/test/
M F /tank/test/linked (+1)
R F /tank/test/oldname -> /tank/test/newname
- F /tank/test/deleted
+ F /tank/test/created
M F /tank/test/modified
.Ed
.It Sy Example 23 No Creating a bookmark
The following example creates a bookmark to a snapshot. This bookmark
can then be used instead of a snapshot in send streams.
.Bd -literal
# zfs bookmark rpool@snapshot rpool#bookmark
.Ed
.It Sy Example 24 No Setting sharesmb Property Options on a ZFS File System
The following example shows how to share an SMB filesystem through ZFS. Note
that a user and their password must be given.
.Bd -literal
# smbmount //127.0.0.1/share_tmp /mnt/tmp \\
-o user=workgroup/turbo,password=obrut,uid=1000
.Ed
.Pp
Minimal
.Em /etc/samba/smb.conf
configuration required:
.Pp
Samba will need to listen to 'localhost' (127.0.0.1) for the ZFS utilities to
communicate with Samba. This is the default behavior for most Linux
distributions.
.Pp
Samba must be able to authenticate a user. This can be done in a number of
ways, depending on whether you are using the system password file, LDAP, or
the Samba-specific smbpasswd file. How to do this is outside the scope of this
manual.
Please refer to the
.Xr smb.conf 5
man page for more information.
.Pp
See the
.Sy USERSHARE section
of the
.Xr smb.conf 5
man page for all configuration options in case you need to modify any options
of the share afterwards. Note that any changes made with the
.Xr net 8
command will be undone if the share is ever unshared (such as at reboot).
.El
.Sh INTERFACE STABILITY
.Sy Committed .
.Sh SEE ALSO
.Xr attr 1 ,
.Xr gzip 1 ,
.Xr ssh 1 ,
.Xr chmod 2 ,
.Xr fsync 2 ,
.Xr stat 2 ,
.Xr write 2 ,
.Xr acl 5 ,
.Xr attributes 5 ,
.Xr exports 5 ,
.Xr exportfs 8 ,
.Xr mount 8 ,
.Xr net 8 ,
.Xr selinux 8 ,
.Xr zpool 8
diff --git a/man/man8/zpool.8 b/man/man8/zpool.8
index 6480ca367b60..c0e8b24fe578 100644
--- a/man/man8/zpool.8
+++ b/man/man8/zpool.8
@@ -1,2404 +1,2451 @@
.\"
.\" CDDL HEADER START
.\"
.\" The contents of this file are subject to the terms of the
.\" Common Development and Distribution License (the "License").
.\" You may not use this file except in compliance with the License.
.\"
.\" You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
.\" or http://www.opensolaris.org/os/licensing.
.\" See the License for the specific language governing permissions
.\" and limitations under the License.
.\"
.\" When distributing Covered Code, include this CDDL HEADER in each
.\" file and include the License file at usr/src/OPENSOLARIS.LICENSE.
.\" If applicable, add the following below this CDDL HEADER, with the
.\" fields enclosed by brackets "[]" replaced with your own identifying
.\" information: Portions Copyright [yyyy] [name of copyright owner]
.\"
.\" CDDL HEADER END
.\"
.\"
.\" Copyright (c) 2007, Sun Microsystems, Inc. All Rights Reserved.
-.\" Copyright (c) 2013 by Delphix. All rights reserved.
+.\" Copyright (c) 2012, 2017 by Delphix. All rights reserved.
.\" Copyright (c) 2012 Cyril Plisko. All Rights Reserved.
.\" Copyright (c) 2017 Datto Inc.
.\" Copyright (c) 2017 George Melikov. All Rights Reserved.
.\" Copyright 2017 Nexenta Systems, Inc.
.\" Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
.\"
.Dd March 9, 2018
.Dt ZPOOL 8 SMM
.Os Linux
.Sh NAME
.Nm zpool
.Nd configure ZFS storage pools
.Sh SYNOPSIS
.Nm
.Fl ?
.Nm
.Cm add
.Op Fl fgLnP
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool vdev Ns ...
.Nm
.Cm attach
.Op Fl f
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool device new_device
.Nm
.Cm clear
.Ar pool
.Op Ar device
.Nm
.Cm create
.Op Fl dfn
.Op Fl m Ar mountpoint
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Oo Fl o Ar feature@feature Ns = Ns Ar value Oc
.Oo Fl O Ar file-system-property Ns = Ns Ar value Oc Ns ...
.Op Fl R Ar root
.Ar pool vdev Ns ...
.Nm
.Cm destroy
.Op Fl f
.Ar pool
.Nm
.Cm detach
.Ar pool device
.Nm
.Cm events
.Op Fl vHf Oo Ar pool Oc | Fl c
.Nm
.Cm export
.Op Fl a
.Op Fl f
.Ar pool Ns ...
.Nm
.Cm get
.Op Fl Hp
.Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns ...
.Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns ...
.Ar pool Ns ...
.Nm
.Cm history
.Op Fl il
.Oo Ar pool Oc Ns ...
.Nm
.Cm import
.Op Fl D
.Op Fl d Ar dir Ns | Ns device
.Nm
.Cm import
.Fl a
.Op Fl DflmN
.Op Fl F Oo Fl n Oc Oo Fl T Oc Oo Fl X Oc
.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns device
.Op Fl o Ar mntopts
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Op Fl R Ar root
.Nm
.Cm import
.Op Fl Dflm
.Op Fl F Oo Fl n Oc Oo Fl T Oc Oo Fl X Oc
.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns device
.Op Fl o Ar mntopts
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Op Fl R Ar root
.Op Fl s
.Ar pool Ns | Ns Ar id
.Op Ar newpool Oo Fl t Oc
.Nm
.Cm iostat
.Op Oo Oo Fl c Ar SCRIPT Oc Oo Fl lq Oc Oc Ns | Ns Fl rw
.Op Fl T Sy u Ns | Ns Sy d
.Op Fl ghHLpPvy
.Oo Oo Ar pool Ns ... Oc Ns | Ns Oo Ar pool vdev Ns ... Oc Ns | Ns Oo Ar vdev Ns ... Oc Oc
.Op Ar interval Op Ar count
.Nm
.Cm labelclear
.Op Fl f
.Ar device
.Nm
.Cm list
.Op Fl HgLpPv
.Op Fl o Ar property Ns Oo , Ns Ar property Oc Ns ...
.Op Fl T Sy u Ns | Ns Sy d
.Oo Ar pool Oc Ns ...
.Op Ar interval Op Ar count
.Nm
.Cm offline
.Op Fl f
.Op Fl t
.Ar pool Ar device Ns ...
.Nm
.Cm online
.Op Fl e
.Ar pool Ar device Ns ...
.Nm
.Cm reguid
.Ar pool
.Nm
.Cm reopen
.Op Fl n
.Ar pool
.Nm
.Cm remove
+.Op Fl np
.Ar pool Ar device Ns ...
.Nm
+.Cm remove
+.Fl s
+.Ar pool
+.Nm
.Cm replace
.Op Fl f
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool Ar device Op Ar new_device
.Nm
.Cm scrub
.Op Fl s | Fl p
.Ar pool Ns ...
.Nm
.Cm set
.Ar property Ns = Ns Ar value
.Ar pool
.Nm
.Cm split
.Op Fl gLlnP
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Op Fl R Ar root
.Ar pool newpool
.Oo Ar device Oc Ns ...
.Nm
.Cm status
.Oo Fl c Ar SCRIPT Oc
.Op Fl gLPvxD
.Op Fl T Sy u Ns | Ns Sy d
.Oo Ar pool Oc Ns ...
.Op Ar interval Op Ar count
.Nm
.Cm sync
.Oo Ar pool Oc Ns ...
.Nm
.Cm upgrade
.Nm
.Cm upgrade
.Fl v
.Nm
.Cm upgrade
.Op Fl V Ar version
.Fl a Ns | Ns Ar pool Ns ...
.Sh DESCRIPTION
The
.Nm
command configures ZFS storage pools.
A storage pool is a collection of devices that provides physical storage and
data replication for ZFS datasets.
All datasets within a storage pool share the same space.
See
.Xr zfs 8
for information on managing datasets.
.Ss Virtual Devices (vdevs)
A "virtual device" describes a single device or a collection of devices
organized according to certain performance and fault characteristics.
The following virtual devices are supported:
.Bl -tag -width Ds
.It Sy disk
A block device, typically located under
.Pa /dev .
ZFS can use individual slices or partitions, though the recommended mode of
operation is to use whole disks.
A disk can be specified by a full path, or it can be a shorthand name
.Po the relative portion of the path under
.Pa /dev
.Pc .
A whole disk can be specified by omitting the slice or partition designation.
For example,
.Pa sda
is equivalent to
.Pa /dev/sda .
When given a whole disk, ZFS automatically labels the disk, if necessary.
.It Sy file
A regular file.
The use of files as a backing store is strongly discouraged.
It is designed primarily for experimental purposes, as the fault tolerance of a
file is only as good as the file system of which it is a part.
A file must be specified by a full path.
.It Sy mirror
A mirror of two or more devices.
Data is replicated in an identical fashion across all components of a mirror.
A mirror with N disks of size X can hold X bytes and can withstand (N-1) devices
failing before data integrity is compromised.
.It Sy raidz , raidz1 , raidz2 , raidz3
A variation on RAID-5 that allows for better distribution of parity and
eliminates the RAID-5
.Qq write hole
.Pq in which data and parity become inconsistent after a power loss .
Data and parity is striped across all disks within a raidz group.
.Pp
A raidz group can have single-, double-, or triple-parity, meaning that the
raidz group can sustain one, two, or three failures, respectively, without
losing any data.
The
.Sy raidz1
vdev type specifies a single-parity raidz group; the
.Sy raidz2
vdev type specifies a double-parity raidz group; and the
.Sy raidz3
vdev type specifies a triple-parity raidz group.
The
.Sy raidz
vdev type is an alias for
.Sy raidz1 .
.Pp
A raidz group with N disks of size X with P parity disks can hold approximately
(N-P)*X bytes and can withstand P device(s) failing before data integrity is
compromised.
The minimum number of devices in a raidz group is one more than the number of
parity disks.
The recommended number is between 3 and 9 to help increase performance.
.It Sy spare
A special pseudo-vdev which keeps track of available hot spares for a pool.
For more information, see the
.Sx Hot Spares
section.
.It Sy log
A separate intent log device.
If more than one log device is specified, then writes are load-balanced between
devices.
Log devices can be mirrored.
However, raidz vdev types are not supported for the intent log.
For more information, see the
.Sx Intent Log
section.
.It Sy cache
A device used to cache storage pool data.
A cache device cannot be configured as a mirror or raidz group.
For more information, see the
.Sx Cache Devices
section.
.El
.Pp
Virtual devices cannot be nested, so a mirror or raidz virtual device can only
contain files or disks.
Mirrors of mirrors
.Pq or other combinations
are not allowed.
.Pp
A pool can have any number of virtual devices at the top of the configuration
.Po known as
.Qq root vdevs
.Pc .
Data is dynamically distributed across all top-level devices to balance data
among devices.
As new virtual devices are added, ZFS automatically places data on the newly
available devices.
.Pp
Virtual devices are specified one at a time on the command line, separated by
whitespace.
The keywords
.Sy mirror
and
.Sy raidz
are used to distinguish where a group ends and another begins.
For example, the following creates two root vdevs, each a mirror of two disks:
.Bd -literal
# zpool create mypool mirror sda sdb mirror sdc sdd
.Ed
.Ss Device Failure and Recovery
ZFS supports a rich set of mechanisms for handling device failure and data
corruption.
All metadata and data is checksummed, and ZFS automatically repairs bad data
from a good copy when corruption is detected.
.Pp
In order to take advantage of these features, a pool must make use of some form
of redundancy, using either mirrored or raidz groups.
While ZFS supports running in a non-redundant configuration, where each root
vdev is simply a disk or file, this is strongly discouraged.
A single case of bit corruption can render some or all of your data unavailable.
.Pp
A pool's health status is described by one of three states: online, degraded,
or faulted.
An online pool has all devices operating normally.
A degraded pool is one in which one or more devices have failed, but the data is
still available due to a redundant configuration.
A faulted pool has corrupted metadata, or one or more faulted devices, and
insufficient replicas to continue functioning.
.Pp
The health of the top-level vdev, such as mirror or raidz device, is
potentially impacted by the state of its associated vdevs, or component
devices.
A top-level vdev or component device is in one of the following states:
.Bl -tag -width "DEGRADED"
.It Sy DEGRADED
One or more top-level vdevs is in the degraded state because one or more
component devices are offline.
Sufficient replicas exist to continue functioning.
.Pp
One or more component devices is in the degraded or faulted state, but
sufficient replicas exist to continue functioning.
The underlying conditions are as follows:
.Bl -bullet
.It
The number of checksum errors exceeds acceptable levels and the device is
degraded as an indication that something may be wrong.
ZFS continues to use the device as necessary.
.It
The number of I/O errors exceeds acceptable levels.
The device could not be marked as faulted because there are insufficient
replicas to continue functioning.
.El
.It Sy FAULTED
One or more top-level vdevs is in the faulted state because one or more
component devices are offline.
Insufficient replicas exist to continue functioning.
.Pp
One or more component devices is in the faulted state, and insufficient
replicas exist to continue functioning.
The underlying conditions are as follows:
.Bl -bullet
.It
The device could be opened, but the contents did not match expected values.
.It
The number of I/O errors exceeds acceptable levels and the device is faulted to
prevent further use of the device.
.El
.It Sy OFFLINE
The device was explicitly taken offline by the
.Nm zpool Cm offline
command.
.It Sy ONLINE
The device is online and functioning.
.It Sy REMOVED
The device was physically removed while the system was running.
Device removal detection is hardware-dependent and may not be supported on all
platforms.
.It Sy UNAVAIL
The device could not be opened.
If a pool is imported when a device was unavailable, then the device will be
identified by a unique identifier instead of its path since the path was never
correct in the first place.
.El
.Pp
If a device is removed and later re-attached to the system, ZFS attempts
to put the device online automatically.
Device attach detection is hardware-dependent and might not be supported on all
platforms.
.Ss Hot Spares
ZFS allows devices to be associated with pools as
.Qq hot spares .
These devices are not actively used in the pool, but when an active device
fails, it is automatically replaced by a hot spare.
To create a pool with hot spares, specify a
.Sy spare
vdev with any number of devices.
For example,
.Bd -literal
# zpool create pool mirror sda sdb spare sdc sdd
.Ed
.Pp
Spares can be shared across multiple pools, and can be added with the
.Nm zpool Cm add
command and removed with the
.Nm zpool Cm remove
command.
Once a spare replacement is initiated, a new
.Sy spare
vdev is created within the configuration that will remain there until the
original device is replaced.
At this point, the hot spare becomes available again if another device fails.
.Pp
If a pool has a shared spare that is currently being used, the pool cannot be
exported since other pools may use this shared spare, which may lead to
potential data corruption.
.Pp
An in-progress spare replacement can be cancelled by detaching the hot spare.
If the original faulted device is detached, then the hot spare assumes its
place in the configuration, and is removed from the spare list of all active
pools.
.Pp
Spares cannot replace log devices.
.Ss Intent Log
The ZFS Intent Log (ZIL) satisfies POSIX requirements for synchronous
transactions.
For instance, databases often require their transactions to be on stable storage
devices when returning from a system call.
NFS and other applications can also use
.Xr fsync 2
to ensure data stability.
By default, the intent log is allocated from blocks within the main pool.
However, it might be possible to get better performance using separate intent
log devices such as NVRAM or a dedicated disk.
For example:
.Bd -literal
# zpool create pool sda sdb log sdc
.Ed
.Pp
Multiple log devices can also be specified, and they can be mirrored.
See the
.Sx EXAMPLES
section for an example of mirroring multiple log devices.
.Pp
Log devices can be added, replaced, attached, detached and removed. In
addition, log devices are imported and exported as part of the pool
that contains them.
-Mirrored log devices can be removed by specifying the top-level mirror for the
-log.
+Mirrored devices can be removed by specifying the top-level mirror vdev.
.Ss Cache Devices
Devices can be added to a storage pool as
.Qq cache devices .
These devices provide an additional layer of caching between main memory and
disk.
For read-heavy workloads, where the working set size is much larger than what
can be cached in main memory, using cache devices allows much more of this
working set to be served from low latency media.
Using cache devices provides the greatest performance improvement for random
read-workloads of mostly static content.
.Pp
To create a pool with cache devices, specify a
.Sy cache
vdev with any number of devices.
For example:
.Bd -literal
# zpool create pool sda sdb cache sdc sdd
.Ed
.Pp
Cache devices cannot be mirrored or part of a raidz configuration.
If a read error is encountered on a cache device, that read I/O is reissued to
the original storage pool device, which might be part of a mirrored or raidz
configuration.
.Pp
The content of the cache devices is considered volatile, as is the case with
other system caches.
.Ss Properties
Each pool has several properties associated with it.
Some properties are read-only statistics while others are configurable and
change the behavior of the pool.
.Pp
The following are read-only properties:
.Bl -tag -width Ds
.It Sy allocated
Amount of storage used within the pool.
.It Sy capacity
Percentage of pool space used.
This property can also be referred to by its shortened column name,
.Sy cap .
.It Sy expandsize
Amount of uninitialized space within the pool or device that can be used to
increase the total capacity of the pool.
Uninitialized space consists of any space on an EFI labeled vdev which has not
been brought online
.Po e.g., using
.Nm zpool Cm online Fl e
.Pc .
This space occurs when a LUN is dynamically expanded.
.It Sy fragmentation
The amount of fragmentation in the pool.
.It Sy free
The amount of free space available in the pool.
.It Sy freeing
After a file system or snapshot is destroyed, the space it was using is
returned to the pool asynchronously.
.Sy freeing
is the amount of space remaining to be reclaimed.
Over time
.Sy freeing
will decrease while
.Sy free
increases.
.It Sy health
The current health of the pool.
Health can be one of
.Sy ONLINE , DEGRADED , FAULTED , OFFLINE , REMOVED , UNAVAIL .
.It Sy guid
A unique identifier for the pool.
.It Sy size
Total size of the storage pool.
.It Sy unsupported@ Ns Em feature_guid
Information about unsupported features that are enabled on the pool.
See
.Xr zpool-features 5
for details.
.El
.Pp
The space usage properties report actual physical space available to the
storage pool.
The physical space can be different from the total amount of space that any
contained datasets can actually use.
The amount of space used in a raidz configuration depends on the characteristics
of the data being written.
In addition, ZFS reserves some space for internal accounting that the
.Xr zfs 8
command takes into account, but the
.Nm
command does not.
For non-full pools of a reasonable size, these effects should be invisible.
For small pools, or pools that are close to being completely full, these
discrepancies may become more noticeable.
.Pp
The following property can be set at creation time and import time:
.Bl -tag -width Ds
.It Sy altroot
Alternate root directory.
If set, this directory is prepended to any mount points within the pool.
This can be used when examining an unknown pool where the mount points cannot be
trusted, or in an alternate boot environment, where the typical paths are not
valid.
.Sy altroot
is not a persistent property.
It is valid only while the system is up.
Setting
.Sy altroot
defaults to using
.Sy cachefile Ns = Ns Sy none ,
though this may be overridden using an explicit setting.
.El
.Pp
The following property can be set only at import time:
.Bl -tag -width Ds
.It Sy readonly Ns = Ns Sy on Ns | Ns Sy off
If set to
.Sy on ,
the pool will be imported in read-only mode.
This property can also be referred to by its shortened column name,
.Sy rdonly .
.El
.Pp
The following properties can be set at creation time and import time, and later
changed with the
.Nm zpool Cm set
command:
.Bl -tag -width Ds
.It Sy ashift Ns = Ns Sy ashift
Pool sector size exponent, to the power of
.Sy 2
.Po internally referred to as
.Sy ashift
.Pc .
Values from 9 to 16, inclusive, are valid; also, the special
value 0 (the default) means to auto-detect using the kernel's block
layer and a ZFS internal exception list. I/O operations will be aligned
to the specified size boundaries. Additionally, the minimum (disk)
write size will be set to the specified size, so this represents a
space vs. performance trade-off. For optimal performance, the pool
sector size should be greater than or equal to the sector size of the
underlying disks. The typical case for setting this property is when
performance is important and the underlying disks use 4KiB sectors but
report 512B sectors to the OS (for compatibility reasons); in that
case, set
.Sy ashift=12
(which is 1<<12 = 4096). When set, this property is
used as the default hint value in subsequent vdev operations (add,
attach and replace). Changing this value will not modify any existing
vdev, not even on disk replacement; however it can be used, for
instance, to replace a dying 512B sectors disk with a newer 4KiB
sectors device: this will probably result in bad performance but at the
same time could prevent loss of data.
.It Sy autoexpand Ns = Ns Sy on Ns | Ns Sy off
Controls automatic pool expansion when the underlying LUN is grown.
If set to
.Sy on ,
the pool will be resized according to the size of the expanded device.
If the device is part of a mirror or raidz then all devices within that
mirror/raidz group must be expanded before the new space is made available to
the pool.
The default behavior is
.Sy off .
This property can also be referred to by its shortened column name,
.Sy expand .
.It Sy autoreplace Ns = Ns Sy on Ns | Ns Sy off
Controls automatic device replacement.
If set to
.Sy off ,
device replacement must be initiated by the administrator by using the
.Nm zpool Cm replace
command.
If set to
.Sy on ,
any new device, found in the same physical location as a device that previously
belonged to the pool, is automatically formatted and replaced.
The default behavior is
.Sy off .
This property can also be referred to by its shortened column name,
.Sy replace .
Autoreplace can also be used with virtual disks (like device
mapper) provided that you use the /dev/disk/by-vdev paths set up by
vdev_id.conf. See the
.Xr vdev_id 8
man page for more details.
Autoreplace and autoonline require the ZFS Event Daemon be configured and
running. See the
.Xr zed 8
man page for more details.
.It Sy bootfs Ns = Ns Sy (unset) Ns | Ns Ar pool Ns / Ns Ar dataset
Identifies the default bootable dataset for the root pool. This property is
expected to be set mainly by the installation and upgrade programs.
Not all Linux distribution boot processes use the bootfs property.
.It Sy cachefile Ns = Ns Ar path Ns | Ns Sy none
Controls the location of where the pool configuration is cached.
Discovering all pools on system startup requires a cached copy of the
configuration data that is stored on the root file system.
All pools in this cache are automatically imported when the system boots.
Some environments, such as install and clustering, need to cache this
information in a different location so that pools are not automatically
imported.
Setting this property caches the pool configuration in a different location that
can later be imported with
.Nm zpool Cm import Fl c .
Setting it to the special value
.Sy none
creates a temporary pool that is never cached, and the special value
.Qq
.Pq empty string
uses the default location.
.Pp
Multiple pools can share the same cache file.
Because the kernel destroys and recreates this file when pools are added and
removed, care should be taken when attempting to access this file.
When the last pool using a
.Sy cachefile
is exported or destroyed, the file will be empty.
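.Pp
For example, a pool whose configuration was cached in an alternate file can
later be imported with (path and pool name are placeholders):
.Bd -literal
# zpool import -c /etc/zfs/alternate.cache tank
.Ed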
.It Sy comment Ns = Ns Ar text
A text string consisting of printable ASCII characters that will be stored
such that it is available even if the pool becomes faulted.
An administrator can provide additional information about a pool using this
property.
.It Sy dedupditto Ns = Ns Ar number
Threshold for the number of block ditto copies.
If the reference count for a deduplicated block increases above this number, a
new ditto copy of this block is automatically stored.
The default setting is
.Sy 0
which causes no ditto copies to be created for deduplicated blocks.
The minimum legal nonzero setting is
.Sy 100 .
.It Sy delegation Ns = Ns Sy on Ns | Ns Sy off
Controls whether a non-privileged user is granted access based on the dataset
permissions defined on the dataset.
See
.Xr zfs 8
for more information on ZFS delegated administration.
.It Sy failmode Ns = Ns Sy wait Ns | Ns Sy continue Ns | Ns Sy panic
Controls the system behavior in the event of catastrophic pool failure.
This condition is typically a result of a loss of connectivity to the underlying
storage device(s) or a failure of all devices within the pool.
The behavior of such an event is determined as follows:
.Bl -tag -width "continue"
.It Sy wait
Blocks all I/O access until the device connectivity is recovered and the errors
are cleared.
This is the default behavior.
.It Sy continue
Returns
.Er EIO
to any new write I/O requests but allows reads to any of the remaining healthy
devices.
Any write requests that have yet to be committed to disk would be blocked.
.It Sy panic
Prints out a message to the console and generates a system crash dump.
.El
.It Sy feature@ Ns Ar feature_name Ns = Ns Sy enabled
The value of this property is the current state of
.Ar feature_name .
The only valid value when setting this property is
.Sy enabled
which moves
.Ar feature_name
to the enabled state.
See
.Xr zpool-features 5
for details on feature states.
.It Sy listsnapshots Ns = Ns Sy on Ns | Ns Sy off
Controls whether information about snapshots associated with this pool is
output when
.Nm zfs Cm list
is run without the
.Fl t
option.
The default value is
.Sy off .
This property can also be referred to by its shortened name,
.Sy listsnaps .
.It Sy multihost Ns = Ns Sy on Ns | Ns Sy off
Controls whether a pool activity check should be performed during
.Nm zpool Cm import .
When a pool is determined to be active it cannot be imported, even with the
.Fl f
option. This property is intended to be used in failover configurations
where multiple hosts have access to a pool on shared storage. When this
property is on, periodic writes to storage occur to show the pool is in use.
See
.Sy zfs_multihost_interval
in the
.Xr zfs-module-parameters 5
man page. In order to enable this property each host must set a unique hostid.
See
.Xr genhostid 1
.Xr zgenhostid 8
.Xr spl-module-parameters 5
for additional details. The default value is
.Sy off .
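.Pp
For example, on a host that does not yet have a hostid set, the activity
check could be enabled with (pool name is a placeholder):
.Bd -literal
# zgenhostid
# zpool set multihost=on tank
.Ed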
.It Sy version Ns = Ns Ar version
The current on-disk version of the pool.
This can be increased, but never decreased.
The preferred method of updating pools is with the
.Nm zpool Cm upgrade
command, though this property can be used when a specific version is needed for
backwards compatibility.
Once feature flags are enabled on a pool this property will no longer have a
value.
.El
.Ss Subcommands
All subcommands that modify state are logged persistently to the pool in their
original form.
.Pp
The
.Nm
command provides subcommands to create and destroy storage pools, add capacity
to storage pools, and provide information about the storage pools.
The following subcommands are supported:
.Bl -tag -width Ds
.It Xo
.Nm
.Fl ?
.Xc
Displays a help message.
.It Xo
.Nm
.Cm add
.Op Fl fgLnP
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool vdev Ns ...
.Xc
Adds the specified virtual devices to the given pool.
The
.Ar vdev
specification is described in the
.Sx Virtual Devices
section.
The behavior of the
.Fl f
option, and the device checks performed are described in the
.Nm zpool Cm create
subcommand.
.Bl -tag -width Ds
.It Fl f
Forces use of
.Ar vdev Ns s ,
even if they appear in use or specify a conflicting replication level.
Not all devices can be overridden in this manner.
.It Fl g
Display
.Ar vdev
GUIDs instead of the normal device names. These GUIDs can be used in place of
device names for the zpool detach/offline/remove/replace commands.
.It Fl L
Display real paths for
.Ar vdev Ns s
resolving all symbolic links. This can be used to look up the current block
device name regardless of the /dev/disk/ path used to open it.
.It Fl n
Displays the configuration that would be used without actually adding the
.Ar vdev Ns s .
The actual pool creation can still fail due to insufficient privileges or
device sharing.
.It Fl P
Display full paths for
.Ar vdev Ns s
instead of only the last component of the path.
This can be used in conjunction with the
.Fl L
flag.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties. See the
.Sx Properties
section for a list of valid properties that can be set. The only property
supported at the moment is ashift.
.El
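.Pp
For example, the configuration that would result from adding another mirror
can be previewed with (device names are placeholders):
.Bd -literal
# zpool add -n tank mirror sdc sdd
.Ed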
.It Xo
.Nm
.Cm attach
.Op Fl f
.Oo Fl o Ar property Ns = Ns Ar value Oc
.Ar pool device new_device
.Xc
Attaches
.Ar new_device
to the existing
.Ar device .
The existing device cannot be part of a raidz configuration.
If
.Ar device
is not currently part of a mirrored configuration,
.Ar device
automatically transforms into a two-way mirror of
.Ar device
and
.Ar new_device .
If
.Ar device
is part of a two-way mirror, attaching
.Ar new_device
creates a three-way mirror, and so on.
In either case,
.Ar new_device
begins to resilver immediately.
.Bl -tag -width Ds
.It Fl f
Forces use of
.Ar new_device ,
even if it appears to be in use.
Not all devices can be overridden in this manner.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties. See the
.Sx Properties
section for a list of valid properties that can be set. The only property
supported at the moment is ashift.
.El
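.Pp
For example, a single-disk pool can be converted into a two-way mirror with
(device names are placeholders):
.Bd -literal
# zpool attach tank sda sdb
.Ed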
.It Xo
.Nm
.Cm clear
.Ar pool
.Op Ar device
.Xc
Clears device errors in a pool.
If no arguments are specified, all device errors within the pool are cleared.
If one or more devices is specified, only those errors associated with the
specified device or devices are cleared.
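.Pp
For example, errors can be cleared for a single device or for the whole pool
(names are placeholders):
.Bd -literal
# zpool clear tank sda
# zpool clear tank
.Ed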
.It Xo
.Nm
.Cm create
.Op Fl dfn
.Op Fl m Ar mountpoint
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Oo Fl o Ar feature@feature Ns = Ns Ar value Oc Ns ...
.Oo Fl O Ar file-system-property Ns = Ns Ar value Oc Ns ...
.Op Fl R Ar root
.Op Fl t Ar tname
.Ar pool vdev Ns ...
.Xc
Creates a new storage pool containing the virtual devices specified on the
command line.
The pool name must begin with a letter, and can only contain
alphanumeric characters as well as underscore
.Pq Qq Sy _ ,
dash
.Pq Qq Sy \&- ,
colon
.Pq Qq Sy \&: ,
space
.Pq Qq Sy \&\ ,
and period
.Pq Qq Sy \&. .
The pool names
.Sy mirror ,
.Sy raidz ,
.Sy spare
and
.Sy log
are reserved, as are names beginning with
.Sy mirror ,
.Sy raidz ,
.Sy spare ,
and the pattern
.Sy c[0-9] .
The
.Ar vdev
specification is described in the
.Sx Virtual Devices
section.
.Pp
The command verifies that each device specified is accessible and not currently
in use by another subsystem.
There are some uses, such as being currently mounted, or specified as the
dedicated dump device, that prevent a device from ever being used by ZFS.
Other uses, such as having a preexisting UFS file system, can be overridden with
the
.Fl f
option.
.Pp
The command also checks that the replication strategy for the pool is
consistent.
An attempt to combine redundant and non-redundant storage in a single pool, or
to mix disks and files, results in an error unless
.Fl f
is specified.
The use of differently sized devices within a single raidz or mirror group is
also flagged as an error unless
.Fl f
is specified.
.Pp
Unless the
.Fl R
option is specified, the default mount point is
.Pa / Ns Ar pool .
The mount point must not exist or must be empty, or else the root dataset
cannot be mounted.
This can be overridden with the
.Fl m
option.
.Pp
By default all supported features are enabled on the new pool unless the
.Fl d
option is specified.
.Bl -tag -width Ds
.It Fl d
Do not enable any features on the new pool.
Individual features can be enabled by setting their corresponding properties to
.Sy enabled
with the
.Fl o
option.
See
.Xr zpool-features 5
for details about feature properties.
.It Fl f
Forces use of
.Ar vdev Ns s ,
even if they appear in use or specify a conflicting replication level.
Not all devices can be overridden in this manner.
.It Fl m Ar mountpoint
Sets the mount point for the root dataset.
The default mount point is
.Pa /pool
or
.Pa altroot/pool
if
.Ar altroot
is specified.
The mount point must be an absolute path,
.Sy legacy ,
or
.Sy none .
For more information on dataset mount points, see
.Xr zfs 8 .
.It Fl n
Displays the configuration that would be used without actually creating the
pool.
The actual pool creation can still fail due to insufficient privileges or
device sharing.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties.
See the
.Sx Properties
section for a list of valid properties that can be set.
.It Fl o Ar feature@feature Ns = Ns Ar value
Sets the given pool feature. See the
.Xr zpool-features 5
section for a list of valid features that can be set.
Value can be either disabled or enabled.
.It Fl O Ar file-system-property Ns = Ns Ar value
Sets the given file system properties in the root file system of the pool.
See the
.Sx Properties
section of
.Xr zfs 8
for a list of valid properties that can be set.
.It Fl R Ar root
Equivalent to
.Fl o Sy cachefile Ns = Ns Sy none Fl o Sy altroot Ns = Ns Ar root
.It Fl t Ar tname
Sets the in-core pool name to
.Sy tname
while the on-disk name will be the name specified as the pool name
.Sy pool .
This will set the default cachefile property to none. This is intended
to handle name space collisions when creating pools for other systems,
such as virtual machines or physical machines whose pools live on network
block devices.
.El
.It Xo
.Nm
.Cm destroy
.Op Fl f
.Ar pool
.Xc
Destroys the given pool, freeing up any devices for other use.
This command tries to unmount any active datasets before destroying the pool.
.Bl -tag -width Ds
.It Fl f
Forces any active datasets contained within the pool to be unmounted.
.El
.It Xo
.Nm
.Cm detach
.Ar pool device
.Xc
Detaches
.Ar device
from a mirror.
The operation is refused if there are no other valid replicas of the data.
If the device may be re-added to the pool later on, consider using the
.Nm zpool Cm offline
command instead.
.It Xo
.Nm
.Cm events
.Op Fl vHf Oo Ar pool Oc | Fl c
.Xc
Lists all recent events generated by the ZFS kernel modules. These events
are consumed by the
.Xr zed 8
and used to automate administrative tasks such as replacing a failed device
with a hot spare. For more information about the subclasses and event payloads
that can be generated see the
.Xr zfs-events 5
man page.
.Bl -tag -width Ds
.It Fl c
Clear all previous events.
.It Fl f
Follow mode.
.It Fl H
Scripted mode. Do not display headers, and separate fields by a
single tab instead of arbitrary space.
.It Fl v
Print the entire payload for each event.
.El
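.Pp
For example, recent events with their full payloads can be printed with:
.Bd -literal
# zpool events -v
.Ed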
.It Xo
.Nm
.Cm export
.Op Fl a
.Op Fl f
.Ar pool Ns ...
.Xc
Exports the given pools from the system.
All devices are marked as exported, but are still considered in use by other
subsystems.
The devices can be moved between systems
.Pq even those of different endianness
and imported as long as a sufficient number of devices are present.
.Pp
Before exporting the pool, all datasets within the pool are unmounted.
A pool can not be exported if it has a shared spare that is currently being
used.
.Pp
For pools to be portable, you must give the
.Nm
command whole disks, not just partitions, so that ZFS can label the disks with
portable EFI labels.
Otherwise, disk drivers on platforms of different endianness will not recognize
the disks.
.Bl -tag -width Ds
.It Fl a
Exports all pools imported on the system.
.It Fl f
Forcefully unmount all datasets, using the
.Nm unmount Fl f
command.
.Pp
This command will forcefully export the pool even if it has a shared spare that
is currently being used.
This may lead to potential data corruption.
.El
.It Xo
.Nm
.Cm get
.Op Fl Hp
.Op Fl o Ar field Ns Oo , Ns Ar field Oc Ns ...
.Sy all Ns | Ns Ar property Ns Oo , Ns Ar property Oc Ns ...
.Ar pool Ns ...
.Xc
Retrieves the given list of properties
.Po
or all properties if
.Sy all
is used
.Pc
for the specified storage pool(s).
These properties are displayed with the following fields:
.Bd -literal
name Name of storage pool
property Property name
value Property value
source Property source, either 'default' or 'local'.
.Ed
.Pp
See the
.Sx Properties
section for more information on the available pool properties.
.Bl -tag -width Ds
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a single tab instead of arbitrary
space.
.It Fl o Ar field
A comma-separated list of columns to display.
.Sy name Ns \&, Ns Sy property Ns \&, Ns Sy value Ns \&, Ns Sy source
is the default value.
.It Fl p
Display numbers in parsable (exact) values.
.El
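.Pp
For example, selected properties can be printed in a script-friendly form
with (pool name is a placeholder):
.Bd -literal
# zpool get -Hp capacity,health tank
.Ed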
.It Xo
.Nm
.Cm history
.Op Fl il
.Oo Ar pool Oc Ns ...
.Xc
Displays the command history of the specified pool(s) or all pools if no pool is
specified.
.Bl -tag -width Ds
.It Fl i
Displays internally logged ZFS events in addition to user initiated events.
.It Fl l
Displays log records in long format, which in addition to the standard format
includes the user name, the hostname, and the zone in which the operation was
performed.
.El
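.Pp
For example, the long-format history including internal events can be shown
with (pool name is a placeholder):
.Bd -literal
# zpool history -il tank
.Ed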
.It Xo
.Nm
.Cm import
.Op Fl D
.Op Fl d Ar dir Ns | Ns device
.Xc
Lists pools available to import.
If the
.Fl d
option is not specified, this command searches for devices in
.Pa /dev .
The
.Fl d
option can be specified multiple times, and all directories are searched.
If the device appears to be part of an exported pool, this command displays a
summary of the pool with the name of the pool, a numeric identifier, as well as
the vdev layout and current health of the device for each device or file.
Destroyed pools, pools that were previously destroyed with the
.Nm zpool Cm destroy
command, are not listed unless the
.Fl D
option is specified.
.Pp
The numeric identifier is unique, and can be used instead of the pool name when
multiple exported pools of the same name are available.
.Bl -tag -width Ds
.It Fl c Ar cachefile
Reads configuration from the given
.Ar cachefile
that was created with the
.Sy cachefile
pool property.
This
.Ar cachefile
is used instead of searching for devices.
.It Fl d Ar dir Ns | Ns Ar device
Uses
.Ar device
or searches for devices or files in
.Ar dir .
The
.Fl d
option can be specified multiple times.
.It Fl D
Lists destroyed pools only.
.El
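.Pp
For example, importable pools whose devices are visible under a specific
directory can be listed with (path is a placeholder):
.Bd -literal
# zpool import -d /dev/disk/by-id
.Ed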
.It Xo
.Nm
.Cm import
.Fl a
.Op Fl DflmN
.Op Fl F Oo Fl n Oc Oo Fl T Oc Oo Fl X Oc
.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns device
.Op Fl o Ar mntopts
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Op Fl R Ar root
.Op Fl s
.Xc
Imports all pools found in the search directories.
Identical to the previous command, except that all pools with a sufficient
number of devices available are imported.
Destroyed pools, pools that were previously destroyed with the
.Nm zpool Cm destroy
command, will not be imported unless the
.Fl D
option is specified.
.Bl -tag -width Ds
.It Fl a
Searches for and imports all pools found.
.It Fl c Ar cachefile
Reads configuration from the given
.Ar cachefile
that was created with the
.Sy cachefile
pool property.
This
.Ar cachefile
is used instead of searching for devices.
.It Fl d Ar dir Ns | Ns Ar device
Uses
.Ar device
or searches for devices or files in
.Ar dir .
The
.Fl d
option can be specified multiple times.
This option is incompatible with the
.Fl c
option.
.It Fl D
Imports destroyed pools only.
The
.Fl f
option is also required.
.It Fl f
Forces import, even if the pool appears to be potentially active.
.It Fl F
Recovery mode for a non-importable pool.
Attempt to return the pool to an importable state by discarding the last few
transactions.
Not all damaged pools can be recovered by using this option.
If successful, the data from the discarded transactions is irretrievably lost.
This option is ignored if the pool is importable or already imported.
.It Fl l
Indicates that this command will request encryption keys for all encrypted
datasets it attempts to mount as it is bringing the pool online. Note that if
any datasets have a
.Sy keylocation
of
.Sy prompt
this command will block waiting for the keys to be entered. Without this flag
encrypted datasets will be left unavailable until the keys are loaded.
.It Fl m
Allows a pool to import when there is a missing log device.
Recent transactions can be lost because the log device will be discarded.
.It Fl n
Used with the
.Fl F
recovery option.
Determines whether a non-importable pool can be made importable again, but does
not actually perform the pool recovery.
For more details about pool recovery mode, see the
.Fl F
option, above.
.It Fl N
Import the pool without mounting any file systems.
.It Fl o Ar mntopts
Comma-separated list of mount options to use when mounting datasets within the
pool.
See
.Xr zfs 8
for a description of dataset properties and mount options.
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property on the imported pool.
See the
.Sx Properties
section for more information on the available pool properties.
.It Fl R Ar root
Sets the
.Sy cachefile
property to
.Sy none
and the
.Sy altroot
property to
.Ar root .
.It Fl s
Scan using the default search path; the libblkid cache will not be
consulted. A custom search path may be specified by setting the
ZPOOL_IMPORT_PATH environment variable.
.It Fl X
Used with the
.Fl F
recovery option. Determines whether extreme
measures to find a valid txg should take place. This allows the pool to
be rolled back to a txg which is no longer guaranteed to be consistent.
Pools imported at an inconsistent txg may contain uncorrectable
checksum errors. For more details about pool recovery mode, see the
.Fl F
option, above. WARNING: This option can be extremely hazardous to the
health of your pool and should only be used as a last resort.
.It Fl T
Specify the txg to use for rollback. Implies
.Fl FX .
For more details
about pool recovery mode, see the
.Fl X
option, above. WARNING: This option can be extremely hazardous to the
health of your pool and should only be used as a last resort.
.El
.It Xo
.Nm
.Cm import
.Op Fl Dflm
.Op Fl F Oo Fl n Oc Oo Fl t Oc Oo Fl T Oc Oo Fl X Oc
.Op Fl c Ar cachefile Ns | Ns Fl d Ar dir Ns | Ns device
.Op Fl o Ar mntopts
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Op Fl R Ar root
.Op Fl s
.Ar pool Ns | Ns Ar id
.Op Ar newpool
.Xc
Imports a specific pool.
A pool can be identified by its name or the numeric identifier.
If
.Ar newpool
is specified, the pool is imported using the name
.Ar newpool .
Otherwise, it is imported with the same name as its exported name.
.Pp
If a device is removed from a system without running
.Nm zpool Cm export
first, the device appears as potentially active.
It cannot be determined if this was a failed export, or whether the device is
really in use from another host.
To import a pool in this state, the
.Fl f
option is required.
.Bl -tag -width Ds
.It Fl c Ar cachefile
Reads configuration from the given
.Ar cachefile
that was created with the
.Sy cachefile
pool property.
This
.Ar cachefile
is used instead of searching for devices.
.It Fl d Ar dir Ns | Ns Ar device
Uses
.Ar device
or searches for devices or files in
.Ar dir .
The
.Fl d
option can be specified multiple times.
This option is incompatible with the
.Fl c
option.
.It Fl D
Imports destroyed pool.
The
.Fl f
option is also required.
.It Fl f
Forces import, even if the pool appears to be potentially active.
.It Fl F
Recovery mode for a non-importable pool.
Attempt to return the pool to an importable state by discarding the last few
transactions.
Not all damaged pools can be recovered by using this option.
If successful, the data from the discarded transactions is irretrievably lost.
This option is ignored if the pool is importable or already imported.
.It Fl l
Indicates that this command will request encryption keys for all encrypted
datasets it attempts to mount as it is bringing the pool online. Note that if
any datasets have a
.Sy keylocation
of
.Sy prompt
this command will block waiting for the keys to be entered. Without this flag
encrypted datasets will be left unavailable until the keys are loaded.
.It Fl m
Allows a pool to import when there is a missing log device.
Recent transactions can be lost because the log device will be discarded.
.It Fl n
Used with the
.Fl F
recovery option.
Determines whether a non-importable pool can be made importable again, but does
not actually perform the pool recovery.
For more details about pool recovery mode, see the
.Fl F
option, above.
.It Fl o Ar mntopts
Comma-separated list of mount options to use when mounting datasets within the
pool.
See
.Xr zfs 8
for a description of dataset properties and mount options.
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property on the imported pool.
See the
.Sx Properties
section for more information on the available pool properties.
.It Fl R Ar root
Sets the
.Sy cachefile
property to
.Sy none
and the
.Sy altroot
property to
.Ar root .
.It Fl s
Scan using the default search path; the libblkid cache will not be
consulted. A custom search path may be specified by setting the
ZPOOL_IMPORT_PATH environment variable.
.It Fl X
Used with the
.Fl F
recovery option. Determines whether extreme
measures to find a valid txg should take place. This allows the pool to
be rolled back to a txg which is no longer guaranteed to be consistent.
Pools imported at an inconsistent txg may contain uncorrectable
checksum errors. For more details about pool recovery mode, see the
.Fl F
option, above. WARNING: This option can be extremely hazardous to the
health of your pool and should only be used as a last resort.
.It Fl T
Specify the txg to use for rollback. Implies
.Fl FX .
For more details
about pool recovery mode, see the
.Fl X
option, above. WARNING: This option can be extremely hazardous to the
health of your pool and should only be used as a last resort.
.It Fl t
Used with
.Sy newpool .
Specifies that
.Sy newpool
is temporary. Temporary pool names last until export. Ensures that
the original pool name will be used in all label updates and therefore
is retained upon export.
Will also set the
.Sy cachefile
property to
.Sy none
when not explicitly specified.
.El
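.Pp
For example, an exported pool can be imported under a new name with (pool
names are placeholders):
.Bd -literal
# zpool import tank newtank
.Ed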
.It Xo
.Nm
.Cm iostat
.Op Oo Oo Fl c Ar SCRIPT Oc Oo Fl lq Oc Oc Ns | Ns Fl rw
.Op Fl T Sy u Ns | Ns Sy d
.Op Fl ghHLpPvy
.Oo Oo Ar pool Ns ... Oc Ns | Ns Oo Ar pool vdev Ns ... Oc Ns | Ns Oo Ar vdev Ns ... Oc Oc
.Op Ar interval Op Ar count
.Xc
Displays I/O statistics for the given pools/vdevs. You can pass in a
list of pools, a pool and list of vdevs in that pool, or a list of any
vdevs from any pool. If no items are specified, statistics for every
pool in the system are shown.
When given an
.Ar interval ,
the statistics are printed every
.Ar interval
seconds until ^C is pressed. If count is specified, the command exits
after count reports are printed. The first report printed is always
the statistics since boot regardless of whether
.Ar interval
and
.Ar count
are passed. However, this behavior can be suppressed with the
.Fl y
flag. Also note that the units of
.Sy K ,
.Sy M ,
.Sy G ...
that are printed in the report are in base 1024. To get the raw
values, use the
.Fl p
flag.
.Bl -tag -width Ds
.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns ...
Run a script (or scripts) on each vdev and include the output as a new column
in the
.Nm zpool Cm iostat
output. Users can run any script found in their
.Pa ~/.zpool.d
directory or from the system
.Pa /etc/zfs/zpool.d
directory. Script names containing the slash (/) character are not allowed.
The default search path can be overridden by setting the
ZPOOL_SCRIPTS_PATH environment variable. A privileged user can run
.Fl c
if they have the ZPOOL_SCRIPTS_AS_ROOT
environment variable set. If a script requires the use of a privileged
command, like
.Xr smartctl 8 ,
then it's recommended you allow the user access to it in
.Pa /etc/sudoers
or add the user to the
.Pa /etc/sudoers.d/zfs
file.
.Pp
If
.Fl c
is passed without a script name, it prints a list of all scripts.
.Fl c
also sets verbose mode
.No \&( Ns Fl v Ns No \&).
.Pp
Script output should be in the form of "name=value". The column name is
set to "name" and the value is set to "value". Multiple lines can be
used to output multiple columns. The first line of output not in the
"name=value" format is displayed without a column title, and no more
output after that is displayed. This can be useful for printing error
messages. Blank or NULL values are printed as a '-' to make output
awk-able.
.Pp
The following environment variables are set before running each script:
.Bl -tag -width "VDEV_PATH"
.It Sy VDEV_PATH
Full path to the vdev
.El
.Bl -tag -width "VDEV_UPATH"
.It Sy VDEV_UPATH
Underlying path to the vdev (/dev/sd*). For use with device mapper,
multipath, or partitioned vdevs.
.El
.Bl -tag -width "VDEV_ENC_SYSFS_PATH"
.It Sy VDEV_ENC_SYSFS_PATH
The sysfs path to the enclosure for the vdev (if any).
.El
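.Pp
For example, a minimal script (hypothetical; it would be saved as an
executable file such as
.Pa ~/.zpool.d/upath )
could report the underlying device node as an extra column:
.Bd -literal
#!/bin/sh
# Print one "name=value" pair; it becomes a column named "upath".
echo "upath=$VDEV_UPATH"
.Ed
.Pp
Running
.Nm zpool Cm iostat Fl c Sy upath
would then include that column in the output.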
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Sy u
for a printed representation of the internal representation of time.
See
.Xr time 2 .
Specify
.Sy d
for standard date format.
See
.Xr date 1 .
.It Fl g
Display vdev GUIDs instead of the normal device names. These GUIDs
can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl H
Scripted mode. Do not display headers, and separate fields by a
single tab instead of arbitrary space.
.It Fl L
Display real paths for vdevs resolving all symbolic links. This can
be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl p
Display numbers in parsable (exact) values. Time values are in
nanoseconds.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path. This can be used in conjunction with the
.Fl L
flag.
.It Fl r
Print request size histograms for the leaf ZIOs. This includes
histograms of individual ZIOs (
.Ar ind )
and aggregate ZIOs (
.Ar agg ).
These stats can be useful for seeing how well the ZFS IO aggregator is
working. Do not confuse these request size stats with the block layer
requests; it's possible ZIOs can be broken up before being sent to the
block device.
.It Fl v
Verbose statistics. Reports usage statistics for individual vdevs within the
pool, in addition to the pool-wide statistics.
.It Fl y
Omit statistics since boot.
Normally the first report shows the statistics since boot; this flag
suppresses that report so only the per-interval reports are shown.
.It Fl w
Display latency histograms covering the same latency classes reported by the
.Fl l
flag.
.It Fl l
Include average latency statistics:
.Pp
.Ar total_wait :
Average total IO time (queuing + disk IO time).
.Ar disk_wait :
Average disk IO time (time reading/writing the disk).
.Ar syncq_wait :
Average amount of time IO spent in synchronous priority queues. Does
not include disk time.
.Ar asyncq_wait :
Average amount of time IO spent in asynchronous priority queues.
Does not include disk time.
.Ar scrub :
Average queuing time in scrub queue. Does not include disk time.
.It Fl q
Include active queue statistics. Each priority queue has both
pending (
.Ar pend )
and active (
.Ar activ )
IOs. Pending IOs are waiting to
be issued to the disk, and active IOs have been issued to disk and are
waiting for completion. These stats are broken out by priority queue:
.Pp
.Ar syncq_read/write :
Current number of entries in synchronous priority
queues.
.Ar asyncq_read/write :
Current number of entries in asynchronous priority queues.
.Ar scrubq_read :
Current number of entries in scrub queue.
.Pp
All queue statistics are instantaneous measurements of the number of
entries in the queues. If you specify an interval, the measurements
will be sampled from the end of the interval.
.El
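.Pp
For example, per-vdev latency and queue depths can be watched every five
seconds with (pool name is a placeholder):
.Bd -literal
# zpool iostat -vlq tank 5
.Ed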
.It Xo
.Nm
.Cm labelclear
.Op Fl f
.Ar device
.Xc
Removes ZFS label information from the specified
.Ar device .
The
.Ar device
must not be part of an active pool configuration.
.Bl -tag -width Ds
.It Fl f
Treat exported or foreign devices as inactive.
.El
.It Xo
.Nm
.Cm list
.Op Fl HgLpPv
.Op Fl o Ar property Ns Oo , Ns Ar property Oc Ns ...
.Op Fl T Sy u Ns | Ns Sy d
.Oo Ar pool Oc Ns ...
.Op Ar interval Op Ar count
.Xc
Lists the given pools along with a health status and space usage.
If no
.Ar pool Ns s
are specified, all pools in the system are listed.
When given an
.Ar interval ,
the information is printed every
.Ar interval
seconds until ^C is pressed.
If
.Ar count
is specified, the command exits after
.Ar count
reports are printed.
.Bl -tag -width Ds
.It Fl g
Display vdev GUIDs instead of the normal device names. These GUIDs
can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl H
Scripted mode.
Do not display headers, and separate fields by a single tab instead of arbitrary
space.
.It Fl o Ar property
Comma-separated list of properties to display.
See the
.Sx Properties
section for a list of valid properties.
The default list is
.Cm name , size , allocated , free , expandsize , fragmentation , capacity ,
.Cm dedupratio , health , altroot .
.It Fl L
Display real paths for vdevs resolving all symbolic links. This can
be used to look up the current block device name regardless of the
/dev/disk/ path used to open it.
.It Fl p
Display numbers in parsable
.Pq exact
values.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path. This can be used in conjunction with the
.Fl L
flag.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Fl u
for a printed representation of the internal representation of time.
See
.Xr time 2 .
Specify
.Fl d
for standard date format.
See
.Xr date 1 .
.It Fl v
Verbose statistics.
Reports usage statistics for individual vdevs within the pool, in addition to
the pool-wide statistics.
.El
.It Xo
.Nm
.Cm offline
.Op Fl f
.Op Fl t
.Ar pool Ar device Ns ...
.Xc
Takes the specified physical device offline.
While the
.Ar device
is offline, no attempt is made to read or write to the device.
This command is not applicable to spares.
.Bl -tag -width Ds
.It Fl f
Force fault. Instead of offlining the disk, put it into a faulted
state. The fault will persist across imports unless the
.Fl t
flag was specified.
.It Fl t
Temporary.
Upon reboot, the specified physical device reverts to its previous state.
.El
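.Pp
For example, a device can be taken offline only until the next reboot with
(names are placeholders):
.Bd -literal
# zpool offline -t tank sda
.Ed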
.It Xo
.Nm
.Cm online
.Op Fl e
.Ar pool Ar device Ns ...
.Xc
Brings the specified physical device online.
This command is not applicable to spares.
.Bl -tag -width Ds
.It Fl e
Expand the device to use all available space.
If the device is part of a mirror or raidz then all devices must be expanded
before the new space will become available to the pool.
.El
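.Pp
For example, after the underlying LUN has been grown, the new space can be
made available with (names are placeholders):
.Bd -literal
# zpool online -e tank sda
.Ed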
.It Xo
.Nm
.Cm reguid
.Ar pool
.Xc
Generates a new unique identifier for the pool.
You must ensure that all devices in this pool are online and healthy before
performing this action.
.It Xo
.Nm
.Cm reopen
.Op Fl n
.Ar pool
.Xc
Reopen all the vdevs associated with the pool.
.Bl -tag -width Ds
.It Fl n
Do not restart an in-progress scrub operation. This is not recommended and can
result in partially resilvered devices unless a second scrub is performed.
.El
.It Xo
.Nm
.Cm remove
+.Op Fl np
.Ar pool Ar device Ns ...
.Xc
Removes the specified device from the pool.
-This command currently only supports removing hot spares, cache, and log
-devices.
-A mirrored log device can be removed by specifying the top-level mirror for the
-log.
-Non-log devices that are part of a mirrored configuration can be removed using
+This command currently only supports removing hot spares, cache devices, log
+devices, and mirrored top-level vdevs (mirrors of leaf devices); raidz vdevs
+cannot be removed.
+.Pp
+Removing a top-level vdev reduces the total amount of space in the storage pool.
+The specified device will be evacuated by copying all allocated space from it to
+the other devices in the pool.
+In this case, the
+.Nm zpool Cm remove
+command initiates the removal and returns, while the evacuation continues in
+the background.
+The removal progress can be monitored with
+.Nm zpool Cm status .
+This feature must be enabled to be used; see
+.Xr zpool-features 5
+for details.
+.Pp
+A mirrored top-level device (log or data) can be removed by specifying the
+top-level mirror vdev itself.
+Individual devices that are part of such a mirrored configuration can instead
+be removed using
the
.Nm zpool Cm detach
command.
-Non-redundant and raidz devices cannot be removed from a pool.
+.Bl -tag -width Ds
+.It Fl n
+Do not actually perform the removal ("no-op").
+Instead, print the estimated amount of memory that will be used by the
+mapping table after the removal completes.
+This is nonzero only for top-level vdevs.
+.It Fl p
+Used in conjunction with the
+.Fl n
+flag, displays numbers as parsable (exact) values.
+.El
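+.Pp
+For example, the memory cost of removing a top-level mirror could be
+estimated without actually performing the removal (names are placeholders):
+.Bd -literal
+# zpool remove -np tank mirror-1
+.Ed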
+.It Xo
+.Nm
+.Cm remove
+.Fl s
+.Ar pool
+.Xc
+Stops and cancels an in-progress removal of a top-level vdev.
.It Xo
.Nm
.Cm replace
.Op Fl f
.Op Fl o Ar property Ns = Ns Ar value
.Ar pool Ar device Op Ar new_device
.Xc
Replaces
.Ar old_device
with
.Ar new_device .
This is equivalent to attaching
.Ar new_device ,
waiting for it to resilver, and then detaching
.Ar old_device .
.Pp
The size of
.Ar new_device
must be greater than or equal to the minimum size of all the devices in a mirror
or raidz configuration.
.Pp
.Ar new_device
is required if the pool is not redundant.
If
.Ar new_device
is not specified, it defaults to
.Ar old_device .
This form of replacement is useful after an existing disk has failed and has
been physically replaced.
In this case, the new disk may have the same
.Pa /dev
path as the old device, even though it is actually a different disk.
ZFS recognizes this.
.Bl -tag -width Ds
.It Fl f
Forces use of
.Ar new_device ,
even if it appears to be in use.
Not all devices can be overridden in this manner.
.It Fl o Ar property Ns = Ns Ar value
Sets the given pool properties. See the
.Sx Properties
section for a list of valid properties that can be set.
The only property supported at the moment is
.Sy ashift .
.El
.It Xo
.Nm
.Cm scrub
.Op Fl s | Fl p
.Ar pool Ns ...
.Xc
Begins a scrub or resumes a paused scrub.
The scrub examines all data in the specified pools to verify that it checksums
correctly.
For replicated
.Pq mirror or raidz
devices, ZFS automatically repairs any damage discovered during the scrub.
The
.Nm zpool Cm status
command reports the progress of the scrub and summarizes the results of the
scrub upon completion.
.Pp
Scrubbing and resilvering are very similar operations.
The difference is that resilvering only examines data that ZFS knows to be out
of date
.Po
for example, when attaching a new device to a mirror or replacing an existing
device
.Pc ,
whereas scrubbing examines all data to discover silent errors due to hardware
faults or disk failure.
.Pp
Because scrubbing and resilvering are I/O-intensive operations, ZFS only allows
one at a time.
If a scrub is paused, the
.Nm zpool Cm scrub
command resumes it.
If a resilver is in progress, ZFS does not allow a scrub to be started until the
resilver completes.
.Bl -tag -width Ds
.It Fl s
Stop scrubbing.
.It Fl p
Pause scrubbing.
Scrub pause state and progress are periodically synced to disk.
If the system is restarted or pool is exported during a paused scrub,
even after import, scrub will remain paused until it is resumed.
Once resumed the scrub will pick up from the place where it was last
checkpointed to disk.
To resume a paused scrub, issue
.Nm zpool Cm scrub
again.
.El
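.Pp
For example, a scrub can be started, paused, and later resumed with (pool
name is a placeholder):
.Bd -literal
# zpool scrub tank
# zpool scrub -p tank
# zpool scrub tank
.Ed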
.It Xo
.Nm
.Cm set
.Ar property Ns = Ns Ar value
.Ar pool
.Xc
Sets the given property on the specified pool.
See the
.Sx Properties
section for more information on what properties can be set and acceptable
values.
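.Pp
For example, automatic expansion could be enabled with (pool name is a
placeholder):
.Bd -literal
# zpool set autoexpand=on tank
.Ed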
.It Xo
.Nm
.Cm split
.Op Fl gLlnP
.Oo Fl o Ar property Ns = Ns Ar value Oc Ns ...
.Op Fl R Ar root
.Ar pool newpool
.Op Ar device ...
.Xc
Splits devices off
.Ar pool
creating
.Ar newpool .
All vdevs in
.Ar pool
must be mirrors and the pool must not be in the process of resilvering.
At the time of the split,
.Ar newpool
will be a replica of
.Ar pool .
By default, the
last device in each mirror is split from
.Ar pool
to create
.Ar newpool .
.Pp
The optional device specification causes the specified device(s) to be
included in the new
.Ar pool
and, should any devices remain unspecified,
the last device in each mirror is used, as it would be by default.
.Bl -tag -width Ds
.It Fl g
Display vdev GUIDs instead of the normal device names. These GUIDs
can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl L
Display real paths for vdevs resolving all symbolic links. This can
be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl l
Indicates that this command will request encryption keys for all encrypted
datasets it attempts to mount as it is bringing the new pool online. Note that
if any datasets have a
.Sy keylocation
of
.Sy prompt
this command will block waiting for the keys to be entered. Without this flag
encrypted datasets will be left unavailable until the keys are loaded.
.It Fl n
Do a dry run; do not actually perform the split.
Print out the expected configuration of
.Ar newpool .
.It Fl P
Display full paths for vdevs instead of only the last component of
the path. This can be used in conjunction with the
.Fl L
flag.
.It Fl o Ar property Ns = Ns Ar value
Sets the specified property for
.Ar newpool .
See the
.Sx Properties
section for more information on the available pool properties.
.It Fl R Ar root
Set
.Sy altroot
for
.Ar newpool
to
.Ar root
and automatically import it.
.El
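.Pp
For example, the expected layout can be previewed and the split then
performed with (pool names are placeholders):
.Bd -literal
# zpool split -n tank newtank
# zpool split tank newtank
.Ed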
.It Xo
.Nm
.Cm status
.Op Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns ...
.Op Fl gLPvxD
.Op Fl T Sy u Ns | Ns Sy d
.Oo Ar pool Oc Ns ...
.Op Ar interval Op Ar count
.Xc
Displays the detailed health status for the given pools.
If no
.Ar pool
is specified, then the status of each pool in the system is displayed.
For more information on pool and device health, see the
.Sx Device Failure and Recovery
section.
.Pp
If a scrub or resilver is in progress, this command reports the percentage done
and the estimated time to completion.
Both of these are only approximate, because the amount of data in the pool and
the other workloads on the system can change.
.Bl -tag -width Ds
.It Fl c Op Ar SCRIPT1 Ns Oo , Ns Ar SCRIPT2 Oc Ns ...
Run a script (or scripts) on each vdev and include the output as a new column
in the
.Nm zpool Cm status
output. See the
.Fl c
option of
.Nm zpool Cm iostat
for complete details.
.It Fl g
Display vdev GUIDs instead of the normal device names. These GUIDs
can be used in place of device names for the zpool
detach/offline/remove/replace commands.
.It Fl L
Display real paths for vdevs resolving all symbolic links. This can
be used to look up the current block device name regardless of the
.Pa /dev/disk/
path used to open it.
.It Fl P
Display full paths for vdevs instead of only the last component of
the path. This can be used in conjunction with the
.Fl L
flag.
.It Fl D
Display a histogram of deduplication statistics, showing the allocated
.Pq physically present on disk
and referenced
.Pq logically referenced in the pool
block counts and sizes by reference count.
.It Fl T Sy u Ns | Ns Sy d
Display a time stamp.
Specify
.Fl u
for a printed representation of the internal representation of time.
See
.Xr time 2 .
Specify
.Fl d
for standard date format.
See
.Xr date 1 .
.It Fl v
Displays verbose data error information, printing out a complete list of all
data errors since the last complete pool scrub.
.It Fl x
Only display status for pools that are exhibiting errors or are otherwise
unavailable.
Warnings about pools not using the latest on-disk format will not be included.
.El
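.Pp
For example, only pools that currently have a problem can be listed with:
.Bd -literal
# zpool status -x
.Ed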
.It Xo
.Nm
.Cm sync
.Op Ar pool ...
.Xc
This command forces all in-core dirty data to be written to the primary
pool storage and not the ZIL. It will also update administrative
information including quota reporting. Without arguments,
.Sy zpool sync
will sync all pools on the system. Otherwise, it will sync only the
specified pool(s).
.It Xo
.Nm
.Cm upgrade
.Xc
Displays pools which do not have all supported features enabled and pools
formatted using a legacy ZFS version number.
These pools can continue to be used, but some features may not be available.
Use
.Nm zpool Cm upgrade Fl a
to enable all features on all pools.
.It Xo
.Nm
.Cm upgrade
.Fl v
.Xc
Displays legacy ZFS versions supported by the current software.
See
.Xr zpool-features 5
for a description of the feature flags supported by the current software.
.It Xo
.Nm
.Cm upgrade
.Op Fl V Ar version
.Fl a Ns | Ns Ar pool Ns ...
.Xc
Enables all supported features on the given pool.
Once this is done, the pool will no longer be accessible on systems that do not
support feature flags.
See
.Xr zpool-features 5
for details on compatibility with systems that support feature flags, but do not
support all features enabled on the pool.
.Bl -tag -width Ds
.It Fl a
Enables all supported features on all pools.
.It Fl V Ar version
Upgrade to the specified legacy version.
If the
.Fl V
flag is specified, no features will be enabled on the pool.
This option can only be used to increase the version number up to the last
supported legacy version number.
.El
.El
.Sh EXIT STATUS
The following exit values are returned:
.Bl -tag -width Ds
.It Sy 0
Successful completion.
.It Sy 1
An error occurred.
.It Sy 2
Invalid command line options were specified.
.El
.Sh EXAMPLES
.Bl -tag -width Ds
.It Sy Example 1 No Creating a RAID-Z Storage Pool
The following command creates a pool with a single raidz root vdev that
consists of six disks.
.Bd -literal
# zpool create tank raidz sda sdb sdc sdd sde sdf
.Ed
.It Sy Example 2 No Creating a Mirrored Storage Pool
The following command creates a pool with two mirrors, where each mirror
contains two disks.
.Bd -literal
# zpool create tank mirror sda sdb mirror sdc sdd
.Ed
.It Sy Example 3 No Creating a ZFS Storage Pool by Using Partitions
The following command creates an unmirrored pool using two disk partitions.
.Bd -literal
# zpool create tank sda1 sdb2
.Ed
.It Sy Example 4 No Creating a ZFS Storage Pool by Using Files
The following command creates an unmirrored pool using files.
While not recommended, a pool based on files can be useful for experimental
purposes.
.Bd -literal
# zpool create tank /path/to/file/a /path/to/file/b
.Ed
.It Sy Example 5 No Adding a Mirror to a ZFS Storage Pool
The following command adds two mirrored disks to the pool
.Em tank ,
assuming the pool is already made up of two-way mirrors.
The additional space is immediately available to any datasets within the pool.
.Bd -literal
# zpool add tank mirror sda sdb
.Ed
.It Sy Example 6 No Listing Available ZFS Storage Pools
The following command lists all available pools on the system.
In this case, the pool
.Em zion
is faulted due to a missing device.
The results from this command are similar to the following:
.Bd -literal
# zpool list
NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
rpool 19.9G 8.43G 11.4G - 33% 42% 1.00x ONLINE -
tank 61.5G 20.0G 41.5G - 48% 32% 1.00x ONLINE -
zion - - - - - - - FAULTED -
.Ed
.It Sy Example 7 No Destroying a ZFS Storage Pool
The following command destroys the pool
.Em tank
and any datasets contained within.
.Bd -literal
# zpool destroy -f tank
.Ed
.It Sy Example 8 No Exporting a ZFS Storage Pool
The following command exports the devices in pool
.Em tank
so that they can be relocated or later imported.
.Bd -literal
# zpool export tank
.Ed
.It Sy Example 9 No Importing a ZFS Storage Pool
The following command displays available pools, and then imports the pool
.Em tank
for use on the system.
The results from this command are similar to the following:
.Bd -literal
# zpool import
pool: tank
id: 15451357997522795478
state: ONLINE
action: The pool can be imported using its name or numeric identifier.
config:
tank ONLINE
mirror ONLINE
sda ONLINE
sdb ONLINE
# zpool import tank
.Ed
.It Sy Example 10 No Upgrading All ZFS Storage Pools to the Current Version
The following command upgrades all ZFS Storage pools to the current version of
the software.
.Bd -literal
# zpool upgrade -a
This system is currently running ZFS version 2.
.Ed
.It Sy Example 11 No Managing Hot Spares
The following command creates a new pool with an available hot spare:
.Bd -literal
# zpool create tank mirror sda sdb spare sdc
.Ed
.Pp
If one of the disks were to fail, the pool would be reduced to the degraded
state.
The failed device can be replaced using the following command:
.Bd -literal
# zpool replace tank sda sdd
.Ed
.Pp
Once the data has been resilvered, the spare is automatically removed and is
made available for use should another device fail.
The hot spare can be permanently removed from the pool using the following
command:
.Bd -literal
# zpool remove tank sdc
.Ed
.It Sy Example 12 No Creating a ZFS Pool with Mirrored Separate Intent Logs
The following command creates a ZFS storage pool consisting of two, two-way
mirrors and mirrored log devices:
.Bd -literal
# zpool create pool mirror sda sdb mirror sdc sdd log mirror \\
sde sdf
.Ed
.It Sy Example 13 No Adding Cache Devices to a ZFS Pool
The following command adds two disks for use as cache devices to a ZFS storage
pool:
.Bd -literal
# zpool add pool cache sdc sdd
.Ed
.Pp
Once added, the cache devices gradually fill with content from main memory.
Depending on the size of your cache devices, it could take over an hour for
them to fill.
Capacity and reads can be monitored using the
.Cm iostat
option as follows:
.Bd -literal
# zpool iostat -v pool 5
.Ed
-.It Sy Example 14 No Removing a Mirrored Log Device
-The following command removes the mirrored log device
-.Sy mirror-2 .
+.It Sy Example 14 No Removing a Mirrored Top-Level (Log or Data) Device
+The following commands remove the mirrored log device
+.Sy mirror-2
+and mirrored top-level data device
+.Sy mirror-1 .
+.Pp
Given this configuration:
.Bd -literal
pool: tank
state: ONLINE
scrub: none requested
config:
NAME STATE READ WRITE CKSUM
tank ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
sda ONLINE 0 0 0
sdb ONLINE 0 0 0
mirror-1 ONLINE 0 0 0
sdc ONLINE 0 0 0
sdd ONLINE 0 0 0
logs
mirror-2 ONLINE 0 0 0
sde ONLINE 0 0 0
sdf ONLINE 0 0 0
.Ed
.Pp
The command to remove the mirrored log
.Sy mirror-2
is:
.Bd -literal
# zpool remove tank mirror-2
.Ed
+.Pp
+The command to remove the mirrored data
+.Sy mirror-1
+is:
+.Bd -literal
+# zpool remove tank mirror-1
+.Ed
.It Sy Example 15 No Displaying expanded space on a device
The following command displays the detailed information for the pool
.Em data .
This pool is comprised of a single raidz vdev where one of its devices
increased its capacity by 10GB.
In this example, the pool will not be able to utilize this extra capacity until
all the devices under the raidz vdev have been expanded.
.Bd -literal
# zpool list -v data
NAME SIZE ALLOC FREE EXPANDSZ FRAG CAP DEDUP HEALTH ALTROOT
data 23.9G 14.6G 9.30G - 48% 61% 1.00x ONLINE -
raidz1 23.9G 14.6G 9.30G - 48%
sda - - - - -
sdb - - - 10G -
sdc - - - - -
.Ed
.It Sy Example 16 No Adding output columns
Additional columns can be added to the
.Nm zpool Cm status
and
.Nm zpool Cm iostat
output with
.Fl c
option.
.Bd -literal
# zpool status -c vendor,model,size
NAME STATE READ WRITE CKSUM vendor model size
tank ONLINE 0 0 0
mirror-0 ONLINE 0 0 0
U1 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U10 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U11 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U12 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U13 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
U14 ONLINE 0 0 0 SEAGATE ST8000NM0075 7.3T
# zpool iostat -vc slaves
capacity operations bandwidth
pool alloc free read write read write slaves
---------- ----- ----- ----- ----- ----- ----- ---------
tank 20.4G 7.23T 26 152 20.7M 21.6M
mirror 20.4G 7.23T 26 152 20.7M 21.6M
U1 - - 0 31 1.46K 20.6M sdb sdff
U10 - - 0 1 3.77K 13.3K sdas sdgw
U11 - - 0 1 288K 13.3K sdat sdgx
U12 - - 0 1 78.4K 13.3K sdau sdgy
U13 - - 0 1 128K 13.3K sdav sdgz
U14 - - 0 1 63.2K 13.3K sdfk sdg
.Ed
.El
.Sh ENVIRONMENT VARIABLES
.Bl -tag -width "ZFS_ABORT"
.It Ev ZFS_ABORT
Cause
.Nm zpool
to dump core on exit for the purposes of running
.Sy ::findleaks .
.El
.Bl -tag -width "ZPOOL_IMPORT_PATH"
.It Ev ZPOOL_IMPORT_PATH
The search path for devices or files to use with the pool. This is a colon-separated list of directories in which
.Nm zpool
looks for device nodes and files.
Similar to the
.Fl d
option in
.Nm zpool import .
.El
.Bl -tag -width "ZPOOL_VDEV_NAME_GUID"
.It Ev ZPOOL_VDEV_NAME_GUID
Cause
.Nm zpool
subcommands to output vdev guids by default. This behavior
is identical to the
.Nm zpool status -g
command line option.
.El
.Bl -tag -width "ZPOOL_VDEV_NAME_FOLLOW_LINKS"
.It Ev ZPOOL_VDEV_NAME_FOLLOW_LINKS
Cause
.Nm zpool
subcommands to follow links for vdev names by default. This behavior is identical to the
.Nm zpool status -L
command line option.
.El
.Bl -tag -width "ZPOOL_VDEV_NAME_PATH"
.It Ev ZPOOL_VDEV_NAME_PATH
Cause
.Nm zpool
subcommands to output full vdev path names by default. This
behavior is identical to the
.Nm zpool status -P
command line option.
.El
.Bl -tag -width "ZFS_VDEV_DEVID_OPT_OUT"
.It Ev ZFS_VDEV_DEVID_OPT_OUT
Older ZFS on Linux implementations had issues when attempting to display pool
config VDEV names if a
.Sy devid
NVP value is present in the pool's config.
.Pp
For example, a pool that originated on the illumos platform would have a devid
value in the config and
.Nm zpool status
would fail when listing the config.
This would also be true for future Linux based pools.
.Pp
A pool can be stripped of any
.Sy devid
values on import or prevented from adding
them on
.Nm zpool create
or
.Nm zpool add
by setting
.Sy ZFS_VDEV_DEVID_OPT_OUT .
.El
.Bl -tag -width "ZPOOL_SCRIPTS_AS_ROOT"
.It Ev ZPOOL_SCRIPTS_AS_ROOT
Allow a privileged user to run the
.Nm zpool status/iostat
with the
.Fl c
option. Normally, only unprivileged users are allowed to run
.Fl c .
.El
.Bl -tag -width "ZPOOL_SCRIPTS_PATH"
.It Ev ZPOOL_SCRIPTS_PATH
The search path for scripts when running
.Nm zpool status/iostat
with the
.Fl c
option. This is a colon-separated list of directories and overrides the default
.Pa ~/.zpool.d
and
.Pa /etc/zfs/zpool.d
search paths.
.El
.Bl -tag -width "ZPOOL_SCRIPTS_ENABLED"
.It Ev ZPOOL_SCRIPTS_ENABLED
Allow a user to run
.Nm zpool status/iostat
with the
.Fl c
option. If
.Sy ZPOOL_SCRIPTS_ENABLED
is not set, it is assumed that the user is allowed to run
.Nm zpool status/iostat -c .
.El
.Sh INTERFACE STABILITY
.Sy Evolving
.Sh SEE ALSO
.Xr zfs-events 5 ,
.Xr zfs-module-parameters 5 ,
.Xr zpool-features 5 ,
.Xr zed 8 ,
.Xr zfs 8
diff --git a/module/zcommon/zfeature_common.c b/module/zcommon/zfeature_common.c
index 36d0d9613fc7..6ab3abe11914 100644
--- a/module/zcommon/zfeature_common.c
+++ b/module/zcommon/zfeature_common.c
@@ -1,345 +1,361 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014, Nexenta Systems, Inc. All rights reserved.
*/
#ifdef _KERNEL
#include <sys/systm.h>
#else
#include <errno.h>
#include <string.h>
#endif
#include <sys/debug.h>
#include <sys/fs/zfs.h>
#include <sys/inttypes.h>
#include <sys/types.h>
#include "zfeature_common.h"
/*
* Set to disable all feature checks while opening pools, allowing pools with
* unsupported features to be opened. Set for testing only.
*/
boolean_t zfeature_checks_disable = B_FALSE;
zfeature_info_t spa_feature_table[SPA_FEATURES];
/*
* Valid characters for feature guids. This list is mainly for aesthetic
* purposes and could be expanded in the future. There are different allowed
* characters in the guids reverse dns portion (before the colon) and its
* short name (after the colon).
*/
static int
valid_char(char c, boolean_t after_colon)
{
return ((c >= 'a' && c <= 'z') ||
(c >= '0' && c <= '9') ||
(after_colon && c == '_') ||
(!after_colon && (c == '.' || c == '-')));
}
/*
* Every feature guid must contain exactly one colon which separates a reverse
* dns organization name from the feature's "short" name (e.g.
* "com.company:feature_name").
*/
boolean_t
zfeature_is_valid_guid(const char *name)
{
int i;
boolean_t has_colon = B_FALSE;
i = 0;
while (name[i] != '\0') {
char c = name[i++];
if (c == ':') {
if (has_colon)
return (B_FALSE);
has_colon = B_TRUE;
continue;
}
if (!valid_char(c, has_colon))
return (B_FALSE);
}
return (has_colon);
}
boolean_t
zfeature_is_supported(const char *guid)
{
if (zfeature_checks_disable)
return (B_TRUE);
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *feature = &spa_feature_table[i];
if (strcmp(guid, feature->fi_guid) == 0)
return (B_TRUE);
}
return (B_FALSE);
}
int
zfeature_lookup_name(const char *name, spa_feature_t *res)
{
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t *feature = &spa_feature_table[i];
if (strcmp(name, feature->fi_uname) == 0) {
if (res != NULL)
*res = i;
return (0);
}
}
return (ENOENT);
}
boolean_t
zfeature_depends_on(spa_feature_t fid, spa_feature_t check)
{
zfeature_info_t *feature = &spa_feature_table[fid];
for (int i = 0; feature->fi_depends[i] != SPA_FEATURE_NONE; i++) {
if (feature->fi_depends[i] == check)
return (B_TRUE);
}
return (B_FALSE);
}
static boolean_t
deps_contains_feature(const spa_feature_t *deps, const spa_feature_t feature)
{
for (int i = 0; deps[i] != SPA_FEATURE_NONE; i++)
if (deps[i] == feature)
return (B_TRUE);
return (B_FALSE);
}
static void
zfeature_register(spa_feature_t fid, const char *guid, const char *name,
const char *desc, zfeature_flags_t flags, const spa_feature_t *deps)
{
zfeature_info_t *feature = &spa_feature_table[fid];
static spa_feature_t nodeps[] = { SPA_FEATURE_NONE };
ASSERT(name != NULL);
ASSERT(desc != NULL);
ASSERT((flags & ZFEATURE_FLAG_READONLY_COMPAT) == 0 ||
(flags & ZFEATURE_FLAG_MOS) == 0);
ASSERT3U(fid, <, SPA_FEATURES);
ASSERT(zfeature_is_valid_guid(guid));
if (deps == NULL)
deps = nodeps;
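/* A feature flagged as per-dataset must depend on extensible_dataset. */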
VERIFY(((flags & ZFEATURE_FLAG_PER_DATASET) == 0) ||
(deps_contains_feature(deps, SPA_FEATURE_EXTENSIBLE_DATASET)));
feature->fi_feature = fid;
feature->fi_guid = guid;
feature->fi_uname = name;
feature->fi_desc = desc;
feature->fi_flags = flags;
feature->fi_depends = deps;
}
void
zpool_feature_init(void)
{
zfeature_register(SPA_FEATURE_ASYNC_DESTROY,
"com.delphix:async_destroy", "async_destroy",
"Destroy filesystems asynchronously.",
ZFEATURE_FLAG_READONLY_COMPAT, NULL);
zfeature_register(SPA_FEATURE_EMPTY_BPOBJ,
"com.delphix:empty_bpobj", "empty_bpobj",
"Snapshots use less space.",
ZFEATURE_FLAG_READONLY_COMPAT, NULL);
zfeature_register(SPA_FEATURE_LZ4_COMPRESS,
"org.illumos:lz4_compress", "lz4_compress",
"LZ4 compression algorithm support.",
ZFEATURE_FLAG_ACTIVATE_ON_ENABLE, NULL);
zfeature_register(SPA_FEATURE_MULTI_VDEV_CRASH_DUMP,
"com.joyent:multi_vdev_crash_dump", "multi_vdev_crash_dump",
"Crash dumps to multiple vdev pools.",
0, NULL);
zfeature_register(SPA_FEATURE_SPACEMAP_HISTOGRAM,
"com.delphix:spacemap_histogram", "spacemap_histogram",
"Spacemaps maintain space histograms.",
ZFEATURE_FLAG_READONLY_COMPAT, NULL);
zfeature_register(SPA_FEATURE_ENABLED_TXG,
"com.delphix:enabled_txg", "enabled_txg",
"Record txg at which a feature is enabled",
ZFEATURE_FLAG_READONLY_COMPAT, NULL);
{
static const spa_feature_t hole_birth_deps[] = {
SPA_FEATURE_ENABLED_TXG,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_HOLE_BIRTH,
"com.delphix:hole_birth", "hole_birth",
"Retain hole birth txg for more precise zfs send",
ZFEATURE_FLAG_MOS | ZFEATURE_FLAG_ACTIVATE_ON_ENABLE,
hole_birth_deps);
}
zfeature_register(SPA_FEATURE_EXTENSIBLE_DATASET,
"com.delphix:extensible_dataset", "extensible_dataset",
"Enhanced dataset functionality, used by other features.",
0, NULL);
{
static const spa_feature_t bookmarks_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_BOOKMARKS,
"com.delphix:bookmarks", "bookmarks",
"\"zfs bookmark\" command",
ZFEATURE_FLAG_READONLY_COMPAT, bookmarks_deps);
}
{
static const spa_feature_t filesystem_limits_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_FS_SS_LIMIT,
"com.joyent:filesystem_limits", "filesystem_limits",
"Filesystem and snapshot limits.",
ZFEATURE_FLAG_READONLY_COMPAT, filesystem_limits_deps);
}
zfeature_register(SPA_FEATURE_EMBEDDED_DATA,
"com.delphix:embedded_data", "embedded_data",
"Blocks which compress very well use even less space.",
ZFEATURE_FLAG_MOS | ZFEATURE_FLAG_ACTIVATE_ON_ENABLE,
NULL);
{
static const spa_feature_t large_blocks_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_LARGE_BLOCKS,
"org.open-zfs:large_blocks", "large_blocks",
"Support for blocks larger than 128KB.",
ZFEATURE_FLAG_PER_DATASET, large_blocks_deps);
}
{
static const spa_feature_t large_dnode_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_LARGE_DNODE,
"org.zfsonlinux:large_dnode", "large_dnode",
"Variable on-disk size of dnodes.",
ZFEATURE_FLAG_PER_DATASET, large_dnode_deps);
}
{
static const spa_feature_t sha512_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_SHA512,
"org.illumos:sha512", "sha512",
"SHA-512/256 hash algorithm.",
ZFEATURE_FLAG_PER_DATASET, sha512_deps);
}
{
static const spa_feature_t skein_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_SKEIN,
"org.illumos:skein", "skein",
"Skein hash algorithm.",
ZFEATURE_FLAG_PER_DATASET, skein_deps);
}
{
static const spa_feature_t edonr_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_EDONR,
"org.illumos:edonr", "edonr",
"Edon-R hash algorithm.",
ZFEATURE_FLAG_PER_DATASET, edonr_deps);
}
+ zfeature_register(SPA_FEATURE_DEVICE_REMOVAL,
+ "com.delphix:device_removal", "device_removal",
+ "Top-level vdevs can be removed, reducing logical pool size.",
+ ZFEATURE_FLAG_MOS, NULL);
+ {
+ static const spa_feature_t obsolete_counts_deps[] = {
+ SPA_FEATURE_EXTENSIBLE_DATASET,
+ SPA_FEATURE_DEVICE_REMOVAL,
+ SPA_FEATURE_NONE
+ };
+ zfeature_register(SPA_FEATURE_OBSOLETE_COUNTS,
+ "com.delphix:obsolete_counts", "obsolete_counts",
+ "Reduce memory used by removed devices when their blocks are "
+ "freed or remapped.",
+ ZFEATURE_FLAG_READONLY_COMPAT, obsolete_counts_deps);
+ }
{
static const spa_feature_t userobj_accounting_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_USEROBJ_ACCOUNTING,
"org.zfsonlinux:userobj_accounting", "userobj_accounting",
"User/Group object accounting.",
ZFEATURE_FLAG_READONLY_COMPAT | ZFEATURE_FLAG_PER_DATASET,
userobj_accounting_deps);
}
{
static const spa_feature_t encryption_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_ENCRYPTION,
"com.datto:encryption", "encryption",
"Support for dataset level encryption",
ZFEATURE_FLAG_PER_DATASET, encryption_deps);
}
{
static const spa_feature_t project_quota_deps[] = {
SPA_FEATURE_EXTENSIBLE_DATASET,
SPA_FEATURE_NONE
};
zfeature_register(SPA_FEATURE_PROJECT_QUOTA,
"org.zfsonlinux:project_quota", "project_quota",
"space/object accounting based on project ID.",
ZFEATURE_FLAG_READONLY_COMPAT | ZFEATURE_FLAG_PER_DATASET,
project_quota_deps);
}
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfeature_lookup_name);
EXPORT_SYMBOL(zfeature_is_supported);
EXPORT_SYMBOL(zfeature_is_valid_guid);
EXPORT_SYMBOL(zfeature_depends_on);
EXPORT_SYMBOL(zpool_feature_init);
EXPORT_SYMBOL(spa_feature_table);
#endif
diff --git a/module/zcommon/zfs_deleg.c b/module/zcommon/zfs_deleg.c
index 3a51bc49aa9c..c6ba278cffd8 100644
--- a/module/zcommon/zfs_deleg.c
+++ b/module/zcommon/zfs_deleg.c
@@ -1,250 +1,251 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2010 Nexenta Systems, Inc. All rights reserved.
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
* Copyright 2016 Igor Kozhukhov <ikozhukhov@gmail.com>
*/
#include <sys/zfs_context.h>
#if defined(_KERNEL)
#include <sys/systm.h>
#include <sys/sunddi.h>
#include <sys/ctype.h>
#else
#include <stdio.h>
#include <unistd.h>
#include <strings.h>
#include <libnvpair.h>
#include <ctype.h>
#endif
#include <sys/dsl_deleg.h>
#include "zfs_prop.h"
#include "zfs_deleg.h"
#include "zfs_namecheck.h"
zfs_deleg_perm_tab_t zfs_deleg_perm_tab[] = {
{ZFS_DELEG_PERM_ALLOW},
{ZFS_DELEG_PERM_BOOKMARK},
{ZFS_DELEG_PERM_CLONE},
{ZFS_DELEG_PERM_CREATE},
{ZFS_DELEG_PERM_DESTROY},
{ZFS_DELEG_PERM_DIFF},
{ZFS_DELEG_PERM_MOUNT},
{ZFS_DELEG_PERM_PROMOTE},
{ZFS_DELEG_PERM_RECEIVE},
+ {ZFS_DELEG_PERM_REMAP},
{ZFS_DELEG_PERM_RENAME},
{ZFS_DELEG_PERM_ROLLBACK},
{ZFS_DELEG_PERM_SNAPSHOT},
{ZFS_DELEG_PERM_SHARE},
{ZFS_DELEG_PERM_SEND},
{ZFS_DELEG_PERM_USERPROP},
{ZFS_DELEG_PERM_USERQUOTA},
{ZFS_DELEG_PERM_GROUPQUOTA},
{ZFS_DELEG_PERM_USERUSED},
{ZFS_DELEG_PERM_GROUPUSED},
{ZFS_DELEG_PERM_USEROBJQUOTA},
{ZFS_DELEG_PERM_GROUPOBJQUOTA},
{ZFS_DELEG_PERM_USEROBJUSED},
{ZFS_DELEG_PERM_GROUPOBJUSED},
{ZFS_DELEG_PERM_HOLD},
{ZFS_DELEG_PERM_RELEASE},
{ZFS_DELEG_PERM_LOAD_KEY},
{ZFS_DELEG_PERM_CHANGE_KEY},
{ZFS_DELEG_PERM_PROJECTUSED},
{ZFS_DELEG_PERM_PROJECTQUOTA},
{ZFS_DELEG_PERM_PROJECTOBJUSED},
{ZFS_DELEG_PERM_PROJECTOBJQUOTA},
{NULL}
};
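/*
 * Returns 0 if "perm" is either a known delegable permission or property
 * name, or a valid permission set name; nonzero otherwise.
 */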
static int
zfs_valid_permission_name(const char *perm)
{
if (zfs_deleg_canonicalize_perm(perm))
return (0);
return (permset_namecheck(perm, NULL, NULL));
}
const char *
zfs_deleg_canonicalize_perm(const char *perm)
{
int i;
zfs_prop_t prop;
for (i = 0; zfs_deleg_perm_tab[i].z_perm != NULL; i++) {
if (strcmp(perm, zfs_deleg_perm_tab[i].z_perm) == 0)
return (perm);
}
prop = zfs_name_to_prop(perm);
if (prop != ZPROP_INVAL && zfs_prop_delegatable(prop))
return (zfs_prop_to_name(prop));
return (NULL);
}
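/*
 * Validate an encoded "who" string of the form constructed by
 * zfs_deleg_whokey(): a who-type character, an inheritance character
 * (or '-'), the ZFS_DELEG_FIELD_SEP_CHR separator, and then a numeric id,
 * a permission set name, or nothing, depending on the who type.
 */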
static int
zfs_validate_who(char *who)
{
char *p;
if (who[2] != ZFS_DELEG_FIELD_SEP_CHR)
return (-1);
switch (who[0]) {
case ZFS_DELEG_USER:
case ZFS_DELEG_GROUP:
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_GROUP_SETS:
if (who[1] != ZFS_DELEG_LOCAL && who[1] != ZFS_DELEG_DESCENDENT)
return (-1);
for (p = &who[3]; *p; p++)
if (!isdigit(*p))
return (-1);
break;
case ZFS_DELEG_NAMED_SET:
case ZFS_DELEG_NAMED_SET_SETS:
if (who[1] != ZFS_DELEG_NA)
return (-1);
return (permset_namecheck(&who[3], NULL, NULL));
case ZFS_DELEG_CREATE:
case ZFS_DELEG_CREATE_SETS:
if (who[1] != ZFS_DELEG_NA)
return (-1);
if (who[3] != '\0')
return (-1);
break;
case ZFS_DELEG_EVERYONE:
case ZFS_DELEG_EVERYONE_SETS:
if (who[1] != ZFS_DELEG_LOCAL && who[1] != ZFS_DELEG_DESCENDENT)
return (-1);
if (who[3] != '\0')
return (-1);
break;
default:
return (-1);
}
return (0);
}
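/*
 * Verify that every "who" entry in the nvlist is well formed and that each
 * permission it grants has a valid name.
 */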
int
zfs_deleg_verify_nvlist(nvlist_t *nvp)
{
nvpair_t *who, *perm_name;
nvlist_t *perms;
int error;
if (nvp == NULL)
return (-1);
who = nvlist_next_nvpair(nvp, NULL);
if (who == NULL)
return (-1);
do {
if (zfs_validate_who(nvpair_name(who)))
return (-1);
error = nvlist_lookup_nvlist(nvp, nvpair_name(who), &perms);
if (error && error != ENOENT)
return (-1);
if (error == ENOENT)
continue;
perm_name = nvlist_next_nvpair(perms, NULL);
if (perm_name == NULL) {
return (-1);
}
do {
error = zfs_valid_permission_name(
nvpair_name(perm_name));
if (error)
return (-1);
} while ((perm_name = nvlist_next_nvpair(perms, perm_name))
!= NULL);
} while ((who = nvlist_next_nvpair(nvp, who)) != NULL);
return (0);
}
/*
* Construct the base attribute name. The base attribute names
* are the "key" to locate the jump objects which contain the actual
* permissions. The base attribute names are encoded based on
* type of entry and whether it is a local or descendent permission.
*
* Arguments:
* attr - attribute name return string, attribute is assumed to be
* ZFS_MAX_DELEG_NAME long.
* type - type of entry to construct
* inheritchr - inheritance type (local, descendent, or NA for create and
* permission set definitions)
* data - is either a permission set name or a 64 bit uid/gid.
*/
void
zfs_deleg_whokey(char *attr, zfs_deleg_who_type_t type,
char inheritchr, void *data)
{
int len = ZFS_MAX_DELEG_NAME;
uint64_t *id = data;
switch (type) {
case ZFS_DELEG_USER:
case ZFS_DELEG_GROUP:
case ZFS_DELEG_USER_SETS:
case ZFS_DELEG_GROUP_SETS:
(void) snprintf(attr, len, "%c%c%c%lld", type, inheritchr,
ZFS_DELEG_FIELD_SEP_CHR, (longlong_t)*id);
break;
case ZFS_DELEG_NAMED_SET_SETS:
case ZFS_DELEG_NAMED_SET:
(void) snprintf(attr, len, "%c-%c%s", type,
ZFS_DELEG_FIELD_SEP_CHR, (char *)data);
break;
case ZFS_DELEG_CREATE:
case ZFS_DELEG_CREATE_SETS:
(void) snprintf(attr, len, "%c-%c", type,
ZFS_DELEG_FIELD_SEP_CHR);
break;
case ZFS_DELEG_EVERYONE:
case ZFS_DELEG_EVERYONE_SETS:
(void) snprintf(attr, len, "%c%c%c", type, inheritchr,
ZFS_DELEG_FIELD_SEP_CHR);
break;
default:
ASSERT(!"bad zfs_deleg_who_type_t");
}
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_deleg_verify_nvlist);
EXPORT_SYMBOL(zfs_deleg_whokey);
EXPORT_SYMBOL(zfs_deleg_canonicalize_perm);
#endif
diff --git a/module/zcommon/zfs_prop.c b/module/zcommon/zfs_prop.c
index 0d44fd139b7a..34b7228ec9be 100644
--- a/module/zcommon/zfs_prop.c
+++ b/module/zcommon/zfs_prop.c
@@ -1,895 +1,897 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright 2016, Joyent, Inc.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zio.h>
#include <sys/spa.h>
#include <sys/u8_textprep.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_znode.h>
#include <sys/dsl_crypt.h>
#include "zfs_prop.h"
#include "zfs_deleg.h"
#include "zfs_fletcher.h"
#if defined(_KERNEL)
#include <sys/systm.h>
#else
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#endif
static zprop_desc_t zfs_prop_table[ZFS_NUM_PROPS];
/* Note: this is indexed by zfs_userquota_prop_t; keep the order the same. */
const char *zfs_userquota_prop_prefixes[] = {
"userused@",
"userquota@",
"groupused@",
"groupquota@",
"userobjused@",
"userobjquota@",
"groupobjused@",
"groupobjquota@",
"projectused@",
"projectquota@",
"projectobjused@",
"projectobjquota@"
};
zprop_desc_t *
zfs_prop_get_table(void)
{
return (zfs_prop_table);
}
void
zfs_prop_init(void)
{
static zprop_index_t checksum_table[] = {
{ "on", ZIO_CHECKSUM_ON },
{ "off", ZIO_CHECKSUM_OFF },
{ "fletcher2", ZIO_CHECKSUM_FLETCHER_2 },
{ "fletcher4", ZIO_CHECKSUM_FLETCHER_4 },
{ "sha256", ZIO_CHECKSUM_SHA256 },
{ "noparity", ZIO_CHECKSUM_NOPARITY },
{ "sha512", ZIO_CHECKSUM_SHA512 },
{ "skein", ZIO_CHECKSUM_SKEIN },
{ "edonr", ZIO_CHECKSUM_EDONR },
{ NULL }
};
static zprop_index_t dedup_table[] = {
{ "on", ZIO_CHECKSUM_ON },
{ "off", ZIO_CHECKSUM_OFF },
{ "verify", ZIO_CHECKSUM_ON | ZIO_CHECKSUM_VERIFY },
{ "sha256", ZIO_CHECKSUM_SHA256 },
{ "sha256,verify",
ZIO_CHECKSUM_SHA256 | ZIO_CHECKSUM_VERIFY },
{ "sha512", ZIO_CHECKSUM_SHA512 },
{ "sha512,verify",
ZIO_CHECKSUM_SHA512 | ZIO_CHECKSUM_VERIFY },
{ "skein", ZIO_CHECKSUM_SKEIN },
{ "skein,verify",
ZIO_CHECKSUM_SKEIN | ZIO_CHECKSUM_VERIFY },
{ "edonr,verify",
ZIO_CHECKSUM_EDONR | ZIO_CHECKSUM_VERIFY },
{ NULL }
};
static zprop_index_t compress_table[] = {
{ "on", ZIO_COMPRESS_ON },
{ "off", ZIO_COMPRESS_OFF },
{ "lzjb", ZIO_COMPRESS_LZJB },
{ "gzip", ZIO_COMPRESS_GZIP_6 }, /* gzip default */
{ "gzip-1", ZIO_COMPRESS_GZIP_1 },
{ "gzip-2", ZIO_COMPRESS_GZIP_2 },
{ "gzip-3", ZIO_COMPRESS_GZIP_3 },
{ "gzip-4", ZIO_COMPRESS_GZIP_4 },
{ "gzip-5", ZIO_COMPRESS_GZIP_5 },
{ "gzip-6", ZIO_COMPRESS_GZIP_6 },
{ "gzip-7", ZIO_COMPRESS_GZIP_7 },
{ "gzip-8", ZIO_COMPRESS_GZIP_8 },
{ "gzip-9", ZIO_COMPRESS_GZIP_9 },
{ "zle", ZIO_COMPRESS_ZLE },
{ "lz4", ZIO_COMPRESS_LZ4 },
{ NULL }
};
static zprop_index_t crypto_table[] = {
{ "on", ZIO_CRYPT_ON },
{ "off", ZIO_CRYPT_OFF },
{ "aes-128-ccm", ZIO_CRYPT_AES_128_CCM },
{ "aes-192-ccm", ZIO_CRYPT_AES_192_CCM },
{ "aes-256-ccm", ZIO_CRYPT_AES_256_CCM },
{ "aes-128-gcm", ZIO_CRYPT_AES_128_GCM },
{ "aes-192-gcm", ZIO_CRYPT_AES_192_GCM },
{ "aes-256-gcm", ZIO_CRYPT_AES_256_GCM },
{ NULL }
};
static zprop_index_t keyformat_table[] = {
{ "none", ZFS_KEYFORMAT_NONE },
{ "raw", ZFS_KEYFORMAT_RAW },
{ "hex", ZFS_KEYFORMAT_HEX },
{ "passphrase", ZFS_KEYFORMAT_PASSPHRASE },
{ NULL }
};
static zprop_index_t snapdir_table[] = {
{ "hidden", ZFS_SNAPDIR_HIDDEN },
{ "visible", ZFS_SNAPDIR_VISIBLE },
{ NULL }
};
static zprop_index_t snapdev_table[] = {
{ "hidden", ZFS_SNAPDEV_HIDDEN },
{ "visible", ZFS_SNAPDEV_VISIBLE },
{ NULL }
};
static zprop_index_t acltype_table[] = {
{ "off", ZFS_ACLTYPE_OFF },
{ "disabled", ZFS_ACLTYPE_OFF },
{ "noacl", ZFS_ACLTYPE_OFF },
{ "posixacl", ZFS_ACLTYPE_POSIXACL },
{ NULL }
};
static zprop_index_t acl_inherit_table[] = {
{ "discard", ZFS_ACL_DISCARD },
{ "noallow", ZFS_ACL_NOALLOW },
{ "restricted", ZFS_ACL_RESTRICTED },
{ "passthrough", ZFS_ACL_PASSTHROUGH },
{ "secure", ZFS_ACL_RESTRICTED }, /* bkwrd compatibility */
{ "passthrough-x", ZFS_ACL_PASSTHROUGH_X },
{ NULL }
};
static zprop_index_t case_table[] = {
{ "sensitive", ZFS_CASE_SENSITIVE },
{ "insensitive", ZFS_CASE_INSENSITIVE },
{ "mixed", ZFS_CASE_MIXED },
{ NULL }
};
static zprop_index_t copies_table[] = {
{ "1", 1 },
{ "2", 2 },
{ "3", 3 },
{ NULL }
};
/*
* Use the unique flags we have to send to u8_strcmp() and/or
* u8_textprep() to represent the various normalization property
* values.
*/
static zprop_index_t normalize_table[] = {
{ "none", 0 },
{ "formD", U8_TEXTPREP_NFD },
{ "formKC", U8_TEXTPREP_NFKC },
{ "formC", U8_TEXTPREP_NFC },
{ "formKD", U8_TEXTPREP_NFKD },
{ NULL }
};
static zprop_index_t version_table[] = {
{ "1", 1 },
{ "2", 2 },
{ "3", 3 },
{ "4", 4 },
{ "5", 5 },
{ "current", ZPL_VERSION },
{ NULL }
};
static zprop_index_t boolean_table[] = {
{ "off", 0 },
{ "on", 1 },
{ NULL }
};
static zprop_index_t keystatus_table[] = {
{ "none", ZFS_KEYSTATUS_NONE},
{ "unavailable", ZFS_KEYSTATUS_UNAVAILABLE},
{ "available", ZFS_KEYSTATUS_AVAILABLE},
{ NULL }
};
static zprop_index_t logbias_table[] = {
{ "latency", ZFS_LOGBIAS_LATENCY },
{ "throughput", ZFS_LOGBIAS_THROUGHPUT },
{ NULL }
};
static zprop_index_t canmount_table[] = {
{ "off", ZFS_CANMOUNT_OFF },
{ "on", ZFS_CANMOUNT_ON },
{ "noauto", ZFS_CANMOUNT_NOAUTO },
{ NULL }
};
static zprop_index_t cache_table[] = {
{ "none", ZFS_CACHE_NONE },
{ "metadata", ZFS_CACHE_METADATA },
{ "all", ZFS_CACHE_ALL },
{ NULL }
};
static zprop_index_t sync_table[] = {
{ "standard", ZFS_SYNC_STANDARD },
{ "always", ZFS_SYNC_ALWAYS },
{ "disabled", ZFS_SYNC_DISABLED },
{ NULL }
};
static zprop_index_t xattr_table[] = {
{ "off", ZFS_XATTR_OFF },
{ "on", ZFS_XATTR_DIR },
{ "sa", ZFS_XATTR_SA },
{ "dir", ZFS_XATTR_DIR },
{ NULL }
};
static zprop_index_t dnsize_table[] = {
{ "legacy", ZFS_DNSIZE_LEGACY },
{ "auto", ZFS_DNSIZE_AUTO },
{ "1k", ZFS_DNSIZE_1K },
{ "2k", ZFS_DNSIZE_2K },
{ "4k", ZFS_DNSIZE_4K },
{ "8k", ZFS_DNSIZE_8K },
{ "16k", ZFS_DNSIZE_16K },
{ NULL }
};
static zprop_index_t redundant_metadata_table[] = {
{ "all", ZFS_REDUNDANT_METADATA_ALL },
{ "most", ZFS_REDUNDANT_METADATA_MOST },
{ NULL }
};
static zprop_index_t volmode_table[] = {
{ "default", ZFS_VOLMODE_DEFAULT },
{ "full", ZFS_VOLMODE_GEOM },
{ "geom", ZFS_VOLMODE_GEOM },
{ "dev", ZFS_VOLMODE_DEV },
{ "none", ZFS_VOLMODE_NONE },
{ NULL }
};
/* inherit index properties */
zprop_register_index(ZFS_PROP_REDUNDANT_METADATA, "redundant_metadata",
ZFS_REDUNDANT_METADATA_ALL,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"all | most", "REDUND_MD",
redundant_metadata_table);
zprop_register_index(ZFS_PROP_SYNC, "sync", ZFS_SYNC_STANDARD,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"standard | always | disabled", "SYNC",
sync_table);
zprop_register_index(ZFS_PROP_CHECKSUM, "checksum",
ZIO_CHECKSUM_DEFAULT, PROP_INHERIT, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_VOLUME,
"on | off | fletcher2 | fletcher4 | sha256 | sha512 | "
"skein | edonr", "CHECKSUM", checksum_table);
zprop_register_index(ZFS_PROP_DEDUP, "dedup", ZIO_CHECKSUM_OFF,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"on | off | verify | sha256[,verify], sha512[,verify], "
"skein[,verify], edonr,verify", "DEDUP", dedup_table);
zprop_register_index(ZFS_PROP_COMPRESSION, "compression",
ZIO_COMPRESS_DEFAULT, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"on | off | lzjb | gzip | gzip-[1-9] | zle | lz4", "COMPRESS",
compress_table);
zprop_register_index(ZFS_PROP_SNAPDIR, "snapdir", ZFS_SNAPDIR_HIDDEN,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"hidden | visible", "SNAPDIR", snapdir_table);
zprop_register_index(ZFS_PROP_SNAPDEV, "snapdev", ZFS_SNAPDEV_HIDDEN,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"hidden | visible", "SNAPDEV", snapdev_table);
zprop_register_index(ZFS_PROP_ACLTYPE, "acltype", ZFS_ACLTYPE_OFF,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"noacl | posixacl", "ACLTYPE", acltype_table);
zprop_register_index(ZFS_PROP_ACLINHERIT, "aclinherit",
ZFS_ACL_RESTRICTED, PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"discard | noallow | restricted | passthrough | passthrough-x",
"ACLINHERIT", acl_inherit_table);
zprop_register_index(ZFS_PROP_COPIES, "copies", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"1 | 2 | 3", "COPIES", copies_table);
zprop_register_index(ZFS_PROP_PRIMARYCACHE, "primarycache",
ZFS_CACHE_ALL, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT | ZFS_TYPE_VOLUME,
"all | none | metadata", "PRIMARYCACHE", cache_table);
zprop_register_index(ZFS_PROP_SECONDARYCACHE, "secondarycache",
ZFS_CACHE_ALL, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT | ZFS_TYPE_VOLUME,
"all | none | metadata", "SECONDARYCACHE", cache_table);
zprop_register_index(ZFS_PROP_LOGBIAS, "logbias", ZFS_LOGBIAS_LATENCY,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"latency | throughput", "LOGBIAS", logbias_table);
zprop_register_index(ZFS_PROP_XATTR, "xattr", ZFS_XATTR_DIR,
PROP_INHERIT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"on | off | dir | sa", "XATTR", xattr_table);
zprop_register_index(ZFS_PROP_DNODESIZE, "dnodesize",
ZFS_DNSIZE_LEGACY, PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"legacy | auto | 1k | 2k | 4k | 8k | 16k", "DNSIZE", dnsize_table);
zprop_register_index(ZFS_PROP_VOLMODE, "volmode",
ZFS_VOLMODE_DEFAULT, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"default | full | geom | dev | none", "VOLMODE", volmode_table);
/* inherit index (boolean) properties */
zprop_register_index(ZFS_PROP_ATIME, "atime", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "ATIME", boolean_table);
zprop_register_index(ZFS_PROP_RELATIME, "relatime", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "RELATIME", boolean_table);
zprop_register_index(ZFS_PROP_DEVICES, "devices", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "DEVICES",
boolean_table);
zprop_register_index(ZFS_PROP_EXEC, "exec", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "EXEC",
boolean_table);
zprop_register_index(ZFS_PROP_SETUID, "setuid", 1, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "SETUID",
boolean_table);
zprop_register_index(ZFS_PROP_READONLY, "readonly", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "on | off", "RDONLY",
boolean_table);
zprop_register_index(ZFS_PROP_ZONED, "zoned", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "ZONED", boolean_table);
zprop_register_index(ZFS_PROP_VSCAN, "vscan", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "VSCAN", boolean_table);
zprop_register_index(ZFS_PROP_NBMAND, "nbmand", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT, "on | off", "NBMAND",
boolean_table);
zprop_register_index(ZFS_PROP_OVERLAY, "overlay", 0, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "on | off", "OVERLAY", boolean_table);
/* default index properties */
zprop_register_index(ZFS_PROP_VERSION, "version", 0, PROP_DEFAULT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"1 | 2 | 3 | 4 | 5 | current", "VERSION", version_table);
zprop_register_index(ZFS_PROP_CANMOUNT, "canmount", ZFS_CANMOUNT_ON,
PROP_DEFAULT, ZFS_TYPE_FILESYSTEM, "on | off | noauto",
"CANMOUNT", canmount_table);
/* readonly index properties */
zprop_register_index(ZFS_PROP_MOUNTED, "mounted", 0, PROP_READONLY,
ZFS_TYPE_FILESYSTEM, "yes | no", "MOUNTED", boolean_table);
zprop_register_index(ZFS_PROP_DEFER_DESTROY, "defer_destroy", 0,
PROP_READONLY, ZFS_TYPE_SNAPSHOT, "yes | no", "DEFER_DESTROY",
boolean_table);
zprop_register_index(ZFS_PROP_KEYSTATUS, "keystatus",
ZFS_KEYSTATUS_NONE, PROP_READONLY, ZFS_TYPE_DATASET,
"none | unavailable | available",
"KEYSTATUS", keystatus_table);
/* set once index properties */
zprop_register_index(ZFS_PROP_NORMALIZE, "normalization", 0,
PROP_ONETIME, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"none | formC | formD | formKC | formKD", "NORMALIZATION",
normalize_table);
zprop_register_index(ZFS_PROP_CASE, "casesensitivity",
ZFS_CASE_SENSITIVE, PROP_ONETIME, ZFS_TYPE_FILESYSTEM |
ZFS_TYPE_SNAPSHOT,
"sensitive | insensitive | mixed", "CASE", case_table);
zprop_register_index(ZFS_PROP_KEYFORMAT, "keyformat",
ZFS_KEYFORMAT_NONE, PROP_ONETIME_DEFAULT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"none | raw | hex | passphrase", "KEYFORMAT", keyformat_table);
zprop_register_index(ZFS_PROP_ENCRYPTION, "encryption",
ZIO_CRYPT_DEFAULT, PROP_ONETIME, ZFS_TYPE_DATASET,
"on | off | aes-128-ccm | aes-192-ccm | aes-256-ccm | "
"aes-128-gcm | aes-192-gcm | aes-256-gcm", "ENCRYPTION",
crypto_table);
/* set once index (boolean) properties */
zprop_register_index(ZFS_PROP_UTF8ONLY, "utf8only", 0, PROP_ONETIME,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_SNAPSHOT,
"on | off", "UTF8ONLY", boolean_table);
/* string properties */
zprop_register_string(ZFS_PROP_ORIGIN, "origin", NULL, PROP_READONLY,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<snapshot>", "ORIGIN");
zprop_register_string(ZFS_PROP_CLONES, "clones", NULL, PROP_READONLY,
ZFS_TYPE_SNAPSHOT, "<dataset>[,...]", "CLONES");
zprop_register_string(ZFS_PROP_MOUNTPOINT, "mountpoint", "/",
PROP_INHERIT, ZFS_TYPE_FILESYSTEM, "<path> | legacy | none",
"MOUNTPOINT");
zprop_register_string(ZFS_PROP_SHARENFS, "sharenfs", "off",
PROP_INHERIT, ZFS_TYPE_FILESYSTEM, "on | off | share(1M) options",
"SHARENFS");
zprop_register_string(ZFS_PROP_TYPE, "type", NULL, PROP_READONLY,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK,
"filesystem | volume | snapshot | bookmark", "TYPE");
zprop_register_string(ZFS_PROP_SHARESMB, "sharesmb", "off",
PROP_INHERIT, ZFS_TYPE_FILESYSTEM,
"on | off | sharemgr(1M) options", "SHARESMB");
zprop_register_string(ZFS_PROP_MLSLABEL, "mlslabel",
ZFS_MLSLABEL_DEFAULT, PROP_INHERIT, ZFS_TYPE_DATASET,
"<sensitivity label>", "MLSLABEL");
zprop_register_string(ZFS_PROP_SELINUX_CONTEXT, "context",
"none", PROP_DEFAULT, ZFS_TYPE_DATASET, "<selinux context>",
"CONTEXT");
zprop_register_string(ZFS_PROP_SELINUX_FSCONTEXT, "fscontext",
"none", PROP_DEFAULT, ZFS_TYPE_DATASET, "<selinux fscontext>",
"FSCONTEXT");
zprop_register_string(ZFS_PROP_SELINUX_DEFCONTEXT, "defcontext",
"none", PROP_DEFAULT, ZFS_TYPE_DATASET, "<selinux defcontext>",
"DEFCONTEXT");
zprop_register_string(ZFS_PROP_SELINUX_ROOTCONTEXT, "rootcontext",
"none", PROP_DEFAULT, ZFS_TYPE_DATASET, "<selinux rootcontext>",
"ROOTCONTEXT");
zprop_register_string(ZFS_PROP_RECEIVE_RESUME_TOKEN,
"receive_resume_token",
NULL, PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<string token>", "RESUMETOK");
zprop_register_string(ZFS_PROP_ENCRYPTION_ROOT, "encryptionroot", NULL,
PROP_READONLY, ZFS_TYPE_DATASET, "<filesystem | volume>",
"ENCROOT");
zprop_register_string(ZFS_PROP_KEYLOCATION, "keylocation",
"none", PROP_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"prompt | <file URI>", "KEYLOCATION");
/* readonly number properties */
zprop_register_number(ZFS_PROP_USED, "used", 0, PROP_READONLY,
ZFS_TYPE_DATASET, "<size>", "USED");
zprop_register_number(ZFS_PROP_AVAILABLE, "available", 0, PROP_READONLY,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>", "AVAIL");
zprop_register_number(ZFS_PROP_REFERENCED, "referenced", 0,
PROP_READONLY, ZFS_TYPE_DATASET, "<size>", "REFER");
zprop_register_number(ZFS_PROP_COMPRESSRATIO, "compressratio", 0,
PROP_READONLY, ZFS_TYPE_DATASET,
"<1.00x or higher if compressed>", "RATIO");
zprop_register_number(ZFS_PROP_REFRATIO, "refcompressratio", 0,
PROP_READONLY, ZFS_TYPE_DATASET,
"<1.00x or higher if compressed>", "REFRATIO");
zprop_register_number(ZFS_PROP_VOLBLOCKSIZE, "volblocksize",
ZVOL_DEFAULT_BLOCKSIZE, PROP_ONETIME,
ZFS_TYPE_VOLUME, "512 to 128k, power of 2", "VOLBLOCK");
zprop_register_number(ZFS_PROP_USEDSNAP, "usedbysnapshots", 0,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>",
"USEDSNAP");
zprop_register_number(ZFS_PROP_USEDDS, "usedbydataset", 0,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>",
"USEDDS");
zprop_register_number(ZFS_PROP_USEDCHILD, "usedbychildren", 0,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>",
"USEDCHILD");
zprop_register_number(ZFS_PROP_USEDREFRESERV, "usedbyrefreservation", 0,
PROP_READONLY,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>", "USEDREFRESERV");
zprop_register_number(ZFS_PROP_USERREFS, "userrefs", 0, PROP_READONLY,
ZFS_TYPE_SNAPSHOT, "<count>", "USERREFS");
zprop_register_number(ZFS_PROP_WRITTEN, "written", 0, PROP_READONLY,
ZFS_TYPE_DATASET, "<size>", "WRITTEN");
zprop_register_number(ZFS_PROP_LOGICALUSED, "logicalused", 0,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "<size>",
"LUSED");
zprop_register_number(ZFS_PROP_LOGICALREFERENCED, "logicalreferenced",
0, PROP_READONLY, ZFS_TYPE_DATASET, "<size>", "LREFER");
zprop_register_number(ZFS_PROP_FILESYSTEM_COUNT, "filesystem_count",
UINT64_MAX, PROP_READONLY, ZFS_TYPE_FILESYSTEM,
"<count>", "FSCOUNT");
zprop_register_number(ZFS_PROP_SNAPSHOT_COUNT, "snapshot_count",
UINT64_MAX, PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<count>", "SSCOUNT");
zprop_register_number(ZFS_PROP_GUID, "guid", 0, PROP_READONLY,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "<uint64>", "GUID");
zprop_register_number(ZFS_PROP_CREATETXG, "createtxg", 0, PROP_READONLY,
ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "<uint64>", "CREATETXG");
+ zprop_register_hidden(ZFS_PROP_REMAPTXG, "remaptxg", PROP_TYPE_NUMBER,
+ PROP_READONLY, ZFS_TYPE_DATASET, "REMAPTXG");
zprop_register_number(ZFS_PROP_PBKDF2_ITERS, "pbkdf2iters",
0, PROP_ONETIME_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<iters>", "PBKDF2ITERS");
/* default number properties */
zprop_register_number(ZFS_PROP_QUOTA, "quota", 0, PROP_DEFAULT,
ZFS_TYPE_FILESYSTEM, "<size> | none", "QUOTA");
zprop_register_number(ZFS_PROP_RESERVATION, "reservation", 0,
PROP_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<size> | none", "RESERV");
zprop_register_number(ZFS_PROP_VOLSIZE, "volsize", 0, PROP_DEFAULT,
ZFS_TYPE_SNAPSHOT | ZFS_TYPE_VOLUME, "<size>", "VOLSIZE");
zprop_register_number(ZFS_PROP_REFQUOTA, "refquota", 0, PROP_DEFAULT,
ZFS_TYPE_FILESYSTEM, "<size> | none", "REFQUOTA");
zprop_register_number(ZFS_PROP_REFRESERVATION, "refreservation", 0,
PROP_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<size> | none", "REFRESERV");
zprop_register_number(ZFS_PROP_FILESYSTEM_LIMIT, "filesystem_limit",
UINT64_MAX, PROP_DEFAULT, ZFS_TYPE_FILESYSTEM,
"<count> | none", "FSLIMIT");
zprop_register_number(ZFS_PROP_SNAPSHOT_LIMIT, "snapshot_limit",
UINT64_MAX, PROP_DEFAULT, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME,
"<count> | none", "SSLIMIT");
/* inherit number properties */
zprop_register_number(ZFS_PROP_RECORDSIZE, "recordsize",
SPA_OLD_MAXBLOCKSIZE, PROP_INHERIT,
ZFS_TYPE_FILESYSTEM, "512 to 1M, power of 2", "RECSIZE");
/* hidden properties */
zprop_register_hidden(ZFS_PROP_NUMCLONES, "numclones", PROP_TYPE_NUMBER,
PROP_READONLY, ZFS_TYPE_SNAPSHOT, "NUMCLONES");
zprop_register_hidden(ZFS_PROP_NAME, "name", PROP_TYPE_STRING,
PROP_READONLY, ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK, "NAME");
zprop_register_hidden(ZFS_PROP_ISCSIOPTIONS, "iscsioptions",
PROP_TYPE_STRING, PROP_INHERIT, ZFS_TYPE_VOLUME, "ISCSIOPTIONS");
zprop_register_hidden(ZFS_PROP_STMF_SHAREINFO, "stmf_sbd_lu",
PROP_TYPE_STRING, PROP_INHERIT, ZFS_TYPE_VOLUME,
"STMF_SBD_LU");
zprop_register_hidden(ZFS_PROP_USERACCOUNTING, "useraccounting",
PROP_TYPE_NUMBER, PROP_READONLY, ZFS_TYPE_DATASET,
"USERACCOUNTING");
zprop_register_hidden(ZFS_PROP_UNIQUE, "unique", PROP_TYPE_NUMBER,
PROP_READONLY, ZFS_TYPE_DATASET, "UNIQUE");
zprop_register_hidden(ZFS_PROP_OBJSETID, "objsetid", PROP_TYPE_NUMBER,
PROP_READONLY, ZFS_TYPE_DATASET, "OBJSETID");
zprop_register_hidden(ZFS_PROP_INCONSISTENT, "inconsistent",
PROP_TYPE_NUMBER, PROP_READONLY, ZFS_TYPE_DATASET, "INCONSISTENT");
zprop_register_hidden(ZFS_PROP_PREV_SNAP, "prevsnap", PROP_TYPE_STRING,
PROP_READONLY, ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "PREVSNAP");
zprop_register_hidden(ZFS_PROP_PBKDF2_SALT, "pbkdf2salt",
PROP_TYPE_NUMBER, PROP_ONETIME_DEFAULT,
ZFS_TYPE_FILESYSTEM | ZFS_TYPE_VOLUME, "PBKDF2SALT");
zprop_register_hidden(ZFS_PROP_KEY_GUID, "keyguid", PROP_TYPE_NUMBER,
PROP_READONLY, ZFS_TYPE_DATASET, "KEYGUID");
/*
* Property to be removed once libbe is integrated
*/
zprop_register_hidden(ZFS_PROP_PRIVATE, "priv_prop",
PROP_TYPE_NUMBER, PROP_READONLY, ZFS_TYPE_FILESYSTEM,
"PRIV_PROP");
/* oddball properties */
zprop_register_impl(ZFS_PROP_CREATION, "creation", PROP_TYPE_NUMBER, 0,
NULL, PROP_READONLY, ZFS_TYPE_DATASET | ZFS_TYPE_BOOKMARK,
"<date>", "CREATION", B_FALSE, B_TRUE, NULL);
}
boolean_t
zfs_prop_delegatable(zfs_prop_t prop)
{
zprop_desc_t *pd = &zfs_prop_table[prop];
/* The mlslabel property is never delegatable. */
if (prop == ZFS_PROP_MLSLABEL)
return (B_FALSE);
return (pd->pd_attr != PROP_READONLY);
}
/*
* Given a zfs dataset property name, returns the corresponding property ID.
*/
zfs_prop_t
zfs_name_to_prop(const char *propname)
{
return (zprop_name_to_prop(propname, ZFS_TYPE_DATASET));
}
/*
* For user property names, we allow all lowercase alphanumeric characters, plus
* a few useful punctuation characters.
*/
static int
valid_char(char c)
{
return ((c >= 'a' && c <= 'z') ||
(c >= '0' && c <= '9') ||
c == '-' || c == '_' || c == '.' || c == ':');
}
/*
* Returns true if this is a valid user-defined property (one with a ':').
*/
boolean_t
zfs_prop_user(const char *name)
{
int i;
char c;
boolean_t foundsep = B_FALSE;
for (i = 0; i < strlen(name); i++) {
c = name[i];
if (!valid_char(c))
return (B_FALSE);
if (c == ':')
foundsep = B_TRUE;
}
if (!foundsep)
return (B_FALSE);
return (B_TRUE);
}
/*
* Returns true if this is a valid userspace-type property (one with a '@').
* Note that after the @, any character is valid (e.g., another @, for SID
* user@domain).
*/
boolean_t
zfs_prop_userquota(const char *name)
{
zfs_userquota_prop_t prop;
for (prop = 0; prop < ZFS_NUM_USERQUOTA_PROPS; prop++) {
if (strncmp(name, zfs_userquota_prop_prefixes[prop],
strlen(zfs_userquota_prop_prefixes[prop])) == 0) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* Returns true if this is a valid written@ property.
* Note that after the @, any character is valid (e.g., another @, for
* written@pool/fs@origin).
*/
boolean_t
zfs_prop_written(const char *name)
{
static const char *prefix = "written@";
return (strncmp(name, prefix, strlen(prefix)) == 0);
}
/*
* Tables of index types, plus functions to convert between the user view
* (strings) and internal representation (uint64_t).
*/
int
zfs_prop_string_to_index(zfs_prop_t prop, const char *string, uint64_t *index)
{
return (zprop_string_to_index(prop, string, index, ZFS_TYPE_DATASET));
}
int
zfs_prop_index_to_string(zfs_prop_t prop, uint64_t index, const char **string)
{
return (zprop_index_to_string(prop, index, string, ZFS_TYPE_DATASET));
}
uint64_t
zfs_prop_random_value(zfs_prop_t prop, uint64_t seed)
{
return (zprop_random_value(prop, seed, ZFS_TYPE_DATASET));
}
/*
* Returns TRUE if the property applies to any of the given dataset types.
*/
boolean_t
zfs_prop_valid_for_type(int prop, zfs_type_t types, boolean_t headcheck)
{
return (zprop_valid_for_type(prop, types, headcheck));
}
zprop_type_t
zfs_prop_get_type(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_proptype);
}
/*
* Returns TRUE if the property is readonly.
*/
boolean_t
zfs_prop_readonly(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_attr == PROP_READONLY ||
zfs_prop_table[prop].pd_attr == PROP_ONETIME ||
zfs_prop_table[prop].pd_attr == PROP_ONETIME_DEFAULT);
}
/*
* Returns TRUE if the property is visible (not hidden).
*/
boolean_t
zfs_prop_visible(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_visible);
}
/*
* Returns TRUE if the property is only allowed to be set once.
*/
boolean_t
zfs_prop_setonce(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_attr == PROP_ONETIME ||
zfs_prop_table[prop].pd_attr == PROP_ONETIME_DEFAULT);
}
const char *
zfs_prop_default_string(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_strdefault);
}
uint64_t
zfs_prop_default_numeric(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_numdefault);
}
/*
* Given a dataset property ID, returns the corresponding name.
* The zfs dataset property ID is assumed to be valid.
*/
const char *
zfs_prop_to_name(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_name);
}
/*
* Returns TRUE if the property is inheritable.
*/
boolean_t
zfs_prop_inheritable(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_attr == PROP_INHERIT ||
zfs_prop_table[prop].pd_attr == PROP_ONETIME);
}
/*
* Returns TRUE if property is one of the encryption properties that requires
* a loaded encryption key to modify.
*/
boolean_t
zfs_prop_encryption_key_param(zfs_prop_t prop)
{
/*
* keylocation does not count as an encryption property. It can be
* changed at will without needing the master keys.
*/
return (prop == ZFS_PROP_PBKDF2_SALT || prop == ZFS_PROP_PBKDF2_ITERS ||
prop == ZFS_PROP_KEYFORMAT);
}
/*
* Helper function used by both kernelspace and userspace to check the
* keylocation property. If encrypted is set, the keylocation must be valid
* for an encrypted dataset.
*/
boolean_t
zfs_prop_valid_keylocation(const char *str, boolean_t encrypted)
{
if (strcmp("none", str) == 0)
return (!encrypted);
else if (strcmp("prompt", str) == 0)
return (B_TRUE);
else if (strlen(str) > 8 && strncmp("file:///", str, 8) == 0)
return (B_TRUE);
return (B_FALSE);
}
#ifndef _KERNEL
/*
* Returns a string describing the set of acceptable values for the given
* zfs property, or NULL if it cannot be set.
*/
const char *
zfs_prop_values(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_values);
}
/*
* Returns TRUE if this property is a string type. Note that index types
* (compression, checksum) are treated as strings in userland, even though they
* are stored numerically on disk.
*/
int
zfs_prop_is_string(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_proptype == PROP_TYPE_STRING ||
zfs_prop_table[prop].pd_proptype == PROP_TYPE_INDEX);
}
/*
* Returns the column header for the given property. Used only in
* 'zfs list -o', but centralized here with the other property information.
*/
const char *
zfs_prop_column_name(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_colname);
}
/*
* Returns whether the given property should be displayed right-justified for
* 'zfs list'.
*/
boolean_t
zfs_prop_align_right(zfs_prop_t prop)
{
return (zfs_prop_table[prop].pd_rightalign);
}
#endif
#if defined(_KERNEL) && defined(HAVE_SPL)
static int __init
zcommon_init(void)
{
fletcher_4_init();
return (0);
}
static void __exit
zcommon_fini(void)
{
fletcher_4_fini();
}
module_init(zcommon_init);
module_exit(zcommon_fini);
MODULE_DESCRIPTION("Generic ZFS support");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_LICENSE(ZFS_META_LICENSE);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
/* zfs dataset property functions */
EXPORT_SYMBOL(zfs_userquota_prop_prefixes);
EXPORT_SYMBOL(zfs_prop_init);
EXPORT_SYMBOL(zfs_prop_get_type);
EXPORT_SYMBOL(zfs_prop_get_table);
EXPORT_SYMBOL(zfs_prop_delegatable);
EXPORT_SYMBOL(zfs_prop_visible);
/* Dataset property functions shared between libzfs and kernel. */
EXPORT_SYMBOL(zfs_prop_default_string);
EXPORT_SYMBOL(zfs_prop_default_numeric);
EXPORT_SYMBOL(zfs_prop_readonly);
EXPORT_SYMBOL(zfs_prop_inheritable);
EXPORT_SYMBOL(zfs_prop_encryption_key_param);
EXPORT_SYMBOL(zfs_prop_valid_keylocation);
EXPORT_SYMBOL(zfs_prop_setonce);
EXPORT_SYMBOL(zfs_prop_to_name);
EXPORT_SYMBOL(zfs_name_to_prop);
EXPORT_SYMBOL(zfs_prop_user);
EXPORT_SYMBOL(zfs_prop_userquota);
EXPORT_SYMBOL(zfs_prop_index_to_string);
EXPORT_SYMBOL(zfs_prop_string_to_index);
EXPORT_SYMBOL(zfs_prop_valid_for_type);
EXPORT_SYMBOL(zfs_prop_written);
#endif
diff --git a/module/zfs/Makefile.in b/module/zfs/Makefile.in
index fe0d5b523ced..fefb2965456b 100644
--- a/module/zfs/Makefile.in
+++ b/module/zfs/Makefile.in
@@ -1,151 +1,155 @@
src = @abs_top_srcdir@/module/zfs
obj = @abs_builddir@
target_cpu = @target_cpu@
MODULE := zfs
obj-$(CONFIG_ZFS) := $(MODULE).o
ccflags-y := $(ZFS_MODULE_CFLAGS) $(ZFS_MODULE_CPPFLAGS)
# Suppress unused-value warnings in sparc64 architecture headers
ifeq ($(target_cpu),sparc64)
ccflags-y += -Wno-unused-value
endif
# Suppress unused but set variable warnings often due to ASSERTs
ccflags-y += $(NO_UNUSED_BUT_SET_VARIABLE)
$(MODULE)-objs += abd.o
$(MODULE)-objs += arc.o
$(MODULE)-objs += blkptr.o
$(MODULE)-objs += bplist.o
$(MODULE)-objs += bpobj.o
$(MODULE)-objs += dbuf.o
$(MODULE)-objs += dbuf_stats.o
$(MODULE)-objs += bptree.o
$(MODULE)-objs += bqueue.o
$(MODULE)-objs += ddt.o
$(MODULE)-objs += ddt_zap.o
$(MODULE)-objs += dmu.o
$(MODULE)-objs += dmu_diff.o
$(MODULE)-objs += dmu_object.o
$(MODULE)-objs += dmu_objset.o
$(MODULE)-objs += dmu_send.o
$(MODULE)-objs += dmu_traverse.o
$(MODULE)-objs += dmu_tx.o
$(MODULE)-objs += dmu_zfetch.o
$(MODULE)-objs += dnode.o
$(MODULE)-objs += dnode_sync.o
$(MODULE)-objs += dsl_dataset.o
$(MODULE)-objs += dsl_deadlist.o
$(MODULE)-objs += dsl_deleg.o
$(MODULE)-objs += dsl_bookmark.o
$(MODULE)-objs += dsl_dir.o
$(MODULE)-objs += dsl_crypt.o
$(MODULE)-objs += dsl_pool.o
$(MODULE)-objs += dsl_prop.o
$(MODULE)-objs += dsl_scan.o
$(MODULE)-objs += dsl_synctask.o
$(MODULE)-objs += edonr_zfs.o
$(MODULE)-objs += fm.o
$(MODULE)-objs += gzip.o
$(MODULE)-objs += hkdf.o
$(MODULE)-objs += lzjb.o
$(MODULE)-objs += lz4.o
$(MODULE)-objs += metaslab.o
$(MODULE)-objs += mmp.o
$(MODULE)-objs += multilist.o
$(MODULE)-objs += pathname.o
$(MODULE)-objs += policy.o
$(MODULE)-objs += range_tree.o
$(MODULE)-objs += refcount.o
$(MODULE)-objs += rrwlock.o
$(MODULE)-objs += sa.o
$(MODULE)-objs += sha256.o
$(MODULE)-objs += skein_zfs.o
$(MODULE)-objs += spa.o
$(MODULE)-objs += spa_boot.o
$(MODULE)-objs += spa_config.o
$(MODULE)-objs += spa_errlog.o
$(MODULE)-objs += spa_history.o
$(MODULE)-objs += spa_misc.o
$(MODULE)-objs += spa_stats.o
$(MODULE)-objs += space_map.o
$(MODULE)-objs += space_reftree.o
$(MODULE)-objs += txg.o
$(MODULE)-objs += trace.o
$(MODULE)-objs += uberblock.o
$(MODULE)-objs += unique.o
$(MODULE)-objs += vdev.o
$(MODULE)-objs += vdev_cache.o
$(MODULE)-objs += vdev_disk.o
$(MODULE)-objs += vdev_file.o
+$(MODULE)-objs += vdev_indirect.o
+$(MODULE)-objs += vdev_indirect_births.o
+$(MODULE)-objs += vdev_indirect_mapping.o
$(MODULE)-objs += vdev_label.o
$(MODULE)-objs += vdev_mirror.o
$(MODULE)-objs += vdev_missing.o
$(MODULE)-objs += vdev_queue.o
$(MODULE)-objs += vdev_raidz.o
$(MODULE)-objs += vdev_raidz_math.o
$(MODULE)-objs += vdev_raidz_math_scalar.o
+$(MODULE)-objs += vdev_removal.o
$(MODULE)-objs += vdev_root.o
$(MODULE)-objs += zap.o
$(MODULE)-objs += zap_leaf.o
$(MODULE)-objs += zap_micro.o
$(MODULE)-objs += zcp.o
$(MODULE)-objs += zcp_get.o
$(MODULE)-objs += zcp_global.o
$(MODULE)-objs += zcp_iter.o
$(MODULE)-objs += zcp_synctask.o
$(MODULE)-objs += zfeature.o
$(MODULE)-objs += zfs_acl.o
$(MODULE)-objs += zfs_byteswap.o
$(MODULE)-objs += zfs_ctldir.o
$(MODULE)-objs += zfs_debug.o
$(MODULE)-objs += zfs_dir.o
$(MODULE)-objs += zfs_fm.o
$(MODULE)-objs += zfs_fuid.o
$(MODULE)-objs += zfs_ioctl.o
$(MODULE)-objs += zfs_log.o
$(MODULE)-objs += zfs_onexit.o
$(MODULE)-objs += zfs_ratelimit.o
$(MODULE)-objs += zfs_replay.o
$(MODULE)-objs += zfs_rlock.o
$(MODULE)-objs += zfs_sa.o
$(MODULE)-objs += zfs_vfsops.o
$(MODULE)-objs += zfs_vnops.o
$(MODULE)-objs += zfs_znode.o
$(MODULE)-objs += zil.o
$(MODULE)-objs += zio.o
$(MODULE)-objs += zio_checksum.o
$(MODULE)-objs += zio_compress.o
$(MODULE)-objs += zio_crypt.o
$(MODULE)-objs += zio_inject.o
$(MODULE)-objs += zle.o
$(MODULE)-objs += zpl_ctldir.o
$(MODULE)-objs += zpl_export.o
$(MODULE)-objs += zpl_file.o
$(MODULE)-objs += zpl_inode.o
$(MODULE)-objs += zpl_super.o
$(MODULE)-objs += zpl_xattr.o
$(MODULE)-objs += zrlock.o
$(MODULE)-objs += zvol.o
$(MODULE)-objs += dsl_destroy.o
$(MODULE)-objs += dsl_userhold.o
$(MODULE)-objs += qat.o
$(MODULE)-objs += qat_compress.o
$(MODULE)-objs += qat_crypt.o
# Suppress incorrect warnings from versions of objtool which are not
# aware of x86 EVEX prefix instructions used for AVX512.
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512bw.o := y
OBJECT_FILES_NON_STANDARD_vdev_raidz_math_avx512f.o := y
$(MODULE)-$(CONFIG_X86) += vdev_raidz_math_sse2.o
$(MODULE)-$(CONFIG_X86) += vdev_raidz_math_ssse3.o
$(MODULE)-$(CONFIG_X86) += vdev_raidz_math_avx2.o
$(MODULE)-$(CONFIG_X86) += vdev_raidz_math_avx512f.o
$(MODULE)-$(CONFIG_X86) += vdev_raidz_math_avx512bw.o
$(MODULE)-$(CONFIG_ARM64) += vdev_raidz_math_aarch64_neon.o
$(MODULE)-$(CONFIG_ARM64) += vdev_raidz_math_aarch64_neonx2.o
diff --git a/module/zfs/arc.c b/module/zfs/arc.c
index d73d6ffcc1e6..040e943655f4 100644
--- a/module/zfs/arc.c
+++ b/module/zfs/arc.c
@@ -1,9183 +1,9183 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 by Saso Kiselkov. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
*/
/*
* DVA-based Adjustable Replacement Cache
*
* While much of the theory of operation used here is
* based on the self-tuning, low overhead replacement cache
* presented by Megiddo and Modha at FAST 2003, there are some
* significant differences:
*
* 1. The Megiddo and Modha model assumes any page is evictable.
* Pages in its cache cannot be "locked" into memory. This makes
* the eviction algorithm simple: evict the last page in the list.
* This also makes the performance characteristics easy to reason
* about. Our cache is not so simple. At any given moment, some
* subset of the blocks in the cache are un-evictable because we
* have handed out a reference to them. Blocks are only evictable
* when there are no external references active. This makes
* eviction far more problematic: we choose to evict the evictable
* blocks that are the "lowest" in the list.
*
* There are times when it is not possible to evict the requested
* space. In these circumstances we are unable to adjust the cache
* size. To prevent the cache growing unbounded at these times we
* implement a "cache throttle" that slows the flow of new data
* into the cache until we can make space available.
*
* 2. The Megiddo and Modha model assumes a fixed cache size.
* Pages are evicted when the cache is full and there is a cache
* miss. Our model has a variable sized cache. It grows with
* high use, but also tries to react to memory pressure from the
* operating system: decreasing its size when system memory is
* tight.
*
* 3. The Megiddo and Modha model assumes a fixed page size. All
* elements of the cache are therefore exactly the same size. So
* when adjusting the cache size following a cache miss, it's simply
* a matter of choosing a single page to evict. In our model, we
* have variable-sized cache blocks (ranging from 512 bytes to
* 128K bytes). We therefore choose a set of blocks to evict to make
* space for a cache miss that approximates as closely as possible
* the space used by the new block.
*
* See also: "ARC: A Self-Tuning, Low Overhead Replacement Cache"
* by N. Megiddo & D. Modha, FAST 2003
*/
/*
* The locking model:
*
* A new reference to a cache buffer can be obtained in two
* ways: 1) via a hash table lookup using the DVA as a key,
* or 2) via one of the ARC lists. The arc_read() interface
* uses method 1, while the internal ARC algorithms for
* adjusting the cache use method 2. We therefore provide two
* types of locks: 1) the hash table lock array, and 2) the
* ARC list locks.
*
* Buffers do not have their own mutexes, rather they rely on the
* hash table mutexes for the bulk of their protection (i.e. most
* fields in the arc_buf_hdr_t are protected by these mutexes).
*
* buf_hash_find() returns the appropriate mutex (held) when it
* locates the requested buffer in the hash table. It returns
* NULL for the mutex if the buffer was not in the table.
*
* buf_hash_remove() expects the appropriate hash mutex to be
* already held before it is invoked.
*
* Each ARC state also has a mutex which is used to protect the
* buffer list associated with the state. When attempting to
* obtain a hash table lock while holding an ARC list lock you
* must use: mutex_tryenter() to avoid deadlock. Also note that
* the active state mutex must be held before the ghost state mutex.
*
* It is also possible to register a callback which is run when the
* arc_meta_limit is reached and no buffers can be safely evicted. In
* this case the arc user should drop a reference on some arc buffers so
* they can be reclaimed and the arc_meta_limit honored. For example,
* when using the ZPL each dentry holds a reference on a znode. These
* dentries must be pruned before the arc buffer holding the znode can
* be safely evicted.
*
* Note that the majority of the performance stats are manipulated
* with atomic operations.
*
* The L2ARC uses the l2ad_mtx on each vdev for the following:
*
* - L2ARC buflist creation
* - L2ARC buflist eviction
* - L2ARC write completion, which walks L2ARC buflists
* - ARC header destruction, as it removes from L2ARC buflists
* - ARC header release, as it removes from L2ARC buflists
*/
/*
* ARC operation:
*
* Every block that is in the ARC is tracked by an arc_buf_hdr_t structure.
* This structure can point either to a block that is still in the cache or to
* one that is only accessible in an L2 ARC device, or it can provide
* information about a block that was recently evicted. If a block is
* only accessible in the L2ARC, then the arc_buf_hdr_t only has enough
* information to retrieve it from the L2ARC device. This information is
* stored in the l2arc_buf_hdr_t sub-structure of the arc_buf_hdr_t. A block
* that is in this state cannot access the data directly.
*
* Blocks that are actively being referenced or have not been evicted
* are cached in the L1ARC. The L1ARC (l1arc_buf_hdr_t) is a structure within
* the arc_buf_hdr_t that will point to the data block in memory. A block can
* only be read by a consumer if it has an l1arc_buf_hdr_t. The L1ARC
* caches data in two ways -- in a list of ARC buffers (arc_buf_t) and
* also in the arc_buf_hdr_t's private physical data block pointer (b_pabd).
*
* The L1ARC's data pointer may or may not be uncompressed. The ARC has the
* ability to store the physical data (b_pabd) associated with the DVA of the
* arc_buf_hdr_t. Since the b_pabd is a copy of the on-disk physical block,
* it will match its on-disk compression characteristics. This behavior can be
* disabled by setting 'zfs_compressed_arc_enabled' to B_FALSE. When the
* compressed ARC functionality is disabled, the b_pabd will point to an
* uncompressed version of the on-disk data.
*
* Data in the L1ARC is not accessed by consumers of the ARC directly. Each
* arc_buf_hdr_t can have multiple ARC buffers (arc_buf_t) which reference it.
* Each ARC buffer (arc_buf_t) is being actively accessed by a specific ARC
* consumer. The ARC will provide references to this data and will keep it
* cached until it is no longer in use. The ARC caches only the L1ARC's physical
* data block and will evict any arc_buf_t that is no longer referenced. The
* amount of memory consumed by the arc_buf_ts' data buffers can be seen via the
* "overhead_size" kstat.
*
* Depending on the consumer, an arc_buf_t can be requested in uncompressed or
* compressed form. The typical case is that consumers will want uncompressed
* data, and when that happens a new data buffer is allocated where the data is
* decompressed for them to use. Currently the only consumer who wants
* compressed arc_buf_t's is "zfs send", when it streams data exactly as it
* exists on disk. When this happens, the arc_buf_t's data buffer is shared
* with the arc_buf_hdr_t.
*
* Here is a diagram showing an arc_buf_hdr_t referenced by two arc_buf_t's. The
* first one is owned by a compressed send consumer (and therefore references
* the same compressed data buffer as the arc_buf_hdr_t) and the second could be
* used by any other consumer (and has its own uncompressed copy of the data
* buffer).
*
* arc_buf_hdr_t
* +-----------+
* | fields |
* | common to |
* | L1- and |
* | L2ARC |
* +-----------+
* | l2arc_buf_hdr_t
* | |
* +-----------+
* | l1arc_buf_hdr_t
* | | arc_buf_t
* | b_buf +------------>+-----------+ arc_buf_t
* | b_pabd +-+ |b_next +---->+-----------+
* +-----------+ | |-----------| |b_next +-->NULL
* | |b_comp = T | +-----------+
* | |b_data +-+ |b_comp = F |
* | +-----------+ | |b_data +-+
* +->+------+ | +-----------+ |
* compressed | | | |
* data | |<--------------+ | uncompressed
* +------+ compressed, | data
* shared +-->+------+
* data | |
* | |
* +------+
*
* When a consumer reads a block, the ARC must first look to see if the
* arc_buf_hdr_t is cached. If the hdr is cached then the ARC allocates a new
* arc_buf_t and either copies uncompressed data into a new data buffer from an
* existing uncompressed arc_buf_t, decompresses the hdr's b_pabd buffer into a
* new data buffer, or shares the hdr's b_pabd buffer, depending on whether the
* hdr is compressed and the desired compression characteristics of the
* arc_buf_t consumer. If the arc_buf_t ends up sharing data with the
* arc_buf_hdr_t and both of them are uncompressed then the arc_buf_t must be
* the last buffer in the hdr's b_buf list, however a shared compressed buf can
* be anywhere in the hdr's list.
*
* The diagram below shows an example of an uncompressed ARC hdr that is
* sharing its data with an arc_buf_t (note that the shared uncompressed buf is
* the last element in the buf list):
*
* arc_buf_hdr_t
* +-----------+
* | |
* | |
* | |
* +-----------+
* l2arc_buf_hdr_t| |
* | |
* +-----------+
* l1arc_buf_hdr_t| |
* | | arc_buf_t (shared)
* | b_buf +------------>+---------+ arc_buf_t
* | | |b_next +---->+---------+
* | b_pabd +-+ |---------| |b_next +-->NULL
* +-----------+ | | | +---------+
* | |b_data +-+ | |
* | +---------+ | |b_data +-+
* +->+------+ | +---------+ |
* | | | |
* uncompressed | | | |
* data +------+ | |
* ^ +->+------+ |
* | uncompressed | | |
* | data | | |
* | +------+ |
* +---------------------------------+
*
* Writing to the ARC requires that the ARC first discard the hdr's b_pabd
* since the physical block is about to be rewritten. The new data contents
* will be contained in the arc_buf_t. As the I/O pipeline performs the write,
* it may compress the data before writing it to disk. The ARC will be called
* with the transformed data and will bcopy the transformed on-disk block into
* a newly allocated b_pabd. Writes are always done into buffers which have
* either been loaned (and hence are new and don't have other readers) or
* buffers which have been released (and hence have their own hdr, if there
* were originally other readers of the buf's original hdr). This ensures that
* the ARC only needs to update a single buf and its hdr after a write occurs.
*
* When the L2ARC is in use, it will also take advantage of the b_pabd. The
* L2ARC will always write the contents of b_pabd to the L2ARC. This means
* that when compressed ARC is enabled that the L2ARC blocks are identical
* to the on-disk block in the main data pool. This provides a significant
* advantage since the ARC can leverage the bp's checksum when reading from the
* L2ARC to determine if the contents are valid. However, if the compressed
* ARC is disabled, then the L2ARC's block must be transformed to look
* like the physical block in the main data pool before comparing the
* checksum and determining its validity.
*
* The L1ARC has a slightly different system for storing encrypted data.
* Raw (encrypted + possibly compressed) data has a few subtle differences from
 * data that is just compressed. The biggest difference is that it is not
 * possible to decrypt encrypted data (or vice versa) if the keys aren't
 * loaded. The other difference is that encryption cannot be treated as a
 * suggestion. If a caller would prefer compressed data but winds up with
 * uncompressed data, the worst that can happen is a performance hit. If the
 * caller requests encrypted data, however, we must be sure they actually get
 * it or else secret information could be leaked. Raw
* data is stored in hdr->b_crypt_hdr.b_rabd. An encrypted header, therefore,
* may have both an encrypted version and a decrypted version of its data at
* once. When a caller needs a raw arc_buf_t, it is allocated and the data is
* copied out of this header. To avoid complications with b_pabd, raw buffers
* cannot be shared.
*/
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/refcount.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/dsl_pool.h>
#include <sys/zio_checksum.h>
#include <sys/multilist.h>
#include <sys/abd.h>
#include <sys/zil.h>
#include <sys/fm/fs/zfs.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <vm/anon.h>
#include <sys/fs/swapnode.h>
#include <sys/zpl.h>
#include <linux/mm_compat.h>
#include <linux/page_compat.h>
#endif
#include <sys/callb.h>
#include <sys/kstat.h>
#include <sys/dmu_tx.h>
#include <zfs_fletcher.h>
#include <sys/arc_impl.h>
#include <sys/trace_arc.h>
#ifndef _KERNEL
/* set with ZFS_DEBUG=watch, to enable watchpoints on frozen buffers */
boolean_t arc_watch = B_FALSE;
#endif
static kmutex_t arc_reclaim_lock;
static kcondvar_t arc_reclaim_thread_cv;
static boolean_t arc_reclaim_thread_exit;
static kcondvar_t arc_reclaim_waiters_cv;
/*
* The number of headers to evict in arc_evict_state_impl() before
* dropping the sublist lock and evicting from another sublist. A lower
* value means we're more likely to evict the "correct" header (i.e. the
* oldest header in the arc state), but comes with higher overhead
* (i.e. more invocations of arc_evict_state_impl()).
*/
int zfs_arc_evict_batch_limit = 10;
/* number of seconds before growing cache again */
static int arc_grow_retry = 5;
/* shift of arc_c for calculating overflow limit in arc_get_data_impl */
int zfs_arc_overflow_shift = 8;
/* shift of arc_c for calculating both min and max arc_p */
static int arc_p_min_shift = 4;
/* log2(fraction of arc to reclaim) */
static int arc_shrink_shift = 7;
/* percent of pagecache to reclaim arc to */
#ifdef _KERNEL
static uint_t zfs_arc_pc_percent = 0;
#endif
/*
* log2(fraction of ARC which must be free to allow growing).
 * I.e., if there is less than arc_c >> arc_no_grow_shift free memory,
* when reading a new block into the ARC, we will evict an equal-sized block
* from the ARC.
*
* This must be less than arc_shrink_shift, so that when we shrink the ARC,
* we will still not allow it to grow.
*/
int arc_no_grow_shift = 5;
/*
* minimum lifespan of a prefetch block in clock ticks
* (initialized in arc_init())
*/
static int arc_min_prefetch_ms;
static int arc_min_prescient_prefetch_ms;
/*
* If this percent of memory is free, don't throttle.
*/
int arc_lotsfree_percent = 10;
static int arc_dead;
/*
* The arc has filled available memory and has now warmed up.
*/
static boolean_t arc_warm;
/*
* log2 fraction of the zio arena to keep free.
*/
int arc_zio_arena_free_shift = 2;
/*
* These tunables are for performance analysis.
*/
unsigned long zfs_arc_max = 0;
unsigned long zfs_arc_min = 0;
unsigned long zfs_arc_meta_limit = 0;
unsigned long zfs_arc_meta_min = 0;
unsigned long zfs_arc_dnode_limit = 0;
unsigned long zfs_arc_dnode_reduce_percent = 10;
int zfs_arc_grow_retry = 0;
int zfs_arc_shrink_shift = 0;
int zfs_arc_p_min_shift = 0;
int zfs_arc_average_blocksize = 8 * 1024; /* 8KB */
int zfs_compressed_arc_enabled = B_TRUE;
/*
* ARC will evict meta buffers that exceed arc_meta_limit. This
 * tunable makes arc_meta_limit adjustable for different workloads.
*/
unsigned long zfs_arc_meta_limit_percent = 75;
/*
* Percentage that can be consumed by dnodes of ARC meta buffers.
*/
unsigned long zfs_arc_dnode_limit_percent = 10;
/*
* These tunables are Linux specific
*/
unsigned long zfs_arc_sys_free = 0;
int zfs_arc_min_prefetch_ms = 0;
int zfs_arc_min_prescient_prefetch_ms = 0;
int zfs_arc_p_dampener_disable = 1;
int zfs_arc_meta_prune = 10000;
int zfs_arc_meta_strategy = ARC_STRATEGY_META_BALANCED;
int zfs_arc_meta_adjust_restarts = 4096;
int zfs_arc_lotsfree_percent = 10;
/* The 6 states: */
static arc_state_t ARC_anon;
static arc_state_t ARC_mru;
static arc_state_t ARC_mru_ghost;
static arc_state_t ARC_mfu;
static arc_state_t ARC_mfu_ghost;
static arc_state_t ARC_l2c_only;
typedef struct arc_stats {
kstat_named_t arcstat_hits;
kstat_named_t arcstat_misses;
kstat_named_t arcstat_demand_data_hits;
kstat_named_t arcstat_demand_data_misses;
kstat_named_t arcstat_demand_metadata_hits;
kstat_named_t arcstat_demand_metadata_misses;
kstat_named_t arcstat_prefetch_data_hits;
kstat_named_t arcstat_prefetch_data_misses;
kstat_named_t arcstat_prefetch_metadata_hits;
kstat_named_t arcstat_prefetch_metadata_misses;
kstat_named_t arcstat_mru_hits;
kstat_named_t arcstat_mru_ghost_hits;
kstat_named_t arcstat_mfu_hits;
kstat_named_t arcstat_mfu_ghost_hits;
kstat_named_t arcstat_deleted;
/*
* Number of buffers that could not be evicted because the hash lock
* was held by another thread. The lock may not necessarily be held
* by something using the same buffer, since hash locks are shared
* by multiple buffers.
*/
kstat_named_t arcstat_mutex_miss;
/*
* Number of buffers skipped when updating the access state due to the
* header having already been released after acquiring the hash lock.
*/
kstat_named_t arcstat_access_skip;
/*
* Number of buffers skipped because they have I/O in progress, are
* indirect prefetch buffers that have not lived long enough, or are
* not from the spa we're trying to evict from.
*/
kstat_named_t arcstat_evict_skip;
/*
* Number of times arc_evict_state() was unable to evict enough
* buffers to reach its target amount.
*/
kstat_named_t arcstat_evict_not_enough;
kstat_named_t arcstat_evict_l2_cached;
kstat_named_t arcstat_evict_l2_eligible;
kstat_named_t arcstat_evict_l2_ineligible;
kstat_named_t arcstat_evict_l2_skip;
kstat_named_t arcstat_hash_elements;
kstat_named_t arcstat_hash_elements_max;
kstat_named_t arcstat_hash_collisions;
kstat_named_t arcstat_hash_chains;
kstat_named_t arcstat_hash_chain_max;
kstat_named_t arcstat_p;
kstat_named_t arcstat_c;
kstat_named_t arcstat_c_min;
kstat_named_t arcstat_c_max;
kstat_named_t arcstat_size;
/*
* Number of compressed bytes stored in the arc_buf_hdr_t's b_pabd.
* Note that the compressed bytes may match the uncompressed bytes
* if the block is either not compressed or compressed arc is disabled.
*/
kstat_named_t arcstat_compressed_size;
/*
* Uncompressed size of the data stored in b_pabd. If compressed
* arc is disabled then this value will be identical to the stat
* above.
*/
kstat_named_t arcstat_uncompressed_size;
/*
* Number of bytes stored in all the arc_buf_t's. This is classified
* as "overhead" since this data is typically short-lived and will
* be evicted from the arc when it becomes unreferenced unless the
* zfs_keep_uncompressed_metadata or zfs_keep_uncompressed_level
* values have been set (see comment in dbuf.c for more information).
*/
kstat_named_t arcstat_overhead_size;
/*
* Number of bytes consumed by internal ARC structures necessary
* for tracking purposes; these structures are not actually
* backed by ARC buffers. This includes arc_buf_hdr_t structures
* (allocated via arc_buf_hdr_t_full and arc_buf_hdr_t_l2only
* caches), and arc_buf_t structures (allocated via arc_buf_t
* cache).
*/
kstat_named_t arcstat_hdr_size;
/*
* Number of bytes consumed by ARC buffers of type equal to
* ARC_BUFC_DATA. This is generally consumed by buffers backing
* on disk user data (e.g. plain file contents).
*/
kstat_named_t arcstat_data_size;
/*
* Number of bytes consumed by ARC buffers of type equal to
* ARC_BUFC_METADATA. This is generally consumed by buffers
* backing on disk data that is used for internal ZFS
* structures (e.g. ZAP, dnode, indirect blocks, etc).
*/
kstat_named_t arcstat_metadata_size;
/*
* Number of bytes consumed by dmu_buf_impl_t objects.
*/
kstat_named_t arcstat_dbuf_size;
/*
* Number of bytes consumed by dnode_t objects.
*/
kstat_named_t arcstat_dnode_size;
/*
* Number of bytes consumed by bonus buffers.
*/
kstat_named_t arcstat_bonus_size;
/*
* Total number of bytes consumed by ARC buffers residing in the
* arc_anon state. This includes *all* buffers in the arc_anon
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
*/
kstat_named_t arcstat_anon_size;
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_DATA,
* residing in the arc_anon state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
kstat_named_t arcstat_anon_evictable_data;
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_METADATA,
* residing in the arc_anon state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
kstat_named_t arcstat_anon_evictable_metadata;
/*
* Total number of bytes consumed by ARC buffers residing in the
* arc_mru state. This includes *all* buffers in the arc_mru
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
*/
kstat_named_t arcstat_mru_size;
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_DATA,
* residing in the arc_mru state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
kstat_named_t arcstat_mru_evictable_data;
/*
* Number of bytes consumed by ARC buffers that meet the
* following criteria: backing buffers of type ARC_BUFC_METADATA,
* residing in the arc_mru state, and are eligible for eviction
* (e.g. have no outstanding holds on the buffer).
*/
kstat_named_t arcstat_mru_evictable_metadata;
/*
* Total number of bytes that *would have been* consumed by ARC
 * buffers in the arc_mru_ghost state. The key thing to note
 * here is that this size doesn't actually indicate RAM
 * consumption. The ghost lists only consist of headers and
* don't actually have ARC buffers linked off of these headers.
* Thus, *if* the headers had associated ARC buffers, these
* buffers *would have* consumed this number of bytes.
*/
kstat_named_t arcstat_mru_ghost_size;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_DATA, and linked off the arc_mru_ghost state.
*/
kstat_named_t arcstat_mru_ghost_evictable_data;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_METADATA, and linked off the arc_mru_ghost state.
*/
kstat_named_t arcstat_mru_ghost_evictable_metadata;
/*
* Total number of bytes consumed by ARC buffers residing in the
* arc_mfu state. This includes *all* buffers in the arc_mfu
* state; e.g. data, metadata, evictable, and unevictable buffers
* are all included in this value.
*/
kstat_named_t arcstat_mfu_size;
/*
* Number of bytes consumed by ARC buffers that are eligible for
* eviction, of type ARC_BUFC_DATA, and reside in the arc_mfu
* state.
*/
kstat_named_t arcstat_mfu_evictable_data;
/*
* Number of bytes consumed by ARC buffers that are eligible for
* eviction, of type ARC_BUFC_METADATA, and reside in the
* arc_mfu state.
*/
kstat_named_t arcstat_mfu_evictable_metadata;
/*
* Total number of bytes that *would have been* consumed by ARC
* buffers in the arc_mfu_ghost state. See the comment above
* arcstat_mru_ghost_size for more details.
*/
kstat_named_t arcstat_mfu_ghost_size;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
* ARC_BUFC_DATA, and linked off the arc_mfu_ghost state.
*/
kstat_named_t arcstat_mfu_ghost_evictable_data;
/*
* Number of bytes that *would have been* consumed by ARC
* buffers that are eligible for eviction, of type
 * ARC_BUFC_METADATA, and linked off the arc_mfu_ghost state.
*/
kstat_named_t arcstat_mfu_ghost_evictable_metadata;
kstat_named_t arcstat_l2_hits;
kstat_named_t arcstat_l2_misses;
kstat_named_t arcstat_l2_feeds;
kstat_named_t arcstat_l2_rw_clash;
kstat_named_t arcstat_l2_read_bytes;
kstat_named_t arcstat_l2_write_bytes;
kstat_named_t arcstat_l2_writes_sent;
kstat_named_t arcstat_l2_writes_done;
kstat_named_t arcstat_l2_writes_error;
kstat_named_t arcstat_l2_writes_lock_retry;
kstat_named_t arcstat_l2_evict_lock_retry;
kstat_named_t arcstat_l2_evict_reading;
kstat_named_t arcstat_l2_evict_l1cached;
kstat_named_t arcstat_l2_free_on_write;
kstat_named_t arcstat_l2_abort_lowmem;
kstat_named_t arcstat_l2_cksum_bad;
kstat_named_t arcstat_l2_io_error;
kstat_named_t arcstat_l2_lsize;
kstat_named_t arcstat_l2_psize;
kstat_named_t arcstat_l2_hdr_size;
kstat_named_t arcstat_memory_throttle_count;
kstat_named_t arcstat_memory_direct_count;
kstat_named_t arcstat_memory_indirect_count;
kstat_named_t arcstat_memory_all_bytes;
kstat_named_t arcstat_memory_free_bytes;
kstat_named_t arcstat_memory_available_bytes;
kstat_named_t arcstat_no_grow;
kstat_named_t arcstat_tempreserve;
kstat_named_t arcstat_loaned_bytes;
kstat_named_t arcstat_prune;
kstat_named_t arcstat_meta_used;
kstat_named_t arcstat_meta_limit;
kstat_named_t arcstat_dnode_limit;
kstat_named_t arcstat_meta_max;
kstat_named_t arcstat_meta_min;
kstat_named_t arcstat_async_upgrade_sync;
kstat_named_t arcstat_demand_hit_predictive_prefetch;
kstat_named_t arcstat_demand_hit_prescient_prefetch;
kstat_named_t arcstat_need_free;
kstat_named_t arcstat_sys_free;
kstat_named_t arcstat_raw_size;
} arc_stats_t;
static arc_stats_t arc_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "demand_data_hits", KSTAT_DATA_UINT64 },
{ "demand_data_misses", KSTAT_DATA_UINT64 },
{ "demand_metadata_hits", KSTAT_DATA_UINT64 },
{ "demand_metadata_misses", KSTAT_DATA_UINT64 },
{ "prefetch_data_hits", KSTAT_DATA_UINT64 },
{ "prefetch_data_misses", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_hits", KSTAT_DATA_UINT64 },
{ "prefetch_metadata_misses", KSTAT_DATA_UINT64 },
{ "mru_hits", KSTAT_DATA_UINT64 },
{ "mru_ghost_hits", KSTAT_DATA_UINT64 },
{ "mfu_hits", KSTAT_DATA_UINT64 },
{ "mfu_ghost_hits", KSTAT_DATA_UINT64 },
{ "deleted", KSTAT_DATA_UINT64 },
{ "mutex_miss", KSTAT_DATA_UINT64 },
{ "access_skip", KSTAT_DATA_UINT64 },
{ "evict_skip", KSTAT_DATA_UINT64 },
{ "evict_not_enough", KSTAT_DATA_UINT64 },
{ "evict_l2_cached", KSTAT_DATA_UINT64 },
{ "evict_l2_eligible", KSTAT_DATA_UINT64 },
{ "evict_l2_ineligible", KSTAT_DATA_UINT64 },
{ "evict_l2_skip", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "p", KSTAT_DATA_UINT64 },
{ "c", KSTAT_DATA_UINT64 },
{ "c_min", KSTAT_DATA_UINT64 },
{ "c_max", KSTAT_DATA_UINT64 },
{ "size", KSTAT_DATA_UINT64 },
{ "compressed_size", KSTAT_DATA_UINT64 },
{ "uncompressed_size", KSTAT_DATA_UINT64 },
{ "overhead_size", KSTAT_DATA_UINT64 },
{ "hdr_size", KSTAT_DATA_UINT64 },
{ "data_size", KSTAT_DATA_UINT64 },
{ "metadata_size", KSTAT_DATA_UINT64 },
{ "dbuf_size", KSTAT_DATA_UINT64 },
{ "dnode_size", KSTAT_DATA_UINT64 },
{ "bonus_size", KSTAT_DATA_UINT64 },
{ "anon_size", KSTAT_DATA_UINT64 },
{ "anon_evictable_data", KSTAT_DATA_UINT64 },
{ "anon_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_size", KSTAT_DATA_UINT64 },
{ "mru_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mru_ghost_size", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mru_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_size", KSTAT_DATA_UINT64 },
{ "mfu_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_evictable_metadata", KSTAT_DATA_UINT64 },
{ "mfu_ghost_size", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_data", KSTAT_DATA_UINT64 },
{ "mfu_ghost_evictable_metadata", KSTAT_DATA_UINT64 },
{ "l2_hits", KSTAT_DATA_UINT64 },
{ "l2_misses", KSTAT_DATA_UINT64 },
{ "l2_feeds", KSTAT_DATA_UINT64 },
{ "l2_rw_clash", KSTAT_DATA_UINT64 },
{ "l2_read_bytes", KSTAT_DATA_UINT64 },
{ "l2_write_bytes", KSTAT_DATA_UINT64 },
{ "l2_writes_sent", KSTAT_DATA_UINT64 },
{ "l2_writes_done", KSTAT_DATA_UINT64 },
{ "l2_writes_error", KSTAT_DATA_UINT64 },
{ "l2_writes_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_lock_retry", KSTAT_DATA_UINT64 },
{ "l2_evict_reading", KSTAT_DATA_UINT64 },
{ "l2_evict_l1cached", KSTAT_DATA_UINT64 },
{ "l2_free_on_write", KSTAT_DATA_UINT64 },
{ "l2_abort_lowmem", KSTAT_DATA_UINT64 },
{ "l2_cksum_bad", KSTAT_DATA_UINT64 },
{ "l2_io_error", KSTAT_DATA_UINT64 },
{ "l2_size", KSTAT_DATA_UINT64 },
{ "l2_asize", KSTAT_DATA_UINT64 },
{ "l2_hdr_size", KSTAT_DATA_UINT64 },
{ "memory_throttle_count", KSTAT_DATA_UINT64 },
{ "memory_direct_count", KSTAT_DATA_UINT64 },
{ "memory_indirect_count", KSTAT_DATA_UINT64 },
{ "memory_all_bytes", KSTAT_DATA_UINT64 },
{ "memory_free_bytes", KSTAT_DATA_UINT64 },
{ "memory_available_bytes", KSTAT_DATA_INT64 },
{ "arc_no_grow", KSTAT_DATA_UINT64 },
{ "arc_tempreserve", KSTAT_DATA_UINT64 },
{ "arc_loaned_bytes", KSTAT_DATA_UINT64 },
{ "arc_prune", KSTAT_DATA_UINT64 },
{ "arc_meta_used", KSTAT_DATA_UINT64 },
{ "arc_meta_limit", KSTAT_DATA_UINT64 },
{ "arc_dnode_limit", KSTAT_DATA_UINT64 },
{ "arc_meta_max", KSTAT_DATA_UINT64 },
{ "arc_meta_min", KSTAT_DATA_UINT64 },
{ "async_upgrade_sync", KSTAT_DATA_UINT64 },
{ "demand_hit_predictive_prefetch", KSTAT_DATA_UINT64 },
{ "demand_hit_prescient_prefetch", KSTAT_DATA_UINT64 },
{ "arc_need_free", KSTAT_DATA_UINT64 },
{ "arc_sys_free", KSTAT_DATA_UINT64 },
{ "arc_raw_size", KSTAT_DATA_UINT64 }
};
#define ARCSTAT(stat) (arc_stats.stat.value.ui64)
#define ARCSTAT_INCR(stat, val) \
atomic_add_64(&arc_stats.stat.value.ui64, (val))
#define ARCSTAT_BUMP(stat) ARCSTAT_INCR(stat, 1)
#define ARCSTAT_BUMPDOWN(stat) ARCSTAT_INCR(stat, -1)
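/*
 * Lock-free maximum update: retry the compare-and-swap until either the
 * stored stat is no longer smaller than val or the swap succeeds.
 */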
#define ARCSTAT_MAX(stat, val) { \
uint64_t m; \
while ((val) > (m = arc_stats.stat.value.ui64) && \
(m != atomic_cas_64(&arc_stats.stat.value.ui64, m, (val)))) \
continue; \
}
#define ARCSTAT_MAXSTAT(stat) \
ARCSTAT_MAX(stat##_max, arc_stats.stat.value.ui64)
/*
* We define a macro to allow ARC hits/misses to be easily broken down by
* two separate conditions, giving a total of four different subtypes for
* each of hits and misses (so eight statistics total).
*/
#define ARCSTAT_CONDSTAT(cond1, stat1, notstat1, cond2, stat2, notstat2, stat) \
if (cond1) { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##stat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##stat1##_##notstat2##_##stat); \
} \
} else { \
if (cond2) { \
ARCSTAT_BUMP(arcstat_##notstat1##_##stat2##_##stat); \
} else { \
ARCSTAT_BUMP(arcstat_##notstat1##_##notstat2##_##stat);\
} \
}
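/*
 * For example (illustrative only), a hit could be classified by prefetch
 * and buffer type with:
 *
 *	ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr), demand, prefetch,
 *	    !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
 *
 * which bumps arcstat_demand_metadata_hits when the header is not a
 * prefetch and holds metadata, arcstat_prefetch_data_hits when it is a
 * prefetch holding data, and so on.
 */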
kstat_t *arc_ksp;
static arc_state_t *arc_anon;
static arc_state_t *arc_mru;
static arc_state_t *arc_mru_ghost;
static arc_state_t *arc_mfu;
static arc_state_t *arc_mfu_ghost;
static arc_state_t *arc_l2c_only;
/*
* There are several ARC variables that are critical to export as kstats --
* but we don't want to have to grovel around in the kstat whenever we wish to
* manipulate them. For these variables, we therefore define them to be in
* terms of the statistic variable. This assures that we are not introducing
* the possibility of inconsistency by having shadow copies of the variables,
* while still allowing the code to be readable.
*/
#define arc_size ARCSTAT(arcstat_size) /* actual total arc size */
#define arc_p ARCSTAT(arcstat_p) /* target size of MRU */
#define arc_c ARCSTAT(arcstat_c) /* target size of cache */
#define arc_c_min ARCSTAT(arcstat_c_min) /* min target cache size */
#define arc_c_max ARCSTAT(arcstat_c_max) /* max target cache size */
#define arc_no_grow ARCSTAT(arcstat_no_grow) /* do not grow cache size */
#define arc_tempreserve ARCSTAT(arcstat_tempreserve)
#define arc_loaned_bytes ARCSTAT(arcstat_loaned_bytes)
#define arc_meta_limit ARCSTAT(arcstat_meta_limit) /* max size for metadata */
#define arc_dnode_limit ARCSTAT(arcstat_dnode_limit) /* max size for dnodes */
#define arc_meta_min ARCSTAT(arcstat_meta_min) /* min size for metadata */
#define arc_meta_used ARCSTAT(arcstat_meta_used) /* size of metadata */
#define arc_meta_max ARCSTAT(arcstat_meta_max) /* max size of metadata */
#define arc_dbuf_size ARCSTAT(arcstat_dbuf_size) /* dbuf metadata */
#define arc_dnode_size ARCSTAT(arcstat_dnode_size) /* dnode metadata */
#define arc_bonus_size ARCSTAT(arcstat_bonus_size) /* bonus buffer metadata */
#define arc_need_free ARCSTAT(arcstat_need_free) /* bytes to be freed */
#define arc_sys_free ARCSTAT(arcstat_sys_free) /* target system free bytes */
/* size of all b_rabd's in entire arc */
#define arc_raw_size ARCSTAT(arcstat_raw_size)
/* compressed size of entire arc */
#define arc_compressed_size ARCSTAT(arcstat_compressed_size)
/* uncompressed size of entire arc */
#define arc_uncompressed_size ARCSTAT(arcstat_uncompressed_size)
/* number of bytes in the arc from arc_buf_t's */
#define arc_overhead_size ARCSTAT(arcstat_overhead_size)
static list_t arc_prune_list;
static kmutex_t arc_prune_mtx;
static taskq_t *arc_prune_taskq;
#define GHOST_STATE(state) \
((state) == arc_mru_ghost || (state) == arc_mfu_ghost || \
(state) == arc_l2c_only)
#define HDR_IN_HASH_TABLE(hdr) ((hdr)->b_flags & ARC_FLAG_IN_HASH_TABLE)
#define HDR_IO_IN_PROGRESS(hdr) ((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS)
#define HDR_IO_ERROR(hdr) ((hdr)->b_flags & ARC_FLAG_IO_ERROR)
#define HDR_PREFETCH(hdr) ((hdr)->b_flags & ARC_FLAG_PREFETCH)
#define HDR_PRESCIENT_PREFETCH(hdr) \
((hdr)->b_flags & ARC_FLAG_PRESCIENT_PREFETCH)
#define HDR_COMPRESSION_ENABLED(hdr) \
((hdr)->b_flags & ARC_FLAG_COMPRESSED_ARC)
#define HDR_L2CACHE(hdr) ((hdr)->b_flags & ARC_FLAG_L2CACHE)
#define HDR_L2_READING(hdr) \
(((hdr)->b_flags & ARC_FLAG_IO_IN_PROGRESS) && \
((hdr)->b_flags & ARC_FLAG_HAS_L2HDR))
#define HDR_L2_WRITING(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITING)
#define HDR_L2_EVICTED(hdr) ((hdr)->b_flags & ARC_FLAG_L2_EVICTED)
#define HDR_L2_WRITE_HEAD(hdr) ((hdr)->b_flags & ARC_FLAG_L2_WRITE_HEAD)
#define HDR_PROTECTED(hdr) ((hdr)->b_flags & ARC_FLAG_PROTECTED)
#define HDR_NOAUTH(hdr) ((hdr)->b_flags & ARC_FLAG_NOAUTH)
#define HDR_SHARED_DATA(hdr) ((hdr)->b_flags & ARC_FLAG_SHARED_DATA)
#define HDR_ISTYPE_METADATA(hdr) \
((hdr)->b_flags & ARC_FLAG_BUFC_METADATA)
#define HDR_ISTYPE_DATA(hdr) (!HDR_ISTYPE_METADATA(hdr))
#define HDR_HAS_L1HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L1HDR)
#define HDR_HAS_L2HDR(hdr) ((hdr)->b_flags & ARC_FLAG_HAS_L2HDR)
#define HDR_HAS_RABD(hdr) \
(HDR_HAS_L1HDR(hdr) && HDR_PROTECTED(hdr) && \
(hdr)->b_crypt_hdr.b_rabd != NULL)
#define HDR_ENCRYPTED(hdr) \
(HDR_PROTECTED(hdr) && DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
#define HDR_AUTHENTICATED(hdr) \
(HDR_PROTECTED(hdr) && !DMU_OT_IS_ENCRYPTED((hdr)->b_crypt_hdr.b_ot))
/* For storing compression mode in b_flags */
#define HDR_COMPRESS_OFFSET (highbit64(ARC_FLAG_COMPRESS_0) - 1)
#define HDR_GET_COMPRESS(hdr) ((enum zio_compress)BF32_GET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS))
#define HDR_SET_COMPRESS(hdr, cmp) BF32_SET((hdr)->b_flags, \
HDR_COMPRESS_OFFSET, SPA_COMPRESSBITS, (cmp));
#define ARC_BUF_LAST(buf) ((buf)->b_next == NULL)
#define ARC_BUF_SHARED(buf) ((buf)->b_flags & ARC_BUF_FLAG_SHARED)
#define ARC_BUF_COMPRESSED(buf) ((buf)->b_flags & ARC_BUF_FLAG_COMPRESSED)
#define ARC_BUF_ENCRYPTED(buf) ((buf)->b_flags & ARC_BUF_FLAG_ENCRYPTED)
/*
* Other sizes
*/
#define HDR_FULL_CRYPT_SIZE ((int64_t)sizeof (arc_buf_hdr_t))
#define HDR_FULL_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_crypt_hdr))
#define HDR_L2ONLY_SIZE ((int64_t)offsetof(arc_buf_hdr_t, b_l1hdr))
/*
* Hash table routines
*/
#define HT_LOCK_ALIGN 64
#define HT_LOCK_PAD (P2NPHASE(sizeof (kmutex_t), (HT_LOCK_ALIGN)))
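/*
 * Each hash lock is padded out to a 64-byte boundary (presumably a cache
 * line) so that adjacent locks in ht_locks[] are less likely to share a
 * cache line and false-share under contention.
 */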
struct ht_lock {
kmutex_t ht_lock;
#ifdef _KERNEL
unsigned char pad[HT_LOCK_PAD];
#endif
};
#define BUF_LOCKS 8192
typedef struct buf_hash_table {
uint64_t ht_mask;
arc_buf_hdr_t **ht_table;
struct ht_lock ht_locks[BUF_LOCKS];
} buf_hash_table_t;
static buf_hash_table_t buf_hash_table;
#define BUF_HASH_INDEX(spa, dva, birth) \
(buf_hash(spa, dva, birth) & buf_hash_table.ht_mask)
#define BUF_HASH_LOCK_NTRY(idx) (buf_hash_table.ht_locks[idx & (BUF_LOCKS-1)])
#define BUF_HASH_LOCK(idx) (&(BUF_HASH_LOCK_NTRY(idx).ht_lock))
#define HDR_LOCK(hdr) \
(BUF_HASH_LOCK(BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth)))
uint64_t zfs_crc64_table[256];
/*
* Level 2 ARC
*/
#define L2ARC_WRITE_SIZE (8 * 1024 * 1024) /* initial write max */
#define L2ARC_HEADROOM 2 /* num of writes */
/*
* If we discover during ARC scan any buffers to be compressed, we boost
* our headroom for the next scanning cycle by this percentage multiple.
*/
#define L2ARC_HEADROOM_BOOST 200
#define L2ARC_FEED_SECS 1 /* caching interval secs */
#define L2ARC_FEED_MIN_MS 200 /* min caching interval ms */
/*
* We can feed L2ARC from two states of ARC buffers, mru and mfu,
 * and each of those states has two types: data and metadata.
*/
#define L2ARC_FEED_TYPES 4
#define l2arc_writes_sent ARCSTAT(arcstat_l2_writes_sent)
#define l2arc_writes_done ARCSTAT(arcstat_l2_writes_done)
/* L2ARC Performance Tunables */
unsigned long l2arc_write_max = L2ARC_WRITE_SIZE; /* def max write size */
unsigned long l2arc_write_boost = L2ARC_WRITE_SIZE; /* extra warmup write */
unsigned long l2arc_headroom = L2ARC_HEADROOM; /* # of dev writes */
unsigned long l2arc_headroom_boost = L2ARC_HEADROOM_BOOST;
unsigned long l2arc_feed_secs = L2ARC_FEED_SECS; /* interval seconds */
unsigned long l2arc_feed_min_ms = L2ARC_FEED_MIN_MS; /* min interval msecs */
int l2arc_noprefetch = B_TRUE; /* don't cache prefetch bufs */
int l2arc_feed_again = B_TRUE; /* turbo warmup */
int l2arc_norw = B_FALSE; /* no reads during writes */
/*
* L2ARC Internals
*/
static list_t L2ARC_dev_list; /* device list */
static list_t *l2arc_dev_list; /* device list pointer */
static kmutex_t l2arc_dev_mtx; /* device list mutex */
static l2arc_dev_t *l2arc_dev_last; /* last device used */
static list_t L2ARC_free_on_write; /* free after write buf list */
static list_t *l2arc_free_on_write; /* free after write list ptr */
static kmutex_t l2arc_free_on_write_mtx; /* mutex for list */
static uint64_t l2arc_ndev; /* number of devices */
typedef struct l2arc_read_callback {
arc_buf_hdr_t *l2rcb_hdr; /* read header */
blkptr_t l2rcb_bp; /* original blkptr */
zbookmark_phys_t l2rcb_zb; /* original bookmark */
int l2rcb_flags; /* original flags */
abd_t *l2rcb_abd; /* temporary buffer */
} l2arc_read_callback_t;
typedef struct l2arc_data_free {
/* protected by l2arc_free_on_write_mtx */
abd_t *l2df_abd;
size_t l2df_size;
arc_buf_contents_t l2df_type;
list_node_t l2df_list_node;
} l2arc_data_free_t;
typedef enum arc_fill_flags {
ARC_FILL_LOCKED = 1 << 0, /* hdr lock is held */
ARC_FILL_COMPRESSED = 1 << 1, /* fill with compressed data */
ARC_FILL_ENCRYPTED = 1 << 2, /* fill with encrypted data */
ARC_FILL_NOAUTH = 1 << 3, /* don't attempt to authenticate */
ARC_FILL_IN_PLACE = 1 << 4 /* fill in place (special case) */
} arc_fill_flags_t;
static kmutex_t l2arc_feed_thr_lock;
static kcondvar_t l2arc_feed_thr_cv;
static uint8_t l2arc_thread_exit;
static abd_t *arc_get_data_abd(arc_buf_hdr_t *, uint64_t, void *);
static void *arc_get_data_buf(arc_buf_hdr_t *, uint64_t, void *);
static void arc_get_data_impl(arc_buf_hdr_t *, uint64_t, void *);
static void arc_free_data_abd(arc_buf_hdr_t *, abd_t *, uint64_t, void *);
static void arc_free_data_buf(arc_buf_hdr_t *, void *, uint64_t, void *);
static void arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag);
static void arc_hdr_free_abd(arc_buf_hdr_t *, boolean_t);
static void arc_hdr_alloc_abd(arc_buf_hdr_t *, boolean_t);
static void arc_access(arc_buf_hdr_t *, kmutex_t *);
static boolean_t arc_is_overflowing(void);
static void arc_buf_watch(arc_buf_t *);
static void arc_tuning_update(void);
static void arc_prune_async(int64_t);
static uint64_t arc_all_memory(void);
static arc_buf_contents_t arc_buf_type(arc_buf_hdr_t *);
static uint32_t arc_bufc_to_flags(arc_buf_contents_t);
static inline void arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static inline void arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags);
static boolean_t l2arc_write_eligible(uint64_t, arc_buf_hdr_t *);
static void l2arc_read_done(zio_t *);
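/*
 * Hash a buffer's identity (spa, DVA and birth TXG) to an index into the
 * buf hash table: the DVA bytes are folded through the CRC64 table and the
 * result is mixed with the spa and birth values.
 */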
static uint64_t
buf_hash(uint64_t spa, const dva_t *dva, uint64_t birth)
{
uint8_t *vdva = (uint8_t *)dva;
uint64_t crc = -1ULL;
int i;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
for (i = 0; i < sizeof (dva_t); i++)
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ vdva[i]) & 0xFF];
crc ^= (spa>>8) ^ birth;
return (crc);
}
#define HDR_EMPTY(hdr) \
((hdr)->b_dva.dva_word[0] == 0 && \
(hdr)->b_dva.dva_word[1] == 0)
#define HDR_EQUAL(spa, dva, birth, hdr) \
((hdr)->b_dva.dva_word[0] == (dva)->dva_word[0]) && \
((hdr)->b_dva.dva_word[1] == (dva)->dva_word[1]) && \
((hdr)->b_birth == birth) && ((hdr)->b_spa == spa)
static void
buf_discard_identity(arc_buf_hdr_t *hdr)
{
hdr->b_dva.dva_word[0] = 0;
hdr->b_dva.dva_word[1] = 0;
hdr->b_birth = 0;
}
static arc_buf_hdr_t *
buf_hash_find(uint64_t spa, const blkptr_t *bp, kmutex_t **lockp)
{
const dva_t *dva = BP_IDENTITY(bp);
uint64_t birth = BP_PHYSICAL_BIRTH(bp);
uint64_t idx = BUF_HASH_INDEX(spa, dva, birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *hdr;
mutex_enter(hash_lock);
for (hdr = buf_hash_table.ht_table[idx]; hdr != NULL;
hdr = hdr->b_hash_next) {
if (HDR_EQUAL(spa, dva, birth, hdr)) {
*lockp = hash_lock;
return (hdr);
}
}
mutex_exit(hash_lock);
*lockp = NULL;
return (NULL);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
* If lockp == NULL, the caller is assumed to already hold the hash lock.
*/
static arc_buf_hdr_t *
buf_hash_insert(arc_buf_hdr_t *hdr, kmutex_t **lockp)
{
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
kmutex_t *hash_lock = BUF_HASH_LOCK(idx);
arc_buf_hdr_t *fhdr;
uint32_t i;
ASSERT(!DVA_IS_EMPTY(&hdr->b_dva));
ASSERT(hdr->b_birth != 0);
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (lockp != NULL) {
*lockp = hash_lock;
mutex_enter(hash_lock);
} else {
ASSERT(MUTEX_HELD(hash_lock));
}
for (fhdr = buf_hash_table.ht_table[idx], i = 0; fhdr != NULL;
fhdr = fhdr->b_hash_next, i++) {
if (HDR_EQUAL(hdr->b_spa, &hdr->b_dva, hdr->b_birth, fhdr))
return (fhdr);
}
hdr->b_hash_next = buf_hash_table.ht_table[idx];
buf_hash_table.ht_table[idx] = hdr;
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
if (i > 0) {
ARCSTAT_BUMP(arcstat_hash_collisions);
if (i == 1)
ARCSTAT_BUMP(arcstat_hash_chains);
ARCSTAT_MAX(arcstat_hash_chain_max, i);
}
ARCSTAT_BUMP(arcstat_hash_elements);
ARCSTAT_MAXSTAT(arcstat_hash_elements);
return (NULL);
}
static void
buf_hash_remove(arc_buf_hdr_t *hdr)
{
arc_buf_hdr_t *fhdr, **hdrp;
uint64_t idx = BUF_HASH_INDEX(hdr->b_spa, &hdr->b_dva, hdr->b_birth);
ASSERT(MUTEX_HELD(BUF_HASH_LOCK(idx)));
ASSERT(HDR_IN_HASH_TABLE(hdr));
hdrp = &buf_hash_table.ht_table[idx];
while ((fhdr = *hdrp) != hdr) {
ASSERT3P(fhdr, !=, NULL);
hdrp = &fhdr->b_hash_next;
}
*hdrp = hdr->b_hash_next;
hdr->b_hash_next = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
/* collect some hash table performance data */
ARCSTAT_BUMPDOWN(arcstat_hash_elements);
if (buf_hash_table.ht_table[idx] &&
buf_hash_table.ht_table[idx]->b_hash_next == NULL)
ARCSTAT_BUMPDOWN(arcstat_hash_chains);
}
/*
* Global data structures and functions for the buf kmem cache.
*/
static kmem_cache_t *hdr_full_cache;
static kmem_cache_t *hdr_full_crypt_cache;
static kmem_cache_t *hdr_l2only_cache;
static kmem_cache_t *buf_cache;
static void
buf_fini(void)
{
int i;
#if defined(_KERNEL) && defined(HAVE_SPL)
/*
* Large allocations which do not require contiguous pages
 * should be using vmem_free() in the linux kernel
*/
vmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#else
kmem_free(buf_hash_table.ht_table,
(buf_hash_table.ht_mask + 1) * sizeof (void *));
#endif
for (i = 0; i < BUF_LOCKS; i++)
mutex_destroy(&buf_hash_table.ht_locks[i].ht_lock);
kmem_cache_destroy(hdr_full_cache);
kmem_cache_destroy(hdr_full_crypt_cache);
kmem_cache_destroy(hdr_l2only_cache);
kmem_cache_destroy(buf_cache);
}
/*
* Constructor callback - called when the cache is empty
* and a new buf is requested.
*/
/* ARGSUSED */
static int
hdr_full_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_FULL_SIZE);
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
cv_init(&hdr->b_l1hdr.b_cv, NULL, CV_DEFAULT, NULL);
refcount_create(&hdr->b_l1hdr.b_refcnt);
mutex_init(&hdr->b_l1hdr.b_freeze_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&hdr->b_l1hdr.b_arc_node);
list_link_init(&hdr->b_l2hdr.b_l2node);
multilist_link_init(&hdr->b_l1hdr.b_arc_node);
arc_space_consume(HDR_FULL_SIZE, ARC_SPACE_HDRS);
return (0);
}
/* ARGSUSED */
static int
hdr_full_crypt_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
hdr_full_cons(vbuf, unused, kmflag);
bzero(&hdr->b_crypt_hdr, sizeof (hdr->b_crypt_hdr));
arc_space_consume(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
return (0);
}
/* ARGSUSED */
static int
hdr_l2only_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_hdr_t *hdr = vbuf;
bzero(hdr, HDR_L2ONLY_SIZE);
arc_space_consume(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
return (0);
}
/* ARGSUSED */
static int
buf_cons(void *vbuf, void *unused, int kmflag)
{
arc_buf_t *buf = vbuf;
bzero(buf, sizeof (arc_buf_t));
mutex_init(&buf->b_evict_lock, NULL, MUTEX_DEFAULT, NULL);
arc_space_consume(sizeof (arc_buf_t), ARC_SPACE_HDRS);
return (0);
}
/*
* Destructor callback - called when a cached buf is
* no longer required.
*/
/* ARGSUSED */
static void
hdr_full_dest(void *vbuf, void *unused)
{
arc_buf_hdr_t *hdr = vbuf;
ASSERT(HDR_EMPTY(hdr));
cv_destroy(&hdr->b_l1hdr.b_cv);
refcount_destroy(&hdr->b_l1hdr.b_refcnt);
mutex_destroy(&hdr->b_l1hdr.b_freeze_lock);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
arc_space_return(HDR_FULL_SIZE, ARC_SPACE_HDRS);
}
/* ARGSUSED */
static void
hdr_full_crypt_dest(void *vbuf, void *unused)
{
arc_buf_hdr_t *hdr = vbuf;
hdr_full_dest(vbuf, unused);
arc_space_return(sizeof (hdr->b_crypt_hdr), ARC_SPACE_HDRS);
}
/* ARGSUSED */
static void
hdr_l2only_dest(void *vbuf, void *unused)
{
ASSERTV(arc_buf_hdr_t *hdr = vbuf);
ASSERT(HDR_EMPTY(hdr));
arc_space_return(HDR_L2ONLY_SIZE, ARC_SPACE_L2HDRS);
}
/* ARGSUSED */
static void
buf_dest(void *vbuf, void *unused)
{
arc_buf_t *buf = vbuf;
mutex_destroy(&buf->b_evict_lock);
arc_space_return(sizeof (arc_buf_t), ARC_SPACE_HDRS);
}
/*
* Reclaim callback -- invoked when memory is low.
*/
/* ARGSUSED */
static void
hdr_recl(void *unused)
{
dprintf("hdr_recl called\n");
/*
* umem calls the reclaim func when we destroy the buf cache,
* which is after we do arc_fini().
*/
if (!arc_dead)
cv_signal(&arc_reclaim_thread_cv);
}
static void
buf_init(void)
{
uint64_t *ct = NULL;
uint64_t hsize = 1ULL << 12;
int i, j;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
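/*
 * For example (assuming 8-byte pointers and the default 8K average block
 * size), a 16 GB system ends up with a 2M-entry table, i.e. about 16 MB
 * of pointers.
 */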
while (hsize * zfs_arc_average_blocksize < arc_all_memory())
hsize <<= 1;
retry:
buf_hash_table.ht_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
buf_hash_table.ht_table =
vmem_zalloc(hsize * sizeof (void*), KM_SLEEP);
#else
buf_hash_table.ht_table =
kmem_zalloc(hsize * sizeof (void*), KM_NOSLEEP);
#endif
if (buf_hash_table.ht_table == NULL) {
ASSERT(hsize > (1ULL << 8));
hsize >>= 1;
goto retry;
}
hdr_full_cache = kmem_cache_create("arc_buf_hdr_t_full", HDR_FULL_SIZE,
0, hdr_full_cons, hdr_full_dest, hdr_recl, NULL, NULL, 0);
hdr_full_crypt_cache = kmem_cache_create("arc_buf_hdr_t_full_crypt",
HDR_FULL_CRYPT_SIZE, 0, hdr_full_crypt_cons, hdr_full_crypt_dest,
hdr_recl, NULL, NULL, 0);
hdr_l2only_cache = kmem_cache_create("arc_buf_hdr_t_l2only",
HDR_L2ONLY_SIZE, 0, hdr_l2only_cons, hdr_l2only_dest, hdr_recl,
NULL, NULL, 0);
buf_cache = kmem_cache_create("arc_buf_t", sizeof (arc_buf_t),
0, buf_cons, buf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < 256; i++)
for (ct = zfs_crc64_table + i, *ct = i, j = 8; j > 0; j--)
*ct = (*ct >> 1) ^ (-(*ct & 1) & ZFS_CRC64_POLY);
for (i = 0; i < BUF_LOCKS; i++) {
mutex_init(&buf_hash_table.ht_locks[i].ht_lock,
NULL, MUTEX_DEFAULT, NULL);
}
}
#define ARC_MINTIME (hz>>4) /* 62 ms */
/*
* This is the size that the buf occupies in memory. If the buf is compressed,
* it will correspond to the compressed size. You should use this method of
* getting the buf size unless you explicitly need the logical size.
*/
uint64_t
arc_buf_size(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_PSIZE(buf->b_hdr) : HDR_GET_LSIZE(buf->b_hdr));
}
uint64_t
arc_buf_lsize(arc_buf_t *buf)
{
return (HDR_GET_LSIZE(buf->b_hdr));
}
/*
* This function will return B_TRUE if the buffer is encrypted in memory.
* This buffer can be decrypted by calling arc_untransform().
*/
boolean_t
arc_is_encrypted(arc_buf_t *buf)
{
return (ARC_BUF_ENCRYPTED(buf) != 0);
}
/*
* Returns B_TRUE if the buffer represents data that has not had its MAC
* verified yet.
*/
boolean_t
arc_is_unauthenticated(arc_buf_t *buf)
{
return (HDR_NOAUTH(buf->b_hdr) != 0);
}
void
arc_get_raw_params(arc_buf_t *buf, boolean_t *byteorder, uint8_t *salt,
uint8_t *iv, uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_PROTECTED(hdr));
bcopy(hdr->b_crypt_hdr.b_salt, salt, ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, iv, ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, mac, ZIO_DATA_MAC_LEN);
*byteorder = (hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
}
/*
* Indicates how this buffer is compressed in memory. If it is not compressed
* the value will be ZIO_COMPRESS_OFF. It can be made normally readable with
* arc_untransform() as long as it is also unencrypted.
*/
enum zio_compress
arc_get_compression(arc_buf_t *buf)
{
return (ARC_BUF_COMPRESSED(buf) ?
HDR_GET_COMPRESS(buf->b_hdr) : ZIO_COMPRESS_OFF);
}
/*
* Return the compression algorithm used to store this data in the ARC. If ARC
* compression is enabled or this is an encrypted block, this will be the same
* as what's used to store it on-disk. Otherwise, this will be ZIO_COMPRESS_OFF.
*/
static inline enum zio_compress
arc_hdr_get_compress(arc_buf_hdr_t *hdr)
{
return (HDR_COMPRESSION_ENABLED(hdr) ?
HDR_GET_COMPRESS(hdr) : ZIO_COMPRESS_OFF);
}
static inline boolean_t
arc_buf_is_shared(arc_buf_t *buf)
{
boolean_t shared = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_pabd != NULL &&
abd_is_linear(buf->b_hdr->b_l1hdr.b_pabd) &&
buf->b_data == abd_to_buf(buf->b_hdr->b_l1hdr.b_pabd));
IMPLY(shared, HDR_SHARED_DATA(buf->b_hdr));
IMPLY(shared, ARC_BUF_SHARED(buf));
IMPLY(shared, ARC_BUF_COMPRESSED(buf) || ARC_BUF_LAST(buf));
/*
* It would be nice to assert arc_can_share() too, but the "hdr isn't
* already being shared" requirement prevents us from doing that.
*/
return (shared);
}
/*
* Free the checksum associated with this header. If there is no checksum, this
* is a no-op.
*/
static inline void
arc_cksum_free(arc_buf_hdr_t *hdr)
{
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
kmem_free(hdr->b_l1hdr.b_freeze_cksum, sizeof (zio_cksum_t));
hdr->b_l1hdr.b_freeze_cksum = NULL;
}
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* Return true iff at least one of the bufs on hdr is not compressed.
* Encrypted buffers count as compressed.
*/
static boolean_t
arc_hdr_has_uncompressed_buf(arc_buf_hdr_t *hdr)
{
for (arc_buf_t *b = hdr->b_l1hdr.b_buf; b != NULL; b = b->b_next) {
if (!ARC_BUF_COMPRESSED(b)) {
return (B_TRUE);
}
}
return (B_FALSE);
}
/*
* If we've turned on the ZFS_DEBUG_MODIFY flag, verify that the buf's data
* matches the checksum that is stored in the hdr. If there is no checksum,
* or if the buf is compressed, this is a no-op.
*/
static void
arc_cksum_verify(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
zio_cksum_t zc;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf)) {
ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
arc_hdr_has_uncompressed_buf(hdr));
return;
}
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum == NULL || HDR_IO_ERROR(hdr)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL, &zc);
if (!ZIO_CHECKSUM_EQUAL(*hdr->b_l1hdr.b_freeze_cksum, zc))
panic("buffer modified while frozen!");
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
}
/*
* This function makes the assumption that data stored in the L2ARC
* will be transformed exactly as it is in the main pool. Because of
* this we can verify the checksum against the reading process's bp.
*/
static boolean_t
arc_cksum_is_equal(arc_buf_hdr_t *hdr, zio_t *zio)
{
ASSERT(!BP_IS_EMBEDDED(zio->io_bp));
VERIFY3U(BP_GET_PSIZE(zio->io_bp), ==, HDR_GET_PSIZE(hdr));
/*
* Block pointers always store the checksum for the logical data.
* If the block pointer has the gang bit set, then the checksum
* it represents is for the reconstituted data and not for an
* individual gang member. The zio pipeline, however, must be able to
* determine the checksum of each of the gang constituents so it
* treats the checksum comparison differently than what we need
* for l2arc blocks. This prevents us from using the
* zio_checksum_error() interface directly. Instead we must call the
* zio_checksum_error_impl() so that we can ensure the checksum is
* generated using the correct checksum algorithm and accounts for the
* logical I/O size and not just a gang fragment.
*/
return (zio_checksum_error_impl(zio->io_spa, zio->io_bp,
BP_GET_CHECKSUM(zio->io_bp), zio->io_abd, zio->io_size,
zio->io_offset, NULL) == 0);
}
/*
* Given a buf full of data, if ZFS_DEBUG_MODIFY is enabled this computes a
* checksum and attaches it to the buf's hdr so that we can ensure that the buf
* isn't modified later on. If buf is compressed or there is already a checksum
* on the hdr, this is a no-op (we only checksum uncompressed bufs).
*/
static void
arc_cksum_compute(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
ASSERT(HDR_HAS_L1HDR(hdr));
mutex_enter(&buf->b_hdr->b_l1hdr.b_freeze_lock);
if (hdr->b_l1hdr.b_freeze_cksum != NULL) {
ASSERT(arc_hdr_has_uncompressed_buf(hdr));
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
} else if (ARC_BUF_COMPRESSED(buf)) {
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
return;
}
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(!ARC_BUF_COMPRESSED(buf));
hdr->b_l1hdr.b_freeze_cksum = kmem_alloc(sizeof (zio_cksum_t),
KM_SLEEP);
fletcher_2_native(buf->b_data, arc_buf_size(buf), NULL,
hdr->b_l1hdr.b_freeze_cksum);
mutex_exit(&hdr->b_l1hdr.b_freeze_lock);
arc_buf_watch(buf);
}
#ifndef _KERNEL
void
arc_buf_sigsegv(int sig, siginfo_t *si, void *unused)
{
panic("Got SIGSEGV at address: 0x%lx\n", (long)si->si_addr);
}
#endif
/* ARGSUSED */
static void
arc_buf_unwatch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch) {
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ | PROT_WRITE));
}
#endif
}
/* ARGSUSED */
static void
arc_buf_watch(arc_buf_t *buf)
{
#ifndef _KERNEL
if (arc_watch)
ASSERT0(mprotect(buf->b_data, arc_buf_size(buf),
PROT_READ));
#endif
}
static arc_buf_contents_t
arc_buf_type(arc_buf_hdr_t *hdr)
{
arc_buf_contents_t type;
if (HDR_ISTYPE_METADATA(hdr)) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
}
VERIFY3U(hdr->b_type, ==, type);
return (type);
}
boolean_t
arc_is_metadata(arc_buf_t *buf)
{
return (HDR_ISTYPE_METADATA(buf->b_hdr) != 0);
}
static uint32_t
arc_bufc_to_flags(arc_buf_contents_t type)
{
switch (type) {
case ARC_BUFC_DATA:
/* metadata field is 0 if buffer contains normal data */
return (0);
case ARC_BUFC_METADATA:
return (ARC_FLAG_BUFC_METADATA);
default:
break;
}
panic("undefined ARC buffer type!");
return ((uint32_t)-1);
}
void
arc_buf_thaw(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
arc_cksum_verify(buf);
/*
* Compressed buffers do not manipulate the b_freeze_cksum or
* allocate b_thawed.
*/
if (ARC_BUF_COMPRESSED(buf)) {
ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
arc_hdr_has_uncompressed_buf(hdr));
return;
}
ASSERT(HDR_HAS_L1HDR(hdr));
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
}
void
arc_buf_freeze(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
kmutex_t *hash_lock;
if (!(zfs_flags & ZFS_DEBUG_MODIFY))
return;
if (ARC_BUF_COMPRESSED(buf)) {
ASSERT(hdr->b_l1hdr.b_freeze_cksum == NULL ||
arc_hdr_has_uncompressed_buf(hdr));
return;
}
hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(hdr->b_l1hdr.b_freeze_cksum != NULL ||
hdr->b_l1hdr.b_state == arc_anon);
arc_cksum_compute(buf);
mutex_exit(hash_lock);
}
/*
* The arc_buf_hdr_t's b_flags should never be modified directly. Instead,
* the following functions should be used to ensure that the flags are
* updated in a thread-safe way. When manipulating the flags either
* the hash_lock must be held or the hdr must be undiscoverable. This
* ensures that we're not racing with any other threads when updating
* the flags.
*/
static inline void
arc_hdr_set_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
hdr->b_flags |= flags;
}
static inline void
arc_hdr_clear_flags(arc_buf_hdr_t *hdr, arc_flags_t flags)
{
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
hdr->b_flags &= ~flags;
}
/*
* Setting the compression bits in the arc_buf_hdr_t's b_flags is
* done in a special way since we have to clear and set bits
* at the same time. Consumers that wish to set the compression bits
 * must use this function to ensure that the flags are updated in a
 * thread-safe manner.
*/
static void
arc_hdr_set_compress(arc_buf_hdr_t *hdr, enum zio_compress cmp)
{
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
/*
 * Holes and embedded blocks will always have a psize of 0, so
 * we ignore the compression of the blkptr and mark the hdr
 * as uncompressed.
*/
if (!zfs_compressed_arc_enabled || HDR_GET_PSIZE(hdr) == 0) {
arc_hdr_clear_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(!HDR_COMPRESSION_ENABLED(hdr));
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_COMPRESSED_ARC);
ASSERT(HDR_COMPRESSION_ENABLED(hdr));
}
HDR_SET_COMPRESS(hdr, cmp);
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, cmp);
}
/*
* Looks for another buf on the same hdr which has the data decompressed, copies
* from it, and returns true. If no such buf exists, returns false.
*/
static boolean_t
arc_buf_try_copy_decompressed_data(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t copied = B_FALSE;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(!ARC_BUF_COMPRESSED(buf));
for (arc_buf_t *from = hdr->b_l1hdr.b_buf; from != NULL;
from = from->b_next) {
/* can't use our own data buffer */
if (from == buf) {
continue;
}
if (!ARC_BUF_COMPRESSED(from)) {
bcopy(from->b_data, buf->b_data, arc_buf_size(buf));
copied = B_TRUE;
break;
}
}
/*
* There were no decompressed bufs, so there should not be a
* checksum on the hdr either.
*/
EQUIV(!copied, hdr->b_l1hdr.b_freeze_cksum == NULL);
return (copied);
}
/*
* Return the size of the block, b_pabd, that is stored in the arc_buf_hdr_t.
*/
static uint64_t
arc_hdr_size(arc_buf_hdr_t *hdr)
{
uint64_t size;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
HDR_GET_PSIZE(hdr) > 0) {
size = HDR_GET_PSIZE(hdr);
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, 0);
size = HDR_GET_LSIZE(hdr);
}
return (size);
}
static int
arc_hdr_authenticate(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
{
int ret;
uint64_t csize;
uint64_t lsize = HDR_GET_LSIZE(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
void *tmpbuf = NULL;
abd_t *abd = hdr->b_l1hdr.b_pabd;
ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_AUTHENTICATED(hdr));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* The MAC is calculated on the compressed data that is stored on disk.
* However, if compressed arc is disabled we will only have the
* decompressed data available to us now. Compress it into a temporary
* abd so we can verify the MAC. The performance overhead of this will
* be relatively low, since most objects in an encrypted objset will
* be encrypted (instead of authenticated) anyway.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
tmpbuf = zio_buf_alloc(lsize);
abd = abd_get_from_buf(tmpbuf, lsize);
abd_take_ownership_of_buf(abd, B_TRUE);
csize = zio_compress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmpbuf, lsize);
ASSERT3U(csize, <=, psize);
abd_zero_off(abd, csize, psize - csize);
}
/*
* Authentication is best effort. We authenticate whenever the key is
* available. If we succeed we clear ARC_FLAG_NOAUTH.
*/
if (hdr->b_crypt_hdr.b_ot == DMU_OT_OBJSET) {
ASSERT3U(HDR_GET_COMPRESS(hdr), ==, ZIO_COMPRESS_OFF);
ASSERT3U(lsize, ==, psize);
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa, dsobj, abd,
psize, hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
} else {
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj, abd, psize,
hdr->b_crypt_hdr.b_mac);
}
if (ret == 0)
arc_hdr_clear_flags(hdr, ARC_FLAG_NOAUTH);
else if (ret != ENOENT)
goto error;
if (tmpbuf != NULL)
abd_free(abd);
return (0);
error:
if (tmpbuf != NULL)
abd_free(abd);
return (ret);
}
/*
* This function will take a header that only has raw encrypted data in
* b_crypt_hdr.b_rabd and decrypt it into a new buffer which is stored in
* b_l1hdr.b_pabd. If designated in the header flags, this function will
* also decompress the data.
*/
static int
arc_hdr_decrypt(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj)
{
int ret;
dsl_crypto_key_t *dck = NULL;
abd_t *cabd = NULL;
void *tmp = NULL;
boolean_t no_crypt = B_FALSE;
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT(HDR_ENCRYPTED(hdr));
arc_hdr_alloc_abd(hdr, B_FALSE);
/*
* We must be careful to use the passed-in dsobj value here and
* not the value in b_dsobj. b_dsobj is meant to be a best guess for
* the L2ARC, which has the luxury of being able to fail without real
* consequences (the data simply won't make it to the L2ARC). In
* reality, the dsobj stored in the header may belong to a dataset
* that has been unmounted or otherwise disowned, meaning the key
* won't be accessible via that dsobj anymore.
*/
ret = spa_keystore_lookup_key(spa, dsobj, FTAG, &dck);
if (ret != 0) {
ret = SET_ERROR(EACCES);
goto error;
}
ret = zio_do_crypt_abd(B_FALSE, &dck->dck_key,
hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_ot,
hdr->b_crypt_hdr.b_iv, hdr->b_crypt_hdr.b_mac,
HDR_GET_PSIZE(hdr), bswap, hdr->b_l1hdr.b_pabd,
hdr->b_crypt_hdr.b_rabd, &no_crypt);
if (ret != 0)
goto error;
if (no_crypt) {
abd_copy(hdr->b_l1hdr.b_pabd, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
}
/*
* If this header has disabled arc compression but the b_pabd is
* compressed after decrypting it, we need to decompress the newly
* decrypted data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
/*
* We want to make sure that we are correctly honoring the
* zfs_abd_scatter_enabled setting, so we allocate an abd here
* and then loan a buffer from it, rather than allocating a
* linear buffer and wrapping it in an abd later.
*/
cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr));
if (ret != 0) {
abd_return_buf(cabd, tmp, arc_hdr_size(hdr));
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
}
spa_keystore_dsl_key_rele(spa, dck, FTAG);
return (0);
error:
arc_hdr_free_abd(hdr, B_FALSE);
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (cabd != NULL)
arc_free_data_buf(hdr, cabd, arc_hdr_size(hdr), hdr);
return (ret);
}
/*
* This function is called during arc_buf_fill() to prepare the header's
 * abd plaintext pointer for use. This involves authenticating protected
* data and decrypting encrypted data into the plaintext abd.
*/
static int
arc_fill_hdr_crypt(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, spa_t *spa,
uint64_t dsobj, boolean_t noauth)
{
int ret;
ASSERT(HDR_PROTECTED(hdr));
if (hash_lock != NULL)
mutex_enter(hash_lock);
if (HDR_NOAUTH(hdr) && !noauth) {
/*
* The caller requested authenticated data but our data has
* not been authenticated yet. Verify the MAC now if we can.
*/
ret = arc_hdr_authenticate(hdr, spa, dsobj);
if (ret != 0)
goto error;
} else if (HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd == NULL) {
/*
* If we only have the encrypted version of the data, but the
* unencrypted version was requested we take this opportunity
* to store the decrypted version in the header for future use.
*/
ret = arc_hdr_decrypt(hdr, spa, dsobj);
if (ret != 0)
goto error;
}
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (0);
error:
if (hash_lock != NULL)
mutex_exit(hash_lock);
return (ret);
}
/*
* This function is used by the dbuf code to decrypt bonus buffers in place.
* The dbuf code itself doesn't have any locking for decrypting a shared dnode
* block, so we use the hash lock here to protect against concurrent calls to
* arc_buf_fill().
*/
static void
arc_buf_untransform_in_place(arc_buf_t *buf, kmutex_t *hash_lock)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(HDR_ENCRYPTED(hdr));
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
ASSERT(HDR_LOCK(hdr) == NULL || MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio_crypt_copy_dnode_bonus(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
hdr->b_crypt_hdr.b_ebufcnt -= 1;
}
/*
* Given a buf that has a data buffer attached to it, this function will
* efficiently fill the buf with data of the specified compression setting from
* the hdr and update the hdr's b_freeze_cksum if necessary. If the buf and hdr
* are already sharing a data buf, no copy is performed.
*
* If the buf is marked as compressed but uncompressed data was requested, this
* will allocate a new data buffer for the buf, remove that flag, and fill the
* buf with uncompressed data. You can't request a compressed buf on a hdr with
* uncompressed data, and (since we haven't added support for it yet) if you
* want compressed data your buf must already be marked as compressed and have
* the correct-sized data buffer.
*/
static int
arc_buf_fill(arc_buf_t *buf, spa_t *spa, uint64_t dsobj, arc_fill_flags_t flags)
{
int error = 0;
arc_buf_hdr_t *hdr = buf->b_hdr;
boolean_t hdr_compressed =
(arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
boolean_t compressed = (flags & ARC_FILL_COMPRESSED) != 0;
boolean_t encrypted = (flags & ARC_FILL_ENCRYPTED) != 0;
dmu_object_byteswap_t bswap = hdr->b_l1hdr.b_byteswap;
kmutex_t *hash_lock = (flags & ARC_FILL_LOCKED) ? NULL : HDR_LOCK(hdr);
ASSERT3P(buf->b_data, !=, NULL);
IMPLY(compressed, hdr_compressed || ARC_BUF_ENCRYPTED(buf));
IMPLY(compressed, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, HDR_ENCRYPTED(hdr));
IMPLY(encrypted, ARC_BUF_ENCRYPTED(buf));
IMPLY(encrypted, ARC_BUF_COMPRESSED(buf));
IMPLY(encrypted, !ARC_BUF_SHARED(buf));
/*
* If the caller wanted encrypted data we just need to copy it from
* b_rabd and potentially byteswap it. We won't be able to do any
* further transforms on it.
*/
if (encrypted) {
ASSERT(HDR_HAS_RABD(hdr));
abd_copy_to_buf(buf->b_data, hdr->b_crypt_hdr.b_rabd,
HDR_GET_PSIZE(hdr));
goto byteswap;
}
/*
 * Adjust encrypted and authenticated headers to accommodate the
* request if needed.
*/
if (HDR_PROTECTED(hdr)) {
error = arc_fill_hdr_crypt(hdr, hash_lock, spa,
dsobj, !!(flags & ARC_FILL_NOAUTH));
if (error != 0)
return (error);
}
/*
* There is a special case here for dnode blocks which are
* decrypting their bonus buffers. These blocks may request to
* be decrypted in-place. This is necessary because there may
* be many dnodes pointing into this buffer and there is
* currently no method to synchronize replacing the backing
* b_data buffer and updating all of the pointers. Here we use
* the hash lock to ensure there are no races. If the need
* arises for other types to be decrypted in-place, they must
* add handling here as well.
*/
if ((flags & ARC_FILL_IN_PLACE) != 0) {
ASSERT(!hdr_compressed);
ASSERT(!compressed);
ASSERT(!encrypted);
if (HDR_ENCRYPTED(hdr) && ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(hdr->b_crypt_hdr.b_ot, ==, DMU_OT_DNODE);
if (hash_lock != NULL)
mutex_enter(hash_lock);
arc_buf_untransform_in_place(buf, hash_lock);
if (hash_lock != NULL)
mutex_exit(hash_lock);
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
}
return (0);
}
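/*
* If the hdr's compression setting already matches what the caller
* asked for, a straight copy from b_pabd is sufficient (and even that
* is skipped when the buf already shares the hdr's buffer). Otherwise
* the data must be decompressed into the buf below.
*/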
if (hdr_compressed == compressed) {
if (!arc_buf_is_shared(buf)) {
abd_copy_to_buf(buf->b_data, hdr->b_l1hdr.b_pabd,
arc_buf_size(buf));
}
} else {
ASSERT(hdr_compressed);
ASSERT(!compressed);
ASSERT3U(HDR_GET_LSIZE(hdr), !=, HDR_GET_PSIZE(hdr));
/*
* If the buf is sharing its data with the hdr, unlink it and
* allocate a new data buffer for the buf.
*/
if (arc_buf_is_shared(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
/* We need to give the buf its own b_data */
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
/* Previously overhead was 0; just add new overhead */
ARCSTAT_INCR(arcstat_overhead_size, HDR_GET_LSIZE(hdr));
} else if (ARC_BUF_COMPRESSED(buf)) {
/* We need to reallocate the buf's b_data */
arc_free_data_buf(hdr, buf->b_data, HDR_GET_PSIZE(hdr),
buf);
buf->b_data =
arc_get_data_buf(hdr, HDR_GET_LSIZE(hdr), buf);
/* We increased the size of b_data; update overhead */
ARCSTAT_INCR(arcstat_overhead_size,
HDR_GET_LSIZE(hdr) - HDR_GET_PSIZE(hdr));
}
/*
* Regardless of the buf's previous compression settings, it
* should not be compressed at the end of this function.
*/
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
/*
* Try copying the data from another buf which already has a
* decompressed version. If that's not possible, it's time to
* bite the bullet and decompress the data from the hdr.
*/
if (arc_buf_try_copy_decompressed_data(buf)) {
/* Skip byteswapping and checksumming (already done) */
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, !=, NULL);
return (0);
} else {
error = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, buf->b_data,
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
/*
* Absent hardware errors or software bugs, this should
* be impossible, but log it anyway so we can debug it.
*/
if (error != 0) {
zfs_dbgmsg(
"hdr %p, compress %d, psize %d, lsize %d",
hdr, arc_hdr_get_compress(hdr),
HDR_GET_PSIZE(hdr), HDR_GET_LSIZE(hdr));
return (SET_ERROR(EIO));
}
}
}
byteswap:
/* Byteswap the buf's data if necessary */
if (bswap != DMU_BSWAP_NUMFUNCS) {
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT3U(bswap, <, DMU_BSWAP_NUMFUNCS);
dmu_ot_byteswap[bswap].ob_func(buf->b_data, HDR_GET_LSIZE(hdr));
}
/* Compute the hdr's checksum if necessary */
arc_cksum_compute(buf);
return (0);
}
/*
* If this function is being called to decrypt an encrypted buffer or verify an
* authenticated one, the key must be loaded and a mapping must be made
* available in the keystore via spa_keystore_create_mapping() or one of its
* callers.
*/
int
arc_untransform(arc_buf_t *buf, spa_t *spa, const zbookmark_phys_t *zb,
boolean_t in_place)
{
int ret;
arc_fill_flags_t flags = 0;
if (in_place)
flags |= ARC_FILL_IN_PLACE;
ret = arc_buf_fill(buf, spa, zb->zb_objset, flags);
if (ret == ECKSUM) {
/*
* Convert authentication and decryption errors to EIO
* (and generate an ereport) before leaving the ARC.
*/
ret = SET_ERROR(EIO);
zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0, 0);
}
return (ret);
}
/*
* Increment the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_increment(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) refcount_add_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
ASSERT(!GHOST_STATE(state));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) refcount_add_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) refcount_add_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) refcount_add_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Decrement the amount of evictable space in the arc_state_t's refcount.
* We account for the space used by the hdr and the arc buf individually
* so that we can add and remove them from the refcount individually.
*/
static void
arc_evictable_space_decrement(arc_buf_hdr_t *hdr, arc_state_t *state)
{
arc_buf_contents_t type = arc_buf_type(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(state)) {
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
(void) refcount_remove_many(&state->arcs_esize[type],
HDR_GET_LSIZE(hdr), hdr);
return;
}
ASSERT(!GHOST_STATE(state));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) refcount_remove_many(&state->arcs_esize[type],
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) refcount_remove_many(&state->arcs_esize[type],
HDR_GET_PSIZE(hdr), hdr);
}
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
if (arc_buf_is_shared(buf))
continue;
(void) refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
}
/*
* Add a reference to this hdr indicating that someone is actively
* referencing that memory. When the refcount transitions from 0 to 1,
* we remove it from the respective arc_state_t list to indicate that
* it is not evictable.
*/
static void
add_reference(arc_buf_hdr_t *hdr, void *tag)
{
arc_state_t *state;
ASSERT(HDR_HAS_L1HDR(hdr));
if (!MUTEX_HELD(HDR_LOCK(hdr))) {
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
}
state = hdr->b_l1hdr.b_state;
if ((refcount_add(&hdr->b_l1hdr.b_refcnt, tag) == 1) &&
(state != arc_anon)) {
/* We don't use the L2-only state list. */
if (state != arc_l2c_only) {
multilist_remove(state->arcs_list[arc_buf_type(hdr)],
hdr);
arc_evictable_space_decrement(hdr, state);
}
/* remove the prefetch flag if we get a reference */
arc_hdr_clear_flags(hdr, ARC_FLAG_PREFETCH);
}
}
/*
* Remove a reference from this hdr. When the reference transitions from
* 1 to 0 and we're not anonymous, then we add this hdr to the arc_state_t's
* list making it eligible for eviction.
*/
static int
remove_reference(arc_buf_hdr_t *hdr, kmutex_t *hash_lock, void *tag)
{
int cnt;
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(state == arc_anon || MUTEX_HELD(hash_lock));
ASSERT(!GHOST_STATE(state));
/*
* arc_l2c_only counts as a ghost state so we don't need to explicitly
* check to prevent usage of the arc_l2c_only list.
*/
if (((cnt = refcount_remove(&hdr->b_l1hdr.b_refcnt, tag)) == 0) &&
(state != arc_anon)) {
multilist_insert(state->arcs_list[arc_buf_type(hdr)], hdr);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
arc_evictable_space_increment(hdr, state);
}
return (cnt);
}
/*
* Returns detailed information about a specific arc buffer. When the
* state_index argument is set the function will calculate the arc header
* list position for its arc state. Since this requires a linear traversal
* callers are strongly encouraged not to do this. However, it can be helpful
* for targeted analysis so the functionality is provided.
*/
void
arc_buf_info(arc_buf_t *ab, arc_buf_info_t *abi, int state_index)
{
arc_buf_hdr_t *hdr = ab->b_hdr;
l1arc_buf_hdr_t *l1hdr = NULL;
l2arc_buf_hdr_t *l2hdr = NULL;
arc_state_t *state = NULL;
memset(abi, 0, sizeof (arc_buf_info_t));
if (hdr == NULL)
return;
abi->abi_flags = hdr->b_flags;
if (HDR_HAS_L1HDR(hdr)) {
l1hdr = &hdr->b_l1hdr;
state = l1hdr->b_state;
}
if (HDR_HAS_L2HDR(hdr))
l2hdr = &hdr->b_l2hdr;
if (l1hdr) {
abi->abi_bufcnt = l1hdr->b_bufcnt;
abi->abi_access = l1hdr->b_arc_access;
abi->abi_mru_hits = l1hdr->b_mru_hits;
abi->abi_mru_ghost_hits = l1hdr->b_mru_ghost_hits;
abi->abi_mfu_hits = l1hdr->b_mfu_hits;
abi->abi_mfu_ghost_hits = l1hdr->b_mfu_ghost_hits;
abi->abi_holds = refcount_count(&l1hdr->b_refcnt);
}
if (l2hdr) {
abi->abi_l2arc_dattr = l2hdr->b_daddr;
abi->abi_l2arc_hits = l2hdr->b_hits;
}
abi->abi_state_type = state ? state->arcs_state : ARC_STATE_ANON;
abi->abi_state_contents = arc_buf_type(hdr);
abi->abi_size = arc_hdr_size(hdr);
}
/*
* Move the supplied buffer to the indicated state. The hash lock
* for the buffer must be held by the caller.
*/
static void
arc_change_state(arc_state_t *new_state, arc_buf_hdr_t *hdr,
kmutex_t *hash_lock)
{
arc_state_t *old_state;
int64_t refcnt;
uint32_t bufcnt;
boolean_t update_old, update_new;
arc_buf_contents_t buftype = arc_buf_type(hdr);
/*
* We almost always have an L1 hdr here, since we call arc_hdr_realloc()
* in arc_read() when bringing a buffer out of the L2ARC. However, the
* L1 hdr doesn't always exist when we change state to arc_anon before
* destroying a header, in which case reallocating to add the L1 hdr is
* pointless.
*/
if (HDR_HAS_L1HDR(hdr)) {
old_state = hdr->b_l1hdr.b_state;
refcnt = refcount_count(&hdr->b_l1hdr.b_refcnt);
bufcnt = hdr->b_l1hdr.b_bufcnt;
update_old = (bufcnt > 0 || hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
} else {
old_state = arc_l2c_only;
refcnt = 0;
bufcnt = 0;
update_old = B_FALSE;
}
update_new = update_old;
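/*
* update_old and update_new track whether this hdr owns any data
* (bufs, b_pabd or a raw b_rabd) that must be accounted for in the
* old and new states' arcs_size refcounts. When moving to or from a
* ghost state they are forced on, since ghost hdrs are accounted by
* lsize with the hdr pointer itself as the reference tag.
*/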
ASSERT(MUTEX_HELD(hash_lock));
ASSERT3P(new_state, !=, old_state);
ASSERT(!GHOST_STATE(new_state) || bufcnt == 0);
ASSERT(old_state != arc_anon || bufcnt <= 1);
/*
* If this buffer is evictable, transfer it from the
* old state list to the new state list.
*/
if (refcnt == 0) {
if (old_state != arc_anon && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_remove(old_state->arcs_list[buftype], hdr);
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_old = B_TRUE;
}
arc_evictable_space_decrement(hdr, old_state);
}
if (new_state != arc_anon && new_state != arc_l2c_only) {
/*
* An L1 header always exists here, since if we're
* moving to some L1-cached state (i.e. not l2c_only or
* anonymous), we realloc the header to add an L1hdr
* beforehand.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
multilist_insert(new_state->arcs_list[buftype], hdr);
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
update_new = B_TRUE;
}
arc_evictable_space_increment(hdr, new_state);
}
}
ASSERT(!HDR_EMPTY(hdr));
if (new_state == arc_anon && HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
/* adjust state sizes (ignore arc_l2c_only) */
if (update_new && new_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(new_state)) {
ASSERT0(bufcnt);
/*
* When moving a header to a ghost state, we first
* remove all arc buffers. Thus, we'll have a
* bufcnt of zero, and no arc buffer to use for
* the reference. As a result, we use the arc
* header pointer for the reference.
*/
(void) refcount_add_many(&new_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) refcount_add_many(&new_state->arcs_size,
arc_buf_size(buf), buf);
}
ASSERT3U(bufcnt, ==, buffers);
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) refcount_add_many(&new_state->arcs_size,
arc_hdr_size(hdr), hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) refcount_add_many(&new_state->arcs_size,
HDR_GET_PSIZE(hdr), hdr);
}
}
}
if (update_old && old_state != arc_l2c_only) {
ASSERT(HDR_HAS_L1HDR(hdr));
if (GHOST_STATE(old_state)) {
ASSERT0(bufcnt);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* When moving a header off of a ghost state,
* the header will not contain any arc buffers.
* We use the arc header pointer for the reference
* which is exactly what we did when we put the
* header on the ghost state.
*/
(void) refcount_remove_many(&old_state->arcs_size,
HDR_GET_LSIZE(hdr), hdr);
} else {
uint32_t buffers = 0;
/*
* Each individual buffer holds a unique reference,
* thus we must remove each of these references one
* at a time.
*/
for (arc_buf_t *buf = hdr->b_l1hdr.b_buf; buf != NULL;
buf = buf->b_next) {
ASSERT3U(bufcnt, !=, 0);
buffers++;
/*
* When the arc_buf_t is sharing the data
* block with the hdr, the owner of the
* reference belongs to the hdr. Only
* add to the refcount if the arc_buf_t is
* not shared.
*/
if (arc_buf_is_shared(buf))
continue;
(void) refcount_remove_many(
&old_state->arcs_size, arc_buf_size(buf),
buf);
}
ASSERT3U(bufcnt, ==, buffers);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
if (hdr->b_l1hdr.b_pabd != NULL) {
(void) refcount_remove_many(
&old_state->arcs_size, arc_hdr_size(hdr),
hdr);
}
if (HDR_HAS_RABD(hdr)) {
(void) refcount_remove_many(
&old_state->arcs_size, HDR_GET_PSIZE(hdr),
hdr);
}
}
}
if (HDR_HAS_L1HDR(hdr))
hdr->b_l1hdr.b_state = new_state;
/*
* L2 headers should never be on the L2 state list since they don't
* have L1 headers allocated.
*/
ASSERT(multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_DATA]) &&
multilist_is_empty(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]));
}
void
arc_space_consume(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, space);
break;
case ARC_SPACE_DNODE:
ARCSTAT_INCR(arcstat_dnode_size, space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, space);
break;
case ARC_SPACE_L2HDRS:
ARCSTAT_INCR(arcstat_l2_hdr_size, space);
break;
}
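/* Everything other than plain data also counts toward arc_meta_used. */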
if (type != ARC_SPACE_DATA)
ARCSTAT_INCR(arcstat_meta_used, space);
atomic_add_64(&arc_size, space);
}
void
arc_space_return(uint64_t space, arc_space_type_t type)
{
ASSERT(type >= 0 && type < ARC_SPACE_NUMTYPES);
switch (type) {
default:
break;
case ARC_SPACE_DATA:
ARCSTAT_INCR(arcstat_data_size, -space);
break;
case ARC_SPACE_META:
ARCSTAT_INCR(arcstat_metadata_size, -space);
break;
case ARC_SPACE_BONUS:
ARCSTAT_INCR(arcstat_bonus_size, -space);
break;
case ARC_SPACE_DNODE:
ARCSTAT_INCR(arcstat_dnode_size, -space);
break;
case ARC_SPACE_DBUF:
ARCSTAT_INCR(arcstat_dbuf_size, -space);
break;
case ARC_SPACE_HDRS:
ARCSTAT_INCR(arcstat_hdr_size, -space);
break;
case ARC_SPACE_L2HDRS:
ARCSTAT_INCR(arcstat_l2_hdr_size, -space);
break;
}
if (type != ARC_SPACE_DATA) {
ASSERT(arc_meta_used >= space);
if (arc_meta_max < arc_meta_used)
arc_meta_max = arc_meta_used;
ARCSTAT_INCR(arcstat_meta_used, -space);
}
ASSERT(arc_size >= space);
atomic_add_64(&arc_size, -space);
}
/*
* Given a hdr and a buf, returns whether that buf can share its b_data buffer
* with the hdr's b_pabd.
*/
static boolean_t
arc_can_share(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
/*
* The criteria for sharing a hdr's data are:
* 1. the buffer is not encrypted
* 2. the hdr's compression matches the buf's compression
* 3. the hdr doesn't need to be byteswapped
* 4. the hdr isn't already being shared
* 5. the buf is either compressed or it is the last buf in the hdr list
*
* Criterion #5 maintains the invariant that shared uncompressed
* bufs must be the final buf in the hdr's b_buf list. Reading this, you
* might ask, "if a compressed buf is allocated first, won't that be the
* last thing in the list?", but in that case it's impossible to create
* a shared uncompressed buf anyway (because the hdr must be compressed
* to have the compressed buf). You might also think that #3 is
* sufficient to make this guarantee, however it's possible
* (specifically in the rare L2ARC write race mentioned in
* arc_buf_alloc_impl()) there will be an existing uncompressed buf that
* is sharable, but wasn't at the time of its allocation. Rather than
* allow a new shared uncompressed buf to be created and then shuffle
* the list around to make it the last element, this simply disallows
* sharing if the new buf isn't the first to be added.
*/
ASSERT3P(buf->b_hdr, ==, hdr);
boolean_t hdr_compressed =
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF;
boolean_t buf_compressed = ARC_BUF_COMPRESSED(buf) != 0;
return (!ARC_BUF_ENCRYPTED(buf) &&
buf_compressed == hdr_compressed &&
hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS &&
!HDR_SHARED_DATA(hdr) &&
(ARC_BUF_LAST(buf) || ARC_BUF_COMPRESSED(buf)));
}
/*
* Allocate a buf for this hdr. If you care about the data that's in the hdr,
* or if you want a compressed buffer, pass those flags in. Returns 0 if the
* copy was made successfully, or an error code otherwise.
*/
static int
arc_buf_alloc_impl(arc_buf_hdr_t *hdr, spa_t *spa, uint64_t dsobj, void *tag,
boolean_t encrypted, boolean_t compressed, boolean_t noauth,
boolean_t fill, arc_buf_t **ret)
{
arc_buf_t *buf;
arc_fill_flags_t flags = ARC_FILL_LOCKED;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
VERIFY(hdr->b_type == ARC_BUFC_DATA ||
hdr->b_type == ARC_BUFC_METADATA);
ASSERT3P(ret, !=, NULL);
ASSERT3P(*ret, ==, NULL);
IMPLY(encrypted, compressed);
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_l2_hits = 0;
buf = *ret = kmem_cache_alloc(buf_cache, KM_PUSHPAGE);
buf->b_hdr = hdr;
buf->b_data = NULL;
buf->b_next = hdr->b_l1hdr.b_buf;
buf->b_flags = 0;
add_reference(hdr, tag);
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
/*
* Only honor requests for compressed bufs if the hdr is actually
* compressed. This must be overridden if the buffer is encrypted since
* encrypted buffers cannot be decompressed.
*/
if (encrypted) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
buf->b_flags |= ARC_BUF_FLAG_ENCRYPTED;
flags |= ARC_FILL_COMPRESSED | ARC_FILL_ENCRYPTED;
} else if (compressed &&
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
buf->b_flags |= ARC_BUF_FLAG_COMPRESSED;
flags |= ARC_FILL_COMPRESSED;
}
if (noauth) {
ASSERT0(encrypted);
flags |= ARC_FILL_NOAUTH;
}
/*
* If the hdr's data can be shared then we share the data buffer and
* set the appropriate bit in the hdr's b_flags to indicate the hdr is
* sharing its data with the arc_buf_t. Otherwise, we allocate a new
* buffer to store the buf's data.
*
* There are two additional restrictions here because we're sharing
* hdr -> buf instead of the usual buf -> hdr. First, the hdr can't be
* actively involved in an L2ARC write, because if this buf is used by
* an arc_write() then the hdr's data buffer will be released when the
* write completes, even though the L2ARC write might still be using it.
* Second, the hdr's ABD must be linear so that the buf's user doesn't
* need to be ABD-aware.
*/
boolean_t can_share = arc_can_share(hdr, buf) && !HDR_L2_WRITING(hdr) &&
hdr->b_l1hdr.b_pabd != NULL && abd_is_linear(hdr->b_l1hdr.b_pabd);
/* Set up b_data and sharing */
if (can_share) {
buf->b_data = abd_to_buf(hdr->b_l1hdr.b_pabd);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
buf->b_data =
arc_get_data_buf(hdr, arc_buf_size(buf), buf);
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
VERIFY3P(buf->b_data, !=, NULL);
hdr->b_l1hdr.b_buf = buf;
hdr->b_l1hdr.b_bufcnt += 1;
if (encrypted)
hdr->b_crypt_hdr.b_ebufcnt += 1;
/*
* If the user wants the data from the hdr, we need to either copy or
* decompress the data.
*/
if (fill) {
return (arc_buf_fill(buf, spa, dsobj, flags));
}
return (0);
}
static char *arc_onloan_tag = "onloan";
static inline void
arc_loaned_bytes_update(int64_t delta)
{
atomic_add_64(&arc_loaned_bytes, delta);
/* assert that it did not wrap around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
}
/*
* Loan out an anonymous arc buffer. Loaned buffers are not counted as in
* flight data by arc_tempreserve_space() until they are "returned". Loaned
* buffers must be returned to the arc before they can be used by the DMU or
* freed.
*/
arc_buf_t *
arc_loan_buf(spa_t *spa, boolean_t is_metadata, int size)
{
arc_buf_t *buf = arc_alloc_buf(spa, arc_onloan_tag,
is_metadata ? ARC_BUFC_METADATA : ARC_BUFC_DATA, size);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_compressed_buf(spa_t *spa, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type)
{
arc_buf_t *buf = arc_alloc_compressed_buf(spa, arc_onloan_tag,
psize, lsize, compression_type);
arc_loaned_bytes_update(arc_buf_size(buf));
return (buf);
}
arc_buf_t *
arc_loan_raw_buf(spa_t *spa, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type)
{
arc_buf_t *buf = arc_alloc_raw_buf(spa, arc_onloan_tag, dsobj,
byteorder, salt, iv, mac, ot, psize, lsize, compression_type);
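/*
* Raw bufs contain the on-disk (possibly encrypted) data, so the
* loan is accounted by psize here rather than by arc_buf_size().
*/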
atomic_add_64(&arc_loaned_bytes, psize);
return (buf);
}
/*
* Return a loaned arc buffer to the arc.
*/
void
arc_return_buf(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) refcount_add(&hdr->b_l1hdr.b_refcnt, tag);
(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
arc_loaned_bytes_update(-arc_buf_size(buf));
}
/* Detach an arc_buf from a dbuf (tag) */
void
arc_loan_inuse_buf(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(buf->b_data, !=, NULL);
ASSERT(HDR_HAS_L1HDR(hdr));
(void) refcount_add(&hdr->b_l1hdr.b_refcnt, arc_onloan_tag);
(void) refcount_remove(&hdr->b_l1hdr.b_refcnt, tag);
arc_loaned_bytes_update(arc_buf_size(buf));
}
static void
l2arc_free_abd_on_write(abd_t *abd, size_t size, arc_buf_contents_t type)
{
l2arc_data_free_t *df = kmem_alloc(sizeof (*df), KM_SLEEP);
df->l2df_abd = abd;
df->l2df_size = size;
df->l2df_type = type;
mutex_enter(&l2arc_free_on_write_mtx);
list_insert_head(l2arc_free_on_write, df);
mutex_exit(&l2arc_free_on_write_mtx);
}
static void
arc_hdr_free_on_write(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
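/*
* The raw (encrypted) ABD is always psize bytes while the normal
* b_pabd is sized by arc_hdr_size(); use the matching size for the
* accounting below and for the free-on-write list entry.
*/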
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) refcount_remove_many(&state->arcs_esize[type],
size, hdr);
}
(void) refcount_remove_many(&state->arcs_size, size, hdr);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
if (free_rdata) {
l2arc_free_abd_on_write(hdr->b_crypt_hdr.b_rabd, size, type);
} else {
l2arc_free_abd_on_write(hdr->b_l1hdr.b_pabd, size, type);
}
}
/*
* Share the arc_buf_t's data with the hdr. Whenever we are sharing the
* data buffer, we transfer the refcount ownership to the hdr and update
* the appropriate kstats.
*/
static void
arc_share_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_can_share(hdr, buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!ARC_BUF_ENCRYPTED(buf));
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
/*
* Start sharing the data buffer. We transfer the
* refcount ownership to the hdr since it always owns
* the refcount whenever an arc_buf_t is shared.
*/
refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, buf, hdr);
hdr->b_l1hdr.b_pabd = abd_get_from_buf(buf->b_data, arc_buf_size(buf));
abd_take_ownership_of_buf(hdr->b_l1hdr.b_pabd,
HDR_ISTYPE_METADATA(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_SHARED_DATA);
buf->b_flags |= ARC_BUF_FLAG_SHARED;
/*
* Since we've transferred ownership to the hdr we need
* to increment its compressed and uncompressed kstats and
* decrement the overhead size.
*/
ARCSTAT_INCR(arcstat_compressed_size, arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, -arc_buf_size(buf));
}
static void
arc_unshare_buf(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
/*
* We are no longer sharing this buffer so we need
* to transfer its ownership to the rightful owner.
*/
refcount_transfer_ownership(&hdr->b_l1hdr.b_state->arcs_size, hdr, buf);
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
abd_release_ownership_of_buf(hdr->b_l1hdr.b_pabd);
abd_put(hdr->b_l1hdr.b_pabd);
hdr->b_l1hdr.b_pabd = NULL;
buf->b_flags &= ~ARC_BUF_FLAG_SHARED;
/*
* Since the buffer is no longer shared between
* the arc buf and the hdr, count it as overhead.
*/
ARCSTAT_INCR(arcstat_compressed_size, -arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
ARCSTAT_INCR(arcstat_overhead_size, arc_buf_size(buf));
}
/*
* Remove an arc_buf_t from the hdr's buf list and return the last
* arc_buf_t on the list. If no buffers remain on the list then return
* NULL.
*/
static arc_buf_t *
arc_buf_remove(arc_buf_hdr_t *hdr, arc_buf_t *buf)
{
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
arc_buf_t **bufp = &hdr->b_l1hdr.b_buf;
arc_buf_t *lastbuf = NULL;
/*
* Remove the buf from the hdr list and locate the last
* remaining buffer on the list.
*/
while (*bufp != NULL) {
if (*bufp == buf)
*bufp = buf->b_next;
/*
* If we've removed a buffer in the middle of
* the list then update the lastbuf and update
* bufp.
*/
if (*bufp != NULL) {
lastbuf = *bufp;
bufp = &(*bufp)->b_next;
}
}
buf->b_next = NULL;
ASSERT3P(lastbuf, !=, buf);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, lastbuf != NULL);
IMPLY(hdr->b_l1hdr.b_bufcnt > 0, hdr->b_l1hdr.b_buf != NULL);
IMPLY(lastbuf != NULL, ARC_BUF_LAST(lastbuf));
return (lastbuf);
}
/*
* Free up buf->b_data and pull the arc_buf_t off of the arc_buf_hdr_t's
* list and free it.
*/
static void
arc_buf_destroy_impl(arc_buf_t *buf)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Free up the data associated with the buf but only if we're not
* sharing this with the hdr. If we are sharing it with the hdr, the
* hdr is responsible for doing the free.
*/
if (buf->b_data != NULL) {
/*
* We're about to change the hdr's b_flags. We must either
* hold the hash_lock or be undiscoverable.
*/
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)) || HDR_EMPTY(hdr));
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
if (arc_buf_is_shared(buf)) {
arc_hdr_clear_flags(hdr, ARC_FLAG_SHARED_DATA);
} else {
uint64_t size = arc_buf_size(buf);
arc_free_data_buf(hdr, buf->b_data, size, buf);
ARCSTAT_INCR(arcstat_overhead_size, -size);
}
buf->b_data = NULL;
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf)) {
hdr->b_crypt_hdr.b_ebufcnt -= 1;
/*
* If we have no more encrypted buffers and we've
* already gotten a copy of the decrypted data we can
* free b_rabd to save some space.
*/
if (hdr->b_crypt_hdr.b_ebufcnt == 0 &&
HDR_HAS_RABD(hdr) && hdr->b_l1hdr.b_pabd != NULL &&
!HDR_IO_IN_PROGRESS(hdr)) {
arc_hdr_free_abd(hdr, B_TRUE);
}
}
}
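/*
* Unlink the buf from the hdr's list. The last remaining buf, if any,
* may need to take over sharing the hdr's data below.
*/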
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
if (ARC_BUF_SHARED(buf) && !ARC_BUF_COMPRESSED(buf)) {
/*
* If the current arc_buf_t is sharing its data buffer with the
* hdr, then reassign the hdr's b_pabd to share it with the new
* buffer at the end of the list. The shared buffer is always
* the last one on the hdr's buffer list.
*
* There is an equivalent case for compressed bufs, but since
* they aren't guaranteed to be the last buf in the list and
* that is an exceedingly rare case, we just allow that space to be
* wasted temporarily. We must also be careful not to share
* encrypted buffers, since they cannot be shared.
*/
if (lastbuf != NULL && !ARC_BUF_ENCRYPTED(lastbuf)) {
/* Only one buf can be shared at once */
VERIFY(!arc_buf_is_shared(lastbuf));
/* hdr is uncompressed so can't have compressed buf */
VERIFY(!ARC_BUF_COMPRESSED(lastbuf));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
arc_hdr_free_abd(hdr, B_FALSE);
/*
* We must set up a new shared block between the
* last buffer and the hdr. The data would have
* been allocated by the arc buf so we need to transfer
* ownership to the hdr since it's now being shared.
*/
arc_share_buf(hdr, lastbuf);
}
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT3P(lastbuf, !=, NULL);
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
}
/*
* Free the checksum if we're removing the last uncompressed buf from
* this hdr.
*/
if (!arc_hdr_has_uncompressed_buf(hdr)) {
arc_cksum_free(hdr);
}
/* clean up the buf */
buf->b_hdr = NULL;
kmem_cache_free(buf_cache, buf);
}
static void
arc_hdr_alloc_abd(arc_buf_hdr_t *hdr, boolean_t alloc_rdata)
{
uint64_t size;
ASSERT3U(HDR_GET_LSIZE(hdr), >, 0);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!HDR_SHARED_DATA(hdr) || alloc_rdata);
IMPLY(alloc_rdata, HDR_PROTECTED(hdr));
if (alloc_rdata) {
size = HDR_GET_PSIZE(hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, ==, NULL);
hdr->b_crypt_hdr.b_rabd = arc_get_data_abd(hdr, size, hdr);
ASSERT3P(hdr->b_crypt_hdr.b_rabd, !=, NULL);
ARCSTAT_INCR(arcstat_raw_size, size);
} else {
size = arc_hdr_size(hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
hdr->b_l1hdr.b_pabd = arc_get_data_abd(hdr, size, hdr);
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
}
ARCSTAT_INCR(arcstat_compressed_size, size);
ARCSTAT_INCR(arcstat_uncompressed_size, HDR_GET_LSIZE(hdr));
}
static void
arc_hdr_free_abd(arc_buf_hdr_t *hdr, boolean_t free_rdata)
{
uint64_t size = (free_rdata) ? HDR_GET_PSIZE(hdr) : arc_hdr_size(hdr);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
IMPLY(free_rdata, HDR_HAS_RABD(hdr));
/*
* If the hdr is currently being written to the l2arc then
* we defer freeing the data by adding it to the l2arc_free_on_write
* list. The l2arc will free the data once it's finished
* writing it to the l2arc device.
*/
if (HDR_L2_WRITING(hdr)) {
arc_hdr_free_on_write(hdr, free_rdata);
ARCSTAT_BUMP(arcstat_l2_free_on_write);
} else if (free_rdata) {
arc_free_data_abd(hdr, hdr->b_crypt_hdr.b_rabd, size, hdr);
} else {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd, size, hdr);
}
if (free_rdata) {
hdr->b_crypt_hdr.b_rabd = NULL;
ARCSTAT_INCR(arcstat_raw_size, -size);
} else {
hdr->b_l1hdr.b_pabd = NULL;
}
if (hdr->b_l1hdr.b_pabd == NULL && !HDR_HAS_RABD(hdr))
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
ARCSTAT_INCR(arcstat_compressed_size, -size);
ARCSTAT_INCR(arcstat_uncompressed_size, -HDR_GET_LSIZE(hdr));
}
static arc_buf_hdr_t *
arc_hdr_alloc(uint64_t spa, int32_t psize, int32_t lsize,
boolean_t protected, enum zio_compress compression_type,
arc_buf_contents_t type, boolean_t alloc_rdata)
{
arc_buf_hdr_t *hdr;
VERIFY(type == ARC_BUFC_DATA || type == ARC_BUFC_METADATA);
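/*
* Protected hdrs carry the extra b_crypt_hdr fields and are therefore
* allocated from a separate kmem cache.
*/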
if (protected) {
hdr = kmem_cache_alloc(hdr_full_crypt_cache, KM_PUSHPAGE);
} else {
hdr = kmem_cache_alloc(hdr_full_cache, KM_PUSHPAGE);
}
ASSERT(HDR_EMPTY(hdr));
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
HDR_SET_PSIZE(hdr, psize);
HDR_SET_LSIZE(hdr, lsize);
hdr->b_spa = spa;
hdr->b_type = type;
hdr->b_flags = 0;
arc_hdr_set_flags(hdr, arc_bufc_to_flags(type) | ARC_FLAG_HAS_L1HDR);
arc_hdr_set_compress(hdr, compression_type);
if (protected)
arc_hdr_set_flags(hdr, ARC_FLAG_PROTECTED);
hdr->b_l1hdr.b_state = arc_anon;
hdr->b_l1hdr.b_arc_access = 0;
hdr->b_l1hdr.b_bufcnt = 0;
hdr->b_l1hdr.b_buf = NULL;
/*
* Allocate the hdr's buffer. This will contain either
* the compressed or uncompressed data depending on the block
* it references and compressed arc enablement.
*/
arc_hdr_alloc_abd(hdr, alloc_rdata);
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
return (hdr);
}
/*
* Transition between the two allocation states for the arc_buf_hdr struct.
* The arc_buf_hdr struct can be allocated with (hdr_full_cache) or without
* (hdr_l2only_cache) the fields necessary for the L1 cache - the smaller
* version is used when a cache buffer is only in the L2ARC in order to reduce
* memory usage.
*/
static arc_buf_hdr_t *
arc_hdr_realloc(arc_buf_hdr_t *hdr, kmem_cache_t *old, kmem_cache_t *new)
{
ASSERT(HDR_HAS_L2HDR(hdr));
arc_buf_hdr_t *nhdr;
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
ASSERT((old == hdr_full_cache && new == hdr_l2only_cache) ||
(old == hdr_l2only_cache && new == hdr_full_cache));
/*
* if the caller wanted a new full header and the header is to be
* encrypted we will actually allocate the header from the full crypt
* cache instead. The same applies to freeing from the old cache.
*/
if (HDR_PROTECTED(hdr) && new == hdr_full_cache)
new = hdr_full_crypt_cache;
if (HDR_PROTECTED(hdr) && old == hdr_full_cache)
old = hdr_full_crypt_cache;
nhdr = kmem_cache_alloc(new, KM_PUSHPAGE);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
buf_hash_remove(hdr);
bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
if (new == hdr_full_cache || new == hdr_full_crypt_cache) {
arc_hdr_set_flags(nhdr, ARC_FLAG_HAS_L1HDR);
/*
* arc_access and arc_change_state need to be aware that a
* header has just come out of L2ARC, so we set its state to
* l2c_only even though it's about to change.
*/
nhdr->b_l1hdr.b_state = arc_l2c_only;
/* Verify previous threads set to NULL before freeing */
ASSERT3P(nhdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
} else {
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(hdr->b_l1hdr.b_bufcnt);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
/*
* If we've reached here, we must have been called from
* arc_evict_hdr(), as such we should have already been
* removed from any ghost list we were previously on
* (which protects us from racing with arc_evict_state),
* thus no locking is needed during this check.
*/
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
/*
* A buffer must not be moved into the arc_l2c_only
* state if it's not finished being written out to the
* l2arc device. Otherwise, the b_l1hdr.b_pabd field
* might try to be accessed, even though it was removed.
*/
VERIFY(!HDR_L2_WRITING(hdr));
VERIFY3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
arc_hdr_clear_flags(nhdr, ARC_FLAG_HAS_L1HDR);
}
/*
* The header has been reallocated so we need to re-insert it into any
* lists it was on.
*/
(void) buf_hash_insert(nhdr, NULL);
ASSERT(list_link_active(&hdr->b_l2hdr.b_l2node));
mutex_enter(&dev->l2ad_mtx);
/*
* We must place the realloc'ed header back into the list at
* the same spot. Otherwise, if it's placed earlier in the list,
* l2arc_write_buffers() could find it during the function's
* write phase, and try to write it out to the l2arc.
*/
list_insert_after(&dev->l2ad_buflist, hdr, nhdr);
list_remove(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
/*
* Since we're using the pointer address as the tag when
* incrementing and decrementing the l2ad_alloc refcount, we
* must remove the old pointer (that we're about to destroy) and
* add the new pointer to the refcount. Otherwise we'd remove
* the wrong pointer address when calling arc_hdr_destroy() later.
*/
(void) refcount_remove_many(&dev->l2ad_alloc, arc_hdr_size(hdr), hdr);
(void) refcount_add_many(&dev->l2ad_alloc, arc_hdr_size(nhdr), nhdr);
buf_discard_identity(hdr);
kmem_cache_free(old, hdr);
return (nhdr);
}
/*
* This function allows an L1 header to be reallocated as a crypt
* header and vice versa. If we are going to a crypt header, the
* new fields will be zeroed out.
*/
static arc_buf_hdr_t *
arc_hdr_realloc_crypt(arc_buf_hdr_t *hdr, boolean_t need_crypt)
{
arc_buf_hdr_t *nhdr;
arc_buf_t *buf;
kmem_cache_t *ncache, *ocache;
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(!!HDR_PROTECTED(hdr), !=, need_crypt);
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
if (need_crypt) {
ncache = hdr_full_crypt_cache;
ocache = hdr_full_cache;
} else {
ncache = hdr_full_cache;
ocache = hdr_full_crypt_cache;
}
nhdr = kmem_cache_alloc(ncache, KM_PUSHPAGE);
bcopy(hdr, nhdr, HDR_L2ONLY_SIZE);
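/*
* The bcopy() above only covers the fields shared with L2-only hdrs
* (HDR_L2ONLY_SIZE), so carry over the L1 fields individually.
*/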
nhdr->b_l1hdr.b_freeze_cksum = hdr->b_l1hdr.b_freeze_cksum;
nhdr->b_l1hdr.b_bufcnt = hdr->b_l1hdr.b_bufcnt;
nhdr->b_l1hdr.b_byteswap = hdr->b_l1hdr.b_byteswap;
nhdr->b_l1hdr.b_state = hdr->b_l1hdr.b_state;
nhdr->b_l1hdr.b_arc_access = hdr->b_l1hdr.b_arc_access;
nhdr->b_l1hdr.b_mru_hits = hdr->b_l1hdr.b_mru_hits;
nhdr->b_l1hdr.b_mru_ghost_hits = hdr->b_l1hdr.b_mru_ghost_hits;
nhdr->b_l1hdr.b_mfu_hits = hdr->b_l1hdr.b_mfu_hits;
nhdr->b_l1hdr.b_mfu_ghost_hits = hdr->b_l1hdr.b_mfu_ghost_hits;
nhdr->b_l1hdr.b_l2_hits = hdr->b_l1hdr.b_l2_hits;
nhdr->b_l1hdr.b_acb = hdr->b_l1hdr.b_acb;
nhdr->b_l1hdr.b_pabd = hdr->b_l1hdr.b_pabd;
nhdr->b_l1hdr.b_buf = hdr->b_l1hdr.b_buf;
/*
* This refcount_add() exists only to ensure that the individual
* arc buffers always point to a header that is referenced, avoiding
* a small race condition that could trigger ASSERTs.
*/
(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, FTAG);
for (buf = nhdr->b_l1hdr.b_buf; buf != NULL; buf = buf->b_next) {
mutex_enter(&buf->b_evict_lock);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
}
refcount_transfer(&nhdr->b_l1hdr.b_refcnt, &hdr->b_l1hdr.b_refcnt);
(void) refcount_remove(&nhdr->b_l1hdr.b_refcnt, FTAG);
if (need_crypt) {
arc_hdr_set_flags(nhdr, ARC_FLAG_PROTECTED);
} else {
arc_hdr_clear_flags(nhdr, ARC_FLAG_PROTECTED);
}
buf_discard_identity(hdr);
kmem_cache_free(ocache, hdr);
return (nhdr);
}
/*
* This function is used by the send / receive code to convert a newly
* allocated arc_buf_t to one that is suitable for a raw encrypted write. It
* is also used to allow the root objset block to be updated without altering
* its embedded MACs. Both block types will always be uncompressed so we do not
* have to worry about compression type or psize.
*/
void
arc_convert_to_raw(arc_buf_t *buf, uint64_t dsobj, boolean_t byteorder,
dmu_object_type_t ot, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT(ot == DMU_OT_DNODE || ot == DMU_OT_OBJSET);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
buf->b_flags |= (ARC_BUF_FLAG_COMPRESSED | ARC_BUF_FLAG_ENCRYPTED);
if (!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, B_TRUE);
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
if (salt != NULL)
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
if (iv != NULL)
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
if (mac != NULL)
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
}
/*
* Allocate a new arc_buf_hdr_t and arc_buf_t and return the buf to the caller.
* The buf is returned thawed since we expect the consumer to modify it.
*/
arc_buf_t *
arc_alloc_buf(spa_t *spa, void *tag, arc_buf_contents_t type, int32_t size)
{
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), size, size,
B_FALSE, ZIO_COMPRESS_OFF, type, B_FALSE);
ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, 0, tag, B_FALSE, B_FALSE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
return (buf);
}
/*
* Allocate a compressed buf in the same manner as arc_alloc_buf. Don't use this
* for bufs containing metadata.
*/
arc_buf_t *
arc_alloc_compressed_buf(spa_t *spa, void *tag, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type)
{
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
arc_buf_hdr_t *hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
B_FALSE, compression_type, ARC_BUFC_DATA, B_FALSE);
ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
arc_buf_t *buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, 0, tag, B_FALSE,
B_TRUE, B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
if (!arc_buf_is_shared(buf)) {
/*
* To ensure that the hdr has the correct data in it if we call
* arc_untransform() on this buf before it's been written to
* disk, it's easiest if we just set up sharing between the
* buf and the hdr.
*/
ASSERT(!abd_is_linear(hdr->b_l1hdr.b_pabd));
arc_hdr_free_abd(hdr, B_FALSE);
arc_share_buf(hdr, buf);
}
return (buf);
}
arc_buf_t *
arc_alloc_raw_buf(spa_t *spa, void *tag, uint64_t dsobj, boolean_t byteorder,
const uint8_t *salt, const uint8_t *iv, const uint8_t *mac,
dmu_object_type_t ot, uint64_t psize, uint64_t lsize,
enum zio_compress compression_type)
{
arc_buf_hdr_t *hdr;
arc_buf_t *buf;
arc_buf_contents_t type = DMU_OT_IS_METADATA(ot) ?
ARC_BUFC_METADATA : ARC_BUFC_DATA;
ASSERT3U(lsize, >, 0);
ASSERT3U(lsize, >=, psize);
ASSERT3U(compression_type, >=, ZIO_COMPRESS_OFF);
ASSERT3U(compression_type, <, ZIO_COMPRESS_FUNCTIONS);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize, B_TRUE,
compression_type, type, B_TRUE);
ASSERT(!MUTEX_HELD(HDR_LOCK(hdr)));
hdr->b_crypt_hdr.b_dsobj = dsobj;
hdr->b_crypt_hdr.b_ot = ot;
hdr->b_l1hdr.b_byteswap = (byteorder == ZFS_HOST_BYTEORDER) ?
DMU_BSWAP_NUMFUNCS : DMU_OT_BYTESWAP(ot);
bcopy(salt, hdr->b_crypt_hdr.b_salt, ZIO_DATA_SALT_LEN);
bcopy(iv, hdr->b_crypt_hdr.b_iv, ZIO_DATA_IV_LEN);
bcopy(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN);
/*
* This buffer will be considered encrypted even if the ot is not an
* encrypted type. It will become authenticated instead in
* arc_write_ready().
*/
buf = NULL;
VERIFY0(arc_buf_alloc_impl(hdr, spa, dsobj, tag, B_TRUE, B_TRUE,
B_FALSE, B_FALSE, &buf));
arc_buf_thaw(buf);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
return (buf);
}
static void
arc_hdr_l2hdr_destroy(arc_buf_hdr_t *hdr)
{
l2arc_buf_hdr_t *l2hdr = &hdr->b_l2hdr;
l2arc_dev_t *dev = l2hdr->b_dev;
uint64_t psize = arc_hdr_size(hdr);
ASSERT(MUTEX_HELD(&dev->l2ad_mtx));
ASSERT(HDR_HAS_L2HDR(hdr));
list_remove(&dev->l2ad_buflist, hdr);
ARCSTAT_INCR(arcstat_l2_psize, -psize);
ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
vdev_space_update(dev->l2ad_vdev, -psize, 0, 0);
(void) refcount_remove_many(&dev->l2ad_alloc, psize, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
}
static void
arc_hdr_destroy(arc_buf_hdr_t *hdr)
{
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_buf == NULL ||
hdr->b_l1hdr.b_bufcnt > 0);
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
}
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
if (!HDR_EMPTY(hdr))
buf_discard_identity(hdr);
if (HDR_HAS_L2HDR(hdr)) {
l2arc_dev_t *dev = hdr->b_l2hdr.b_dev;
boolean_t buflist_held = MUTEX_HELD(&dev->l2ad_mtx);
if (!buflist_held)
mutex_enter(&dev->l2ad_mtx);
/*
* Even though we checked this conditional above, we
* need to check this again now that we have the
* l2ad_mtx. This is because we could be racing with
* another thread calling l2arc_evict() which might have
* destroyed this header's L2 portion as we were waiting
* to acquire the l2ad_mtx. If that happens, we don't
* want to re-destroy the header's L2 portion.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
if (!buflist_held)
mutex_exit(&dev->l2ad_mtx);
}
if (HDR_HAS_L1HDR(hdr)) {
arc_cksum_free(hdr);
while (hdr->b_l1hdr.b_buf != NULL)
arc_buf_destroy_impl(hdr->b_l1hdr.b_buf);
if (hdr->b_l1hdr.b_pabd != NULL) {
arc_hdr_free_abd(hdr, B_FALSE);
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_hash_next, ==, NULL);
if (HDR_HAS_L1HDR(hdr)) {
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (!HDR_PROTECTED(hdr)) {
kmem_cache_free(hdr_full_cache, hdr);
} else {
kmem_cache_free(hdr_full_crypt_cache, hdr);
}
} else {
kmem_cache_free(hdr_l2only_cache, hdr);
}
}
void
arc_buf_destroy(arc_buf_t *buf, void* tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
kmutex_t *hash_lock = HDR_LOCK(hdr);
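/*
* An anonymous hdr is not in the hash table and, with a bufcnt of
* one, is referenced only through this buf, so it can be torn down
* without taking the hash lock.
*/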
if (hdr->b_l1hdr.b_state == arc_anon) {
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
VERIFY0(remove_reference(hdr, NULL, tag));
arc_hdr_destroy(hdr);
return;
}
mutex_enter(hash_lock);
ASSERT3P(hdr, ==, buf->b_hdr);
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(hdr->b_l1hdr.b_state, !=, arc_anon);
ASSERT3P(buf->b_data, !=, NULL);
(void) remove_reference(hdr, hash_lock, tag);
arc_buf_destroy_impl(buf);
mutex_exit(hash_lock);
}
/*
* Evict the arc_buf_hdr that is provided as a parameter. The resultant
* state of the header is dependent on its state prior to entering this
* function. The following transitions are possible:
*
* - arc_mru -> arc_mru_ghost
* - arc_mfu -> arc_mfu_ghost
* - arc_mru_ghost -> arc_l2c_only
* - arc_mru_ghost -> deleted
* - arc_mfu_ghost -> arc_l2c_only
* - arc_mfu_ghost -> deleted
*/
static int64_t
arc_evict_hdr(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
arc_state_t *evicted_state, *state;
int64_t bytes_evicted = 0;
int min_lifetime = HDR_PRESCIENT_PREFETCH(hdr) ?
arc_min_prescient_prefetch_ms : arc_min_prefetch_ms;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
state = hdr->b_l1hdr.b_state;
if (GHOST_STATE(state)) {
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
/*
* l2arc_write_buffers() relies on a header's L1 portion
* (i.e. its b_pabd field) during its write phase.
* Thus, we cannot push a header onto the arc_l2c_only
* state (removing its L1 piece) until the header is
* done being written to the l2arc.
*/
if (HDR_HAS_L2HDR(hdr) && HDR_L2_WRITING(hdr)) {
ARCSTAT_BUMP(arcstat_evict_l2_skip);
return (bytes_evicted);
}
ARCSTAT_BUMP(arcstat_deleted);
bytes_evicted += HDR_GET_LSIZE(hdr);
DTRACE_PROBE1(arc__delete, arc_buf_hdr_t *, hdr);
if (HDR_HAS_L2HDR(hdr)) {
ASSERT(hdr->b_l1hdr.b_pabd == NULL);
ASSERT(!HDR_HAS_RABD(hdr));
/*
* This buffer is cached on the 2nd Level ARC;
* don't destroy the header.
*/
arc_change_state(arc_l2c_only, hdr, hash_lock);
/*
* dropping from L1+L2 cached to L2-only,
* realloc to remove the L1 header.
*/
hdr = arc_hdr_realloc(hdr, hdr_full_cache,
hdr_l2only_cache);
} else {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
}
return (bytes_evicted);
}
ASSERT(state == arc_mru || state == arc_mfu);
evicted_state = (state == arc_mru) ? arc_mru_ghost : arc_mfu_ghost;
/* prefetch buffers have a minimum lifespan */
if (HDR_IO_IN_PROGRESS(hdr) ||
((hdr->b_flags & (ARC_FLAG_PREFETCH | ARC_FLAG_INDIRECT)) &&
ddi_get_lbolt() - hdr->b_l1hdr.b_arc_access <
MSEC_TO_TICK(min_lifetime))) {
ARCSTAT_BUMP(arcstat_evict_skip);
return (bytes_evicted);
}
ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
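/*
* Destroy each buf attached to this hdr. If a buf's b_evict_lock
* cannot be taken without blocking, stop here and record a mutex
* miss; the bytes evicted so far are still reported to the caller.
*/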
while (hdr->b_l1hdr.b_buf) {
arc_buf_t *buf = hdr->b_l1hdr.b_buf;
if (!mutex_tryenter(&buf->b_evict_lock)) {
ARCSTAT_BUMP(arcstat_mutex_miss);
break;
}
if (buf->b_data != NULL)
bytes_evicted += HDR_GET_LSIZE(hdr);
mutex_exit(&buf->b_evict_lock);
arc_buf_destroy_impl(buf);
}
if (HDR_HAS_L2HDR(hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_cached, HDR_GET_LSIZE(hdr));
} else {
if (l2arc_write_eligible(hdr->b_spa, hdr)) {
ARCSTAT_INCR(arcstat_evict_l2_eligible,
HDR_GET_LSIZE(hdr));
} else {
ARCSTAT_INCR(arcstat_evict_l2_ineligible,
HDR_GET_LSIZE(hdr));
}
}
if (hdr->b_l1hdr.b_bufcnt == 0) {
arc_cksum_free(hdr);
bytes_evicted += arc_hdr_size(hdr);
/*
* If this hdr is being evicted and has a compressed
* buffer then we discard it here before we change states.
* This ensures that the accounting is updated correctly
* in arc_free_data_impl().
*/
if (hdr->b_l1hdr.b_pabd != NULL)
arc_hdr_free_abd(hdr, B_FALSE);
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
arc_change_state(evicted_state, hdr, hash_lock);
ASSERT(HDR_IN_HASH_TABLE(hdr));
arc_hdr_set_flags(hdr, ARC_FLAG_IN_HASH_TABLE);
DTRACE_PROBE1(arc__evict, arc_buf_hdr_t *, hdr);
}
return (bytes_evicted);
}
static uint64_t
arc_evict_state_impl(multilist_t *ml, int idx, arc_buf_hdr_t *marker,
uint64_t spa, int64_t bytes)
{
multilist_sublist_t *mls;
uint64_t bytes_evicted = 0;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
int evict_count = 0;
ASSERT3P(marker, !=, NULL);
IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
mls = multilist_sublist_lock(ml, idx);
for (hdr = multilist_sublist_prev(mls, marker); hdr != NULL;
hdr = multilist_sublist_prev(mls, marker)) {
if ((bytes != ARC_EVICT_ALL && bytes_evicted >= bytes) ||
(evict_count >= zfs_arc_evict_batch_limit))
break;
/*
* To keep our iteration location, move the marker
* forward. Since we're not holding hdr's hash lock, we
* must be very careful and not remove 'hdr' from the
* sublist. Otherwise, other consumers might mistake the
* 'hdr' as not being on a sublist when they call the
* multilist_link_active() function (they all rely on
* the hash lock protecting concurrent insertions and
* removals). multilist_sublist_move_forward() was
* specifically implemented to ensure this is the case
* (only 'marker' will be removed and re-inserted).
*/
multilist_sublist_move_forward(mls, marker);
/*
* The only case where the b_spa field should ever be
* zero is for the marker headers inserted by
* arc_evict_state(). It's possible for multiple threads
* to be calling arc_evict_state() concurrently (e.g.
* dsl_pool_close() and zio_inject_fault()), so we must
* skip any markers we see from these other threads.
*/
if (hdr->b_spa == 0)
continue;
/* we're only interested in evicting buffers of a certain spa */
if (spa != 0 && hdr->b_spa != spa) {
ARCSTAT_BUMP(arcstat_evict_skip);
continue;
}
hash_lock = HDR_LOCK(hdr);
/*
* We aren't calling this function from any code path
* that would already be holding a hash lock, so we're
* asserting on this assumption to be defensive in case
* this ever changes. Without this check, it would be
* possible to incorrectly increment arcstat_mutex_miss
* below (e.g. if the code changed such that we called
* this function with a hash lock held).
*/
ASSERT(!MUTEX_HELD(hash_lock));
if (mutex_tryenter(hash_lock)) {
uint64_t evicted = arc_evict_hdr(hdr, hash_lock);
mutex_exit(hash_lock);
bytes_evicted += evicted;
/*
* If evicted is zero, arc_evict_hdr() must have
* decided to skip this header, don't increment
* evict_count in this case.
*/
if (evicted != 0)
evict_count++;
/*
* If arc_size isn't overflowing, signal any
* threads that might happen to be waiting.
*
* For each header evicted, we wake up a single
* thread. If we used cv_broadcast, we could
* wake up "too many" threads causing arc_size
* to significantly overflow arc_c; since
* arc_get_data_impl() doesn't check for overflow
* when it's woken up (it doesn't because it's
* possible for the ARC to be overflowing while
* full of un-evictable buffers, and the
* function should proceed in this case).
*
* If threads are left sleeping, due to not
* using cv_broadcast, they will be woken up
* just before arc_reclaim_thread() sleeps.
*/
mutex_enter(&arc_reclaim_lock);
if (!arc_is_overflowing())
cv_signal(&arc_reclaim_waiters_cv);
mutex_exit(&arc_reclaim_lock);
} else {
ARCSTAT_BUMP(arcstat_mutex_miss);
}
}
multilist_sublist_unlock(mls);
return (bytes_evicted);
}
/*
* Evict buffers from the given arc state, until we've removed the
* specified number of bytes. Move the removed buffers to the
* appropriate evict state.
*
* This function makes a "best effort". It skips over any buffers
* it can't get a hash_lock on, and so, may not catch all candidates.
* It may also return without evicting as much space as requested.
*
* If bytes is specified using the special value ARC_EVICT_ALL, this
* will evict all available (i.e. unlocked and evictable) buffers from
* the given arc state; which is used by arc_flush().
*/
static uint64_t
arc_evict_state(arc_state_t *state, uint64_t spa, int64_t bytes,
arc_buf_contents_t type)
{
uint64_t total_evicted = 0;
multilist_t *ml = state->arcs_list[type];
int num_sublists;
arc_buf_hdr_t **markers;
IMPLY(bytes < 0, bytes == ARC_EVICT_ALL);
num_sublists = multilist_get_num_sublists(ml);
/*
* If we've tried to evict from each sublist, made some
* progress, but still have not hit the target number of bytes
* to evict, we want to keep trying. The markers allow us to
* pick up where we left off for each individual sublist, rather
* than starting from the tail each time.
*/
markers = kmem_zalloc(sizeof (*markers) * num_sublists, KM_SLEEP);
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls;
markers[i] = kmem_cache_alloc(hdr_full_cache, KM_SLEEP);
/*
* A b_spa of 0 is used to indicate that this header is
* a marker. This fact is used in arc_adjust_type() and
* arc_evict_state_impl().
*/
markers[i]->b_spa = 0;
mls = multilist_sublist_lock(ml, i);
multilist_sublist_insert_tail(mls, markers[i]);
multilist_sublist_unlock(mls);
}
/*
* While we haven't hit our target number of bytes to evict, or
* we're evicting all available buffers.
*/
while (total_evicted < bytes || bytes == ARC_EVICT_ALL) {
int sublist_idx = multilist_get_random_index(ml);
uint64_t scan_evicted = 0;
/*
* Try to reduce pinned dnodes with a floor of arc_dnode_limit.
* Request that 10% of the LRUs be scanned by the superblock
* shrinker.
*/
if (type == ARC_BUFC_DATA && arc_dnode_size > arc_dnode_limit)
arc_prune_async((arc_dnode_size - arc_dnode_limit) /
sizeof (dnode_t) / zfs_arc_dnode_reduce_percent);
/*
* Start eviction using a randomly selected sublist,
* this is to try and evenly balance eviction across all
* sublists. Always starting at the same sublist
* (e.g. index 0) would cause evictions to favor certain
* sublists over others.
*/
for (int i = 0; i < num_sublists; i++) {
uint64_t bytes_remaining;
uint64_t bytes_evicted;
if (bytes == ARC_EVICT_ALL)
bytes_remaining = ARC_EVICT_ALL;
else if (total_evicted < bytes)
bytes_remaining = bytes - total_evicted;
else
break;
bytes_evicted = arc_evict_state_impl(ml, sublist_idx,
markers[sublist_idx], spa, bytes_remaining);
scan_evicted += bytes_evicted;
total_evicted += bytes_evicted;
/* we've reached the end, wrap to the beginning */
if (++sublist_idx >= num_sublists)
sublist_idx = 0;
}
/*
* If we didn't evict anything during this scan, we have
* no reason to believe we'll evict more during another
* scan, so break the loop.
*/
if (scan_evicted == 0) {
/* This isn't possible, let's make that obvious */
ASSERT3S(bytes, !=, 0);
/*
* When bytes is ARC_EVICT_ALL, the only way to
* break the loop is when scan_evicted is zero.
* In that case, we actually have evicted enough,
* so we don't want to increment the kstat.
*/
if (bytes != ARC_EVICT_ALL) {
ASSERT3S(total_evicted, <, bytes);
ARCSTAT_BUMP(arcstat_evict_not_enough);
}
break;
}
}
for (int i = 0; i < num_sublists; i++) {
multilist_sublist_t *mls = multilist_sublist_lock(ml, i);
multilist_sublist_remove(mls, markers[i]);
multilist_sublist_unlock(mls);
kmem_cache_free(hdr_full_cache, markers[i]);
}
kmem_free(markers, sizeof (*markers) * num_sublists);
return (total_evicted);
}
/*
* Flush all "evictable" data of the given type from the arc state
* specified. This will not evict any "active" buffers (i.e. referenced).
*
* When 'retry' is set to B_FALSE, the function will make a single pass
* over the state and evict any buffers that it can. Since it doesn't
* continually retry the eviction, it might end up leaving some buffers
* in the ARC due to lock misses.
*
* When 'retry' is set to B_TRUE, the function will continually retry the
* eviction until *all* evictable buffers have been removed from the
* state. As a result, if concurrent insertions into the state are
* allowed (e.g. if the ARC isn't shutting down), this function might
* wind up in an infinite loop, continually trying to evict buffers.
*/
static uint64_t
arc_flush_state(arc_state_t *state, uint64_t spa, arc_buf_contents_t type,
boolean_t retry)
{
uint64_t evicted = 0;
while (refcount_count(&state->arcs_esize[type]) != 0) {
evicted += arc_evict_state(state, spa, ARC_EVICT_ALL, type);
if (!retry)
break;
}
return (evicted);
}
/*
* Helper function for arc_prune_async(); it is responsible for safely
* handling the execution of a registered arc_prune_func_t.
*/
static void
arc_prune_task(void *ptr)
{
arc_prune_t *ap = (arc_prune_t *)ptr;
arc_prune_func_t *func = ap->p_pfunc;
if (func != NULL)
func(ap->p_adjust, ap->p_private);
refcount_remove(&ap->p_refcnt, func);
}
/*
* Notify registered consumers they must drop holds on a portion of the ARC
* buffers they reference. This provides a mechanism to ensure the ARC can
* honor the arc_meta_limit and reclaim otherwise pinned ARC buffers. This
* is analogous to dnlc_reduce_cache() but more generic.
*
* This operation is performed asynchronously so it may be safely called
* in the context of the arc_reclaim_thread(). A reference is taken here
* for each registered arc_prune_t and the arc_prune_task() is responsible
* for releasing it once the registered arc_prune_func_t has completed.
*/
static void
arc_prune_async(int64_t adjust)
{
arc_prune_t *ap;
mutex_enter(&arc_prune_mtx);
for (ap = list_head(&arc_prune_list); ap != NULL;
ap = list_next(&arc_prune_list, ap)) {
if (refcount_count(&ap->p_refcnt) >= 2)
continue;
refcount_add(&ap->p_refcnt, ap->p_pfunc);
ap->p_adjust = adjust;
if (taskq_dispatch(arc_prune_taskq, arc_prune_task,
ap, TQ_SLEEP) == TASKQID_INVALID) {
refcount_remove(&ap->p_refcnt, ap->p_pfunc);
continue;
}
ARCSTAT_BUMP(arcstat_prune);
}
mutex_exit(&arc_prune_mtx);
}
/*
* Evict the specified number of bytes from the state specified,
* restricting eviction to the spa and type given. This function
* prevents us from trying to evict more from a state's list than
* is "evictable", and to skip evicting altogether when passed a
* negative value for "bytes". In contrast, arc_evict_state() will
* evict everything it can, when passed a negative value for "bytes".
*/
static uint64_t
arc_adjust_impl(arc_state_t *state, uint64_t spa, int64_t bytes,
arc_buf_contents_t type)
{
int64_t delta;
if (bytes > 0 && refcount_count(&state->arcs_esize[type]) > 0) {
delta = MIN(refcount_count(&state->arcs_esize[type]), bytes);
return (arc_evict_state(state, spa, delta, type));
}
return (0);
}
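/*
 * A worked example of the clamping above (numbers are purely
 * illustrative): with 512 MiB requested ("bytes") but only 200 MiB
 * currently evictable in arcs_esize[type], delta is clamped to
 * 200 MiB and arc_evict_state() is asked for exactly that much.
 * With a zero or negative "bytes" the function returns 0 and nothing
 * is evicted.
 */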
/*
* The goal of this function is to evict enough meta data buffers from the
* ARC in order to enforce the arc_meta_limit. Achieving this is slightly
* more complicated than it appears because it is common for data buffers
* to have holds on meta data buffers. In addition, dnode meta data buffers
* will be held by the dnodes in the block preventing them from being freed.
* This means we can't simply traverse the ARC and expect to always find
* enough unheld meta data buffer to release.
*
* Therefore, this function has been updated to make alternating passes
* over the ARC releasing data buffers and then newly unheld meta data
* buffers. This ensures forward progress is maintained and arc_meta_used
* will decrease. Normally this is sufficient, but if required the ARC
* will call the registered prune callbacks, causing dentries and inodes to
* be dropped from the VFS cache. This will make dnode meta data buffers
* available for reclaim.
*/
static uint64_t
arc_adjust_meta_balanced(void)
{
int64_t delta, prune = 0, adjustmnt;
uint64_t total_evicted = 0;
arc_buf_contents_t type = ARC_BUFC_DATA;
int restarts = MAX(zfs_arc_meta_adjust_restarts, 0);
restart:
/*
* This slightly differs from the way we evict from the mru in
* arc_adjust because we don't have a "target" value (i.e. no
* "meta" arc_p). As a result, I think we can completely
* cannibalize the metadata in the MRU before we evict the
* metadata from the MFU. I think we probably need to implement a
* "metadata arc_p" value to do this properly.
*/
adjustmnt = arc_meta_used - arc_meta_limit;
if (adjustmnt > 0 && refcount_count(&arc_mru->arcs_esize[type]) > 0) {
delta = MIN(refcount_count(&arc_mru->arcs_esize[type]),
adjustmnt);
total_evicted += arc_adjust_impl(arc_mru, 0, delta, type);
adjustmnt -= delta;
}
/*
* We can't afford to recalculate adjustmnt here. If we do,
* new metadata buffers can sneak into the MRU or ANON lists,
* thus penalizing the MFU metadata. Although the fudge factor is
* small, it has been empirically shown to be significant for
* certain workloads (e.g. creating many empty directories). As
* such, we use the original calculation for adjustmnt, and
* simply decrement the amount of data evicted from the MRU.
*/
if (adjustmnt > 0 && refcount_count(&arc_mfu->arcs_esize[type]) > 0) {
delta = MIN(refcount_count(&arc_mfu->arcs_esize[type]),
adjustmnt);
total_evicted += arc_adjust_impl(arc_mfu, 0, delta, type);
}
adjustmnt = arc_meta_used - arc_meta_limit;
if (adjustmnt > 0 &&
refcount_count(&arc_mru_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
refcount_count(&arc_mru_ghost->arcs_esize[type]));
total_evicted += arc_adjust_impl(arc_mru_ghost, 0, delta, type);
adjustmnt -= delta;
}
if (adjustmnt > 0 &&
refcount_count(&arc_mfu_ghost->arcs_esize[type]) > 0) {
delta = MIN(adjustmnt,
refcount_count(&arc_mfu_ghost->arcs_esize[type]));
total_evicted += arc_adjust_impl(arc_mfu_ghost, 0, delta, type);
}
/*
* If after attempting to make the requested adjustment to the ARC
* the meta limit is still being exceeded then request that the
* higher layers drop some cached objects which have holds on ARC
* meta buffers. Requests to the upper layers will be made with
* increasingly large scan sizes until the ARC is below the limit.
*/
if (arc_meta_used > arc_meta_limit) {
if (type == ARC_BUFC_DATA) {
type = ARC_BUFC_METADATA;
} else {
type = ARC_BUFC_DATA;
if (zfs_arc_meta_prune) {
prune += zfs_arc_meta_prune;
arc_prune_async(prune);
}
}
if (restarts > 0) {
restarts--;
goto restart;
}
}
return (total_evicted);
}
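/*
 * Sketch of the alternating passes above (illustrative only): the
 * first pass runs with type == ARC_BUFC_DATA; if arc_meta_used is
 * still above arc_meta_limit, the next restart switches to
 * ARC_BUFC_METADATA, and the restart after that switches back to
 * data while also growing the cumulative prune request passed to
 * arc_prune_async(). The number of restarts is bounded by
 * zfs_arc_meta_adjust_restarts, so the function terminates even if
 * the limit cannot be reached.
 */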
/*
* Evict metadata buffers from the cache, such that arc_meta_used is
* capped by the arc_meta_limit tunable.
*/
static uint64_t
arc_adjust_meta_only(void)
{
uint64_t total_evicted = 0;
int64_t target;
/*
* If we're over the meta limit, we want to evict enough
* metadata to get back under the meta limit. We don't want to
* evict so much that we drop the MRU below arc_p, though. If
* we're over the meta limit more than we're over arc_p, we
* evict some from the MRU here, and some from the MFU below.
*/
target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
(int64_t)(refcount_count(&arc_anon->arcs_size) +
refcount_count(&arc_mru->arcs_size) - arc_p));
total_evicted += arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
/*
* Similar to the above, we want to evict enough bytes to get us
* below the meta limit, but not so much as to drop us below the
* space allotted to the MFU (which is defined as arc_c - arc_p).
*/
target = MIN((int64_t)(arc_meta_used - arc_meta_limit),
(int64_t)(refcount_count(&arc_mfu->arcs_size) - (arc_c - arc_p)));
total_evicted += arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
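/*
 * Worked example of the two MIN() targets above (illustrative
 * numbers): suppose arc_meta_used = 6 GiB, arc_meta_limit = 4 GiB,
 * anon + mru = 5 GiB and arc_p = 4 GiB. The MRU target is
 * MIN(6 - 4, 5 - 4) = 1 GiB, so at most 1 GiB of MRU metadata is
 * evicted to avoid dropping the MRU below arc_p; the remaining
 * 1 GiB of excess is then requested from the MFU, limited by how
 * far the MFU exceeds its own allotment (arc_c - arc_p).
 */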
static uint64_t
arc_adjust_meta(void)
{
if (zfs_arc_meta_strategy == ARC_STRATEGY_META_ONLY)
return (arc_adjust_meta_only());
else
return (arc_adjust_meta_balanced());
}
/*
* Return the type of the oldest buffer in the given arc state
*
* This function will select a random sublist of type ARC_BUFC_DATA and
* a random sublist of type ARC_BUFC_METADATA. The tail of each sublist
* is compared, and the type which contains the "older" buffer will be
* returned.
*/
static arc_buf_contents_t
arc_adjust_type(arc_state_t *state)
{
multilist_t *data_ml = state->arcs_list[ARC_BUFC_DATA];
multilist_t *meta_ml = state->arcs_list[ARC_BUFC_METADATA];
int data_idx = multilist_get_random_index(data_ml);
int meta_idx = multilist_get_random_index(meta_ml);
multilist_sublist_t *data_mls;
multilist_sublist_t *meta_mls;
arc_buf_contents_t type;
arc_buf_hdr_t *data_hdr;
arc_buf_hdr_t *meta_hdr;
/*
* We keep the sublist lock until we're finished, to prevent
* the headers from being destroyed via arc_evict_state().
*/
data_mls = multilist_sublist_lock(data_ml, data_idx);
meta_mls = multilist_sublist_lock(meta_ml, meta_idx);
/*
* These two loops are to ensure we skip any markers that
* might be at the tail of the lists due to arc_evict_state().
*/
for (data_hdr = multilist_sublist_tail(data_mls); data_hdr != NULL;
data_hdr = multilist_sublist_prev(data_mls, data_hdr)) {
if (data_hdr->b_spa != 0)
break;
}
for (meta_hdr = multilist_sublist_tail(meta_mls); meta_hdr != NULL;
meta_hdr = multilist_sublist_prev(meta_mls, meta_hdr)) {
if (meta_hdr->b_spa != 0)
break;
}
if (data_hdr == NULL && meta_hdr == NULL) {
type = ARC_BUFC_DATA;
} else if (data_hdr == NULL) {
ASSERT3P(meta_hdr, !=, NULL);
type = ARC_BUFC_METADATA;
} else if (meta_hdr == NULL) {
ASSERT3P(data_hdr, !=, NULL);
type = ARC_BUFC_DATA;
} else {
ASSERT3P(data_hdr, !=, NULL);
ASSERT3P(meta_hdr, !=, NULL);
/* The headers can't be on the sublist without an L1 header */
ASSERT(HDR_HAS_L1HDR(data_hdr));
ASSERT(HDR_HAS_L1HDR(meta_hdr));
if (data_hdr->b_l1hdr.b_arc_access <
meta_hdr->b_l1hdr.b_arc_access) {
type = ARC_BUFC_DATA;
} else {
type = ARC_BUFC_METADATA;
}
}
multilist_sublist_unlock(meta_mls);
multilist_sublist_unlock(data_mls);
return (type);
}
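/*
 * Illustrative example of the comparison above: if the oldest (tail)
 * data header was last accessed at lbolt 1000 and the oldest metadata
 * header at lbolt 1200, the data header is older, so ARC_BUFC_DATA is
 * returned and data is evicted first.
 */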
/*
* Evict buffers from the cache, such that arc_size is capped by arc_c.
*/
static uint64_t
arc_adjust(void)
{
uint64_t total_evicted = 0;
uint64_t bytes;
int64_t target;
/*
* If we're over arc_meta_limit, we want to correct that before
* potentially evicting data buffers below.
*/
total_evicted += arc_adjust_meta();
/*
* Adjust MRU size
*
* If we're over the target cache size, we want to evict enough
* from the list to get back to our target size. We don't want
* to evict too much from the MRU, such that it drops below
* arc_p. So, if we're over our target cache size more than
* the MRU is over arc_p, we'll evict enough to get back to
* arc_p here, and then evict more from the MFU below.
*/
target = MIN((int64_t)(arc_size - arc_c),
(int64_t)(refcount_count(&arc_anon->arcs_size) +
refcount_count(&arc_mru->arcs_size) + arc_meta_used - arc_p));
/*
* If we're below arc_meta_min, always prefer to evict data.
* Otherwise, try to satisfy the requested number of bytes to
* evict from the type which contains older buffers; in an
* effort to keep newer buffers in the cache regardless of their
* type. If we cannot satisfy the number of bytes from this
* type, spill over into the next type.
*/
if (arc_adjust_type(arc_mru) == ARC_BUFC_METADATA &&
arc_meta_used > arc_meta_min) {
bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_adjust_impl(arc_mru, 0, target, ARC_BUFC_METADATA);
}
/*
* Adjust MFU size
*
* Now that we've tried to evict enough from the MRU to get its
* size back to arc_p, if we're still above the target cache
* size, we evict the rest from the MFU.
*/
target = arc_size - arc_c;
if (arc_adjust_type(arc_mfu) == ARC_BUFC_METADATA &&
arc_meta_used > arc_meta_min) {
bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* metadata, we try to get the rest from data.
*/
target -= bytes;
total_evicted +=
arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
} else {
bytes = arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
/*
* If we couldn't evict our target number of bytes from
* data, we try to get the rest from metadata.
*/
target -= bytes;
total_evicted +=
arc_adjust_impl(arc_mfu, 0, target, ARC_BUFC_METADATA);
}
/*
* Adjust ghost lists
*
* In addition to the above, the ARC also defines target values
* for the ghost lists. The sum of the mru list and mru ghost
* list should never exceed the target size of the cache, and
* the sum of the mru list, mfu list, mru ghost list, and mfu
* ghost list should never exceed twice the target size of the
* cache. The following logic enforces these limits on the ghost
* caches, and evicts from them as needed.
*/
target = refcount_count(&arc_mru->arcs_size) +
refcount_count(&arc_mru_ghost->arcs_size) - arc_c;
bytes = arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_adjust_impl(arc_mru_ghost, 0, target, ARC_BUFC_METADATA);
/*
* We assume the sum of the mru list and mfu list is less than
* or equal to arc_c (we enforced this above), which means we
* can use the simpler of the two equations below:
*
* mru + mfu + mru ghost + mfu ghost <= 2 * arc_c
* mru ghost + mfu ghost <= arc_c
*/
target = refcount_count(&arc_mru_ghost->arcs_size) +
refcount_count(&arc_mfu_ghost->arcs_size) - arc_c;
bytes = arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_DATA);
total_evicted += bytes;
target -= bytes;
total_evicted +=
arc_adjust_impl(arc_mfu_ghost, 0, target, ARC_BUFC_METADATA);
return (total_evicted);
}
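/*
 * Worked example of the ghost-list targets above (illustrative
 * numbers, arc_c = 8 GiB): if mru = 5 GiB and mru_ghost = 4 GiB, the
 * first target is 5 + 4 - 8 = 1 GiB, so 1 GiB is trimmed from the mru
 * ghost list to keep mru + mru_ghost <= arc_c. Likewise, if mru_ghost
 * (after trimming) = 3 GiB and mfu_ghost = 6 GiB, the second target
 * is 3 + 6 - 8 = 1 GiB trimmed from the mfu ghost list so that
 * mru_ghost + mfu_ghost <= arc_c.
 */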
void
arc_flush(spa_t *spa, boolean_t retry)
{
uint64_t guid = 0;
/*
* If retry is B_TRUE, a spa must not be specified since we have
* no good way to determine if all of a spa's buffers have been
* evicted from an arc state.
*/
ASSERT(!retry || spa == 0);
if (spa != NULL)
guid = spa_load_guid(spa);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mru_ghost, guid, ARC_BUFC_METADATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_DATA, retry);
(void) arc_flush_state(arc_mfu_ghost, guid, ARC_BUFC_METADATA, retry);
}
void
arc_shrink(int64_t to_free)
{
uint64_t c = arc_c;
if (c > to_free && c - to_free > arc_c_min) {
arc_c = c - to_free;
atomic_add_64(&arc_p, -(arc_p >> arc_shrink_shift));
if (arc_c > arc_size)
arc_c = MAX(arc_size, arc_c_min);
if (arc_p > arc_c)
arc_p = (arc_c >> 1);
ASSERT(arc_c >= arc_c_min);
ASSERT((int64_t)arc_p >= 0);
} else {
arc_c = arc_c_min;
}
if (arc_size > arc_c)
(void) arc_adjust();
}
/*
* Return the maximum amount of memory that we could possibly use. In
* user space, which is primarily used for testing, this is reduced to
* half of all memory.
*/
static uint64_t
arc_all_memory(void)
{
#ifdef _KERNEL
#ifdef CONFIG_HIGHMEM
return (ptob(totalram_pages - totalhigh_pages));
#else
return (ptob(totalram_pages));
#endif /* CONFIG_HIGHMEM */
#else
return (ptob(physmem) / 2);
#endif /* _KERNEL */
}
/*
* Return the amount of memory that is considered free. In user space,
* which is primarily used for testing, we pretend that free memory
* ranges from 0-20% of all memory.
*/
static uint64_t
arc_free_memory(void)
{
#ifdef _KERNEL
#ifdef CONFIG_HIGHMEM
struct sysinfo si;
si_meminfo(&si);
return (ptob(si.freeram - si.freehigh));
#else
return (ptob(nr_free_pages() +
nr_inactive_file_pages() +
nr_inactive_anon_pages() +
nr_slab_reclaimable_pages()));
#endif /* CONFIG_HIGHMEM */
#else
return (spa_get_random(arc_all_memory() * 20 / 100));
#endif /* _KERNEL */
}
typedef enum free_memory_reason_t {
FMR_UNKNOWN,
FMR_NEEDFREE,
FMR_LOTSFREE,
FMR_SWAPFS_MINFREE,
FMR_PAGES_PP_MAXIMUM,
FMR_HEAP_ARENA,
FMR_ZIO_ARENA,
} free_memory_reason_t;
int64_t last_free_memory;
free_memory_reason_t last_free_reason;
#ifdef _KERNEL
/*
* Additional reserve of pages for pp_reserve.
*/
int64_t arc_pages_pp_reserve = 64;
/*
* Additional reserve of pages for swapfs.
*/
int64_t arc_swapfs_reserve = 64;
#endif /* _KERNEL */
/*
* Return the amount of memory that can be consumed before reclaim will be
* needed. Positive if there is sufficient free memory, negative indicates
* the amount of memory that needs to be freed up.
*/
static int64_t
arc_available_memory(void)
{
int64_t lowest = INT64_MAX;
free_memory_reason_t r = FMR_UNKNOWN;
#ifdef _KERNEL
int64_t n;
#ifdef __linux__
#ifdef freemem
#undef freemem
#endif
pgcnt_t needfree = btop(arc_need_free);
pgcnt_t lotsfree = btop(arc_sys_free);
pgcnt_t desfree = 0;
pgcnt_t freemem = btop(arc_free_memory());
#endif
if (needfree > 0) {
n = PAGESIZE * (-needfree);
if (n < lowest) {
lowest = n;
r = FMR_NEEDFREE;
}
}
/*
* Check that we're out of range of the pageout scanner. It starts to
* schedule paging if freemem is less than lotsfree plus needfree.
* lotsfree is the high-water mark for pageout, and needfree is the
* number of needed free pages. We add extra pages here to make sure
* the scanner doesn't start up while we're freeing memory.
*/
n = PAGESIZE * (freemem - lotsfree - needfree - desfree);
if (n < lowest) {
lowest = n;
r = FMR_LOTSFREE;
}
#ifndef __linux__
/*
* check to make sure that swapfs has enough space so that anon
* reservations can still succeed. anon_resvmem() checks that the
* availrmem is greater than swapfs_minfree, and the number of reserved
* swap pages. We also add a bit of extra here just to prevent
* circumstances from getting really dire.
*/
n = PAGESIZE * (availrmem - swapfs_minfree - swapfs_reserve -
desfree - arc_swapfs_reserve);
if (n < lowest) {
lowest = n;
r = FMR_SWAPFS_MINFREE;
}
/*
* Check that we have enough availrmem that memory locking (e.g., via
* mlock(3C) or memcntl(2)) can still succeed. (pages_pp_maximum
* stores the number of pages that cannot be locked; when availrmem
* drops below pages_pp_maximum, page locking mechanisms such as
* page_pp_lock() will fail.)
*/
n = PAGESIZE * (availrmem - pages_pp_maximum -
arc_pages_pp_reserve);
if (n < lowest) {
lowest = n;
r = FMR_PAGES_PP_MAXIMUM;
}
#endif
#if defined(_ILP32)
/*
* If we're on a 32-bit platform, it's possible that we'll exhaust the
* kernel heap space before we ever run out of available physical
* memory. Most checks of the size of the heap_area compare against
* tune.t_minarmem, which is the minimum available real memory that we
* can have in the system. However, this is generally fixed at 25 pages
* which is so low that it's useless. In this comparison, we seek to
* calculate the total heap-size, and reclaim if more than 3/4ths of the
* heap is allocated. (Or, in the calculation, if less than 1/4th is
* free)
*/
n = vmem_size(heap_arena, VMEM_FREE) -
(vmem_size(heap_arena, VMEM_FREE | VMEM_ALLOC) >> 2);
if (n < lowest) {
lowest = n;
r = FMR_HEAP_ARENA;
}
#endif
/*
* If zio data pages are being allocated out of a separate heap segment,
* then enforce that the size of available vmem for this arena remains
* above about 1/4th (1/(2^arc_zio_arena_free_shift)) free.
*
* Note that reducing the arc_zio_arena_free_shift keeps more virtual
* memory (in the zio_arena) free, which can avoid memory
* fragmentation issues.
*/
if (zio_arena != NULL) {
n = (int64_t)vmem_size(zio_arena, VMEM_FREE) -
(vmem_size(zio_arena, VMEM_ALLOC) >>
arc_zio_arena_free_shift);
if (n < lowest) {
lowest = n;
r = FMR_ZIO_ARENA;
}
}
#else /* _KERNEL */
/* Every 100 calls, free a small amount */
if (spa_get_random(100) == 0)
lowest = -1024;
#endif /* _KERNEL */
last_free_memory = lowest;
last_free_reason = r;
return (lowest);
}
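/*
 * Illustrative evaluation of the checks above (assuming 4 KiB pages
 * and round numbers): with freemem = 100,000 pages, lotsfree = 50,000
 * and needfree = desfree = 0, the pageout check yields
 * n = 4 KiB * 50,000 = ~200 MiB of headroom and the function returns
 * a positive value, so no reclaim is needed. If freemem later drops
 * to 40,000 pages, n becomes ~-40 MiB, lowest goes negative (assuming
 * no other check is lower), and last_free_reason is FMR_LOTSFREE.
 */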
/*
* Determine if the system is under memory pressure and is asking
* to reclaim memory. A return value of B_TRUE indicates that the system
* is under memory pressure and that the arc should adjust accordingly.
*/
static boolean_t
arc_reclaim_needed(void)
{
return (arc_available_memory() < 0);
}
static void
arc_kmem_reap_now(void)
{
size_t i;
kmem_cache_t *prev_cache = NULL;
kmem_cache_t *prev_data_cache = NULL;
extern kmem_cache_t *zio_buf_cache[];
extern kmem_cache_t *zio_data_buf_cache[];
extern kmem_cache_t *range_seg_cache;
#ifdef _KERNEL
if ((arc_meta_used >= arc_meta_limit) && zfs_arc_meta_prune) {
/*
* We are exceeding our meta-data cache limit.
* Prune some entries to release holds on meta-data.
*/
arc_prune_async(zfs_arc_meta_prune);
}
#if defined(_ILP32)
/*
* Reclaim unused memory from all kmem caches.
*/
kmem_reap();
#endif
#endif
for (i = 0; i < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; i++) {
#if defined(_ILP32)
/* we've reached the upper limit of the cache size on 32-bit */
if (zio_buf_cache[i] == NULL)
break;
#endif
if (zio_buf_cache[i] != prev_cache) {
prev_cache = zio_buf_cache[i];
kmem_cache_reap_now(zio_buf_cache[i]);
}
if (zio_data_buf_cache[i] != prev_data_cache) {
prev_data_cache = zio_data_buf_cache[i];
kmem_cache_reap_now(zio_data_buf_cache[i]);
}
}
kmem_cache_reap_now(buf_cache);
kmem_cache_reap_now(hdr_full_cache);
kmem_cache_reap_now(hdr_l2only_cache);
kmem_cache_reap_now(range_seg_cache);
if (zio_arena != NULL) {
/*
* Ask the vmem arena to reclaim unused memory from its
* quantum caches.
*/
vmem_qcache_reap(zio_arena);
}
}
/*
* Threads can block in arc_get_data_impl() waiting for this thread to evict
* enough data and signal them to proceed. When this happens, the threads in
* arc_get_data_impl() are sleeping while holding the hash lock for their
* particular arc header. Thus, we must be careful to never sleep on a
* hash lock in this thread. This is to prevent the following deadlock:
*
* - Thread A sleeps on CV in arc_get_data_impl() holding hash lock "L",
* waiting for the reclaim thread to signal it.
*
* - arc_reclaim_thread() tries to acquire hash lock "L" using mutex_enter,
* fails, and goes to sleep forever.
*
* This possible deadlock is avoided by always acquiring a hash lock
* using mutex_tryenter() from arc_reclaim_thread().
*/
/* ARGSUSED */
static void
arc_reclaim_thread(void *unused)
{
fstrans_cookie_t cookie = spl_fstrans_mark();
hrtime_t growtime = 0;
callb_cpr_t cpr;
CALLB_CPR_INIT(&cpr, &arc_reclaim_lock, callb_generic_cpr, FTAG);
mutex_enter(&arc_reclaim_lock);
while (!arc_reclaim_thread_exit) {
uint64_t evicted = 0;
uint64_t need_free = arc_need_free;
arc_tuning_update();
/*
* This is necessary in order for the mdb ::arc dcmd to
* show up-to-date information. Since the ::arc command
* does not call the kstat's update function, without
* this call, the command may show stale stats for the
* anon, mru, mru_ghost, mfu, and mfu_ghost lists. Even
* with this change, the data might be up to 1 second
* out of date; but that should suffice. The arc_state_t
* structures can be queried directly if more accurate
* information is needed.
*/
#ifndef __linux__
if (arc_ksp != NULL)
arc_ksp->ks_update(arc_ksp, KSTAT_READ);
#endif
mutex_exit(&arc_reclaim_lock);
/*
* We call arc_adjust() before (possibly) calling
* arc_kmem_reap_now(), so that we can wake up
* arc_get_data_buf() sooner.
*/
evicted = arc_adjust();
int64_t free_memory = arc_available_memory();
if (free_memory < 0) {
arc_no_grow = B_TRUE;
arc_warm = B_TRUE;
/*
* Wait at least zfs_grow_retry (default 5) seconds
* before considering growing.
*/
growtime = gethrtime() + SEC2NSEC(arc_grow_retry);
arc_kmem_reap_now();
/*
* If we are still low on memory, shrink the ARC
* so that we have about (arc_c >> arc_shrink_shift) of free space.
*/
free_memory = arc_available_memory();
int64_t to_free =
(arc_c >> arc_shrink_shift) - free_memory;
if (to_free > 0) {
#ifdef _KERNEL
to_free = MAX(to_free, need_free);
#endif
arc_shrink(to_free);
}
} else if (free_memory < arc_c >> arc_no_grow_shift) {
arc_no_grow = B_TRUE;
} else if (gethrtime() >= growtime) {
arc_no_grow = B_FALSE;
}
mutex_enter(&arc_reclaim_lock);
/*
* If evicted is zero, we couldn't evict anything via
* arc_adjust(). This could be due to hash lock
* collisions, but more likely due to the majority of
* arc buffers being unevictable. Therefore, even if
* arc_size is above arc_c, another pass is unlikely to
* be helpful and could potentially cause us to enter an
* infinite loop.
*/
if (arc_size <= arc_c || evicted == 0) {
/*
* We're either no longer overflowing, or we
* can't evict anything more, so we should wake
* up any threads before we go to sleep and remove
* the bytes we were working on from arc_need_free
* since nothing more will be done here.
*/
cv_broadcast(&arc_reclaim_waiters_cv);
ARCSTAT_INCR(arcstat_need_free, -need_free);
/*
* Block until signaled, or after one second (we
* might need to perform arc_kmem_reap_now()
* even if we aren't being signalled)
*/
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_sig_hires(&arc_reclaim_thread_cv,
&arc_reclaim_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
CALLB_CPR_SAFE_END(&cpr, &arc_reclaim_lock);
}
}
arc_reclaim_thread_exit = B_FALSE;
cv_broadcast(&arc_reclaim_thread_cv);
CALLB_CPR_EXIT(&cpr); /* drops arc_reclaim_lock */
spl_fstrans_unmark(cookie);
thread_exit();
}
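/*
 * Worked example of the shrink step above (illustrative values): with
 * arc_c = 4 GiB and an assumed default arc_shrink_shift of 7,
 * arc_c >> arc_shrink_shift is 32 MiB. If arc_available_memory()
 * reports a 16 MiB deficit (free_memory = -16 MiB), then
 * to_free = 32 MiB - (-16 MiB) = 48 MiB and arc_shrink(48 MiB) lowers
 * arc_c accordingly, leaving roughly 32 MiB of headroom once the
 * deficit is covered.
 */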
#ifdef _KERNEL
/*
* Determine the amount of memory eligible for eviction contained in the
* ARC. All clean data reported by the ghost lists can always be safely
* evicted. Due to arc_c_min, the same does not hold for all clean data
* contained by the regular mru and mfu lists.
*
* In the case of the regular mru and mfu lists, we need to report as
* much clean data as possible, such that evicting that same reported
* data will not bring arc_size below arc_c_min. Thus, in certain
* circumstances, the total amount of clean data in the mru and mfu
* lists might not actually be evictable.
*
* The following two distinct cases are accounted for:
*
* 1. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is greater than or equal to arc_c_min.
* (i.e. amount of dirty data >= arc_c_min)
*
* This is the easy case; all clean data contained by the mru and mfu
* lists is evictable. Evicting all clean data can only drop arc_size
* to the amount of dirty data, which is greater than arc_c_min.
*
* 2. The sum of the amount of dirty data contained by both the mru and
* mfu lists, plus the ARC's other accounting (e.g. the anon list),
* is less than arc_c_min.
* (i.e. arc_c_min > amount of dirty data)
*
* 2.1. arc_size is greater than or equal arc_c_min.
* (i.e. arc_size >= arc_c_min > amount of dirty data)
*
* In this case, not all clean data from the regular mru and mfu
* lists is actually evictable; we must leave enough clean data
* to keep arc_size above arc_c_min. Thus, the maximum amount of
* evictable data from the two lists combined, is exactly the
* difference between arc_size and arc_c_min.
*
* 2.2. arc_size is less than arc_c_min
* (i.e. arc_c_min > arc_size > amount of dirty data)
*
* In this case, none of the data contained in the mru and mfu
* lists is evictable, even if it's clean. Since arc_size is
* already below arc_c_min, evicting any more would only
* increase this negative difference.
*/
static uint64_t
arc_evictable_memory(void)
{
uint64_t arc_clean =
refcount_count(&arc_mru->arcs_esize[ARC_BUFC_DATA]) +
refcount_count(&arc_mru->arcs_esize[ARC_BUFC_METADATA]) +
refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_DATA]) +
refcount_count(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
uint64_t arc_dirty = MAX((int64_t)arc_size - (int64_t)arc_clean, 0);
/*
* Scale reported evictable memory in proportion to page cache, cap
* at specified min/max.
*/
uint64_t min = (ptob(nr_file_pages()) / 100) * zfs_arc_pc_percent;
min = MAX(arc_c_min, MIN(arc_c_max, min));
if (arc_dirty >= min)
return (arc_clean);
return (MAX((int64_t)arc_size - (int64_t)min, 0));
}
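/*
 * Worked example of the cases described above (illustrative numbers):
 * with arc_size = 3 GiB, arc_clean = 2.5 GiB and an effective floor
 * of min = 2 GiB (arc_c_min dominating the page cache scaling),
 * arc_dirty = 0.5 GiB is below the floor, so only
 * arc_size - min = 1 GiB is reported as evictable even though
 * 2.5 GiB is clean (case 2.1 above). If instead arc_dirty were
 * already >= 2 GiB, the full 2.5 GiB of clean data would be reported
 * (case 1).
 */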
/*
* If sc->nr_to_scan is zero, the caller is requesting a query of the
* number of objects which can potentially be freed. If it is nonzero,
* the request is to free that many objects.
*
* Linux kernels >= 3.12 have the count_objects and scan_objects callbacks
* in struct shrinker and also require the shrinker to return the number
* of objects freed.
*
* Older kernels require the shrinker to return the number of freeable
* objects following the freeing of nr_to_free.
*/
static spl_shrinker_t
__arc_shrinker_func(struct shrinker *shrink, struct shrink_control *sc)
{
int64_t pages;
/* The arc is considered warm once reclaim has occurred */
if (unlikely(arc_warm == B_FALSE))
arc_warm = B_TRUE;
/* Return the potential number of reclaimable pages */
pages = btop((int64_t)arc_evictable_memory());
if (sc->nr_to_scan == 0)
return (pages);
/* Not allowed to perform filesystem reclaim */
if (!(sc->gfp_mask & __GFP_FS))
return (SHRINK_STOP);
/* Reclaim in progress */
if (mutex_tryenter(&arc_reclaim_lock) == 0) {
ARCSTAT_INCR(arcstat_need_free, ptob(sc->nr_to_scan));
return (0);
}
mutex_exit(&arc_reclaim_lock);
/*
* Evict the requested number of pages by shrinking arc_c the
* requested amount.
*/
if (pages > 0) {
arc_shrink(ptob(sc->nr_to_scan));
if (current_is_kswapd())
arc_kmem_reap_now();
#ifdef HAVE_SPLIT_SHRINKER_CALLBACK
pages = MAX((int64_t)pages -
(int64_t)btop(arc_evictable_memory()), 0);
#else
pages = btop(arc_evictable_memory());
#endif
/*
* We've shrunk what we can, wake up threads.
*/
cv_broadcast(&arc_reclaim_waiters_cv);
} else
pages = SHRINK_STOP;
/*
* When direct reclaim is observed it usually indicates a rapid
* increase in memory pressure. This occurs because the kswapd
* threads were unable to asynchronously keep enough free memory
* available. In this case set arc_no_grow to briefly pause arc
* growth to avoid compounding the memory pressure.
*/
if (current_is_kswapd()) {
ARCSTAT_BUMP(arcstat_memory_indirect_count);
} else {
arc_no_grow = B_TRUE;
arc_kmem_reap_now();
ARCSTAT_BUMP(arcstat_memory_direct_count);
}
return (pages);
}
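/*
 * Illustrative arithmetic for the shrinker callback above (assuming
 * 4 KiB pages): a query with sc->nr_to_scan == 0 simply returns
 * btop(arc_evictable_memory()), e.g. ~262,144 pages for 1 GiB of
 * evictable data. A scan request with nr_to_scan = 128 triggers
 * arc_shrink(ptob(128)) = arc_shrink(512 KiB), and the return value
 * is the number of pages the caller may consider freed (or still
 * freeable, depending on the kernel interface described above).
 */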
SPL_SHRINKER_CALLBACK_WRAPPER(arc_shrinker_func);
SPL_SHRINKER_DECLARE(arc_shrinker, arc_shrinker_func, DEFAULT_SEEKS);
#endif /* _KERNEL */
/*
* Adapt arc info given the number of bytes we are trying to add and
* the state that we are coming from. This function is only called
* when we are adding new content to the cache.
*/
static void
arc_adapt(int bytes, arc_state_t *state)
{
int mult;
uint64_t arc_p_min = (arc_c >> arc_p_min_shift);
int64_t mrug_size = refcount_count(&arc_mru_ghost->arcs_size);
int64_t mfug_size = refcount_count(&arc_mfu_ghost->arcs_size);
if (state == arc_l2c_only)
return;
ASSERT(bytes > 0);
/*
* Adapt the target size of the MRU list:
* - if we just hit in the MRU ghost list, then increase
* the target size of the MRU list.
* - if we just hit in the MFU ghost list, then increase
* the target size of the MFU list by decreasing the
* target size of the MRU list.
*/
if (state == arc_mru_ghost) {
mult = (mrug_size >= mfug_size) ? 1 : (mfug_size / mrug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10); /* avoid wild arc_p adjustment */
arc_p = MIN(arc_c - arc_p_min, arc_p + bytes * mult);
} else if (state == arc_mfu_ghost) {
uint64_t delta;
mult = (mfug_size >= mrug_size) ? 1 : (mrug_size / mfug_size);
if (!zfs_arc_p_dampener_disable)
mult = MIN(mult, 10);
delta = MIN(bytes * mult, arc_p);
arc_p = MAX(arc_p_min, arc_p - delta);
}
ASSERT((int64_t)arc_p >= 0);
if (arc_reclaim_needed()) {
cv_signal(&arc_reclaim_thread_cv);
return;
}
if (arc_no_grow)
return;
if (arc_c >= arc_c_max)
return;
/*
* If we're within (2 * maxblocksize) bytes of the target
* cache size, increment the target cache size
*/
ASSERT3U(arc_c, >=, 2ULL << SPA_MAXBLOCKSHIFT);
if (arc_size >= arc_c - (2ULL << SPA_MAXBLOCKSHIFT)) {
atomic_add_64(&arc_c, (int64_t)bytes);
if (arc_c > arc_c_max)
arc_c = arc_c_max;
else if (state == arc_anon)
atomic_add_64(&arc_p, (int64_t)bytes);
if (arc_p > arc_c)
arc_p = arc_c;
}
ASSERT((int64_t)arc_p >= 0);
}
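/*
 * Worked example of the ghost-hit adaptation above (illustrative
 * sizes): on an MRU-ghost hit with mrug_size = 1 GiB and
 * mfug_size = 4 GiB, mult = 4 (the MFU ghost list is four times
 * larger), so arc_p grows by 4 * bytes, capped at arc_c - arc_p_min.
 * A hit in the MFU ghost list with the same sizes uses mult = 1 and
 * shrinks arc_p by bytes, but never below arc_p_min.
 */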
/*
* Check if arc_size has grown past our upper threshold, determined by
* zfs_arc_overflow_shift.
*/
static boolean_t
arc_is_overflowing(void)
{
/* Always allow at least one block of overflow */
uint64_t overflow = MAX(SPA_MAXBLOCKSIZE,
arc_c >> zfs_arc_overflow_shift);
return (arc_size >= arc_c + overflow);
}
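/*
 * Illustrative numbers for the overflow check above: with
 * arc_c = 4 GiB and an assumed default zfs_arc_overflow_shift of 8,
 * arc_c >> 8 is 16 MiB, which matches SPA_MAXBLOCKSIZE, so the
 * overflow allowance is 16 MiB and arc_is_overflowing() returns
 * B_TRUE only once arc_size reaches 4 GiB + 16 MiB.
 */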
static abd_t *
arc_get_data_abd(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
return (abd_alloc(size, B_TRUE));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (abd_alloc(size, B_FALSE));
}
}
static void *
arc_get_data_buf(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_get_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
return (zio_buf_alloc(size));
} else {
ASSERT(type == ARC_BUFC_DATA);
return (zio_data_buf_alloc(size));
}
}
/*
* Allocate a block and return it to the caller. If we are hitting the
* hard limit for the cache size, we must sleep, waiting for the eviction
* thread to catch up. If we're past the target size but below the hard
* limit, we'll only signal the reclaim thread and continue on.
*/
static void
arc_get_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
arc_adapt(size, state);
/*
* If arc_size is currently overflowing, and has grown past our
* upper limit, we must be adding data faster than the evict
* thread can evict. Thus, to ensure we don't compound the
* problem by adding more data and forcing arc_size to grow even
* further past its target size, we halt and wait for the
* eviction thread to catch up.
*
* It's also possible that the reclaim thread is unable to evict
* enough buffers to get arc_size below the overflow limit (e.g.
* due to buffers being un-evictable, or hash lock collisions).
* In this case, we want to proceed regardless if we're
* overflowing; thus we don't use a while loop here.
*/
if (arc_is_overflowing()) {
mutex_enter(&arc_reclaim_lock);
/*
* Now that we've acquired the lock, we may no longer be
* over the overflow limit; let's check.
*
* We're ignoring the case of spurious wake ups. If that
* were to happen, it'd let this thread consume an ARC
* buffer before it should have (i.e. before we're under
* the overflow limit and were signalled by the reclaim
* thread). As long as that is a rare occurrence, it
* shouldn't cause any harm.
*/
if (arc_is_overflowing()) {
cv_signal(&arc_reclaim_thread_cv);
cv_wait(&arc_reclaim_waiters_cv, &arc_reclaim_lock);
}
mutex_exit(&arc_reclaim_lock);
}
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_consume(size, ARC_SPACE_META);
} else {
arc_space_consume(size, ARC_SPACE_DATA);
}
/*
* Update the state size. Note that ghost states have a
* "ghost size" and so don't need to be updated.
*/
if (!GHOST_STATE(state)) {
(void) refcount_add_many(&state->arcs_size, size, tag);
/*
* If this is reached via arc_read, the link is
* protected by the hash lock. If reached via
* arc_buf_alloc, the header should not be accessed by
* any other thread. And, if reached via arc_read_done,
* the hash lock will protect it if it's found in the
* hash table; otherwise no other thread should be
* trying to [add|remove]_reference it.
*/
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
(void) refcount_add_many(&state->arcs_esize[type],
size, tag);
}
/*
* If we are growing the cache, and we are adding anonymous
* data, and we have outgrown arc_p, update arc_p
*/
if (arc_size < arc_c && hdr->b_l1hdr.b_state == arc_anon &&
(refcount_count(&arc_anon->arcs_size) +
refcount_count(&arc_mru->arcs_size) > arc_p))
arc_p = MIN(arc_c, arc_p + size);
}
}
static void
arc_free_data_abd(arc_buf_hdr_t *hdr, abd_t *abd, uint64_t size, void *tag)
{
arc_free_data_impl(hdr, size, tag);
abd_free(abd);
}
static void
arc_free_data_buf(arc_buf_hdr_t *hdr, void *buf, uint64_t size, void *tag)
{
arc_buf_contents_t type = arc_buf_type(hdr);
arc_free_data_impl(hdr, size, tag);
if (type == ARC_BUFC_METADATA) {
zio_buf_free(buf, size);
} else {
ASSERT(type == ARC_BUFC_DATA);
zio_data_buf_free(buf, size);
}
}
/*
* Free the arc data buffer.
*/
static void
arc_free_data_impl(arc_buf_hdr_t *hdr, uint64_t size, void *tag)
{
arc_state_t *state = hdr->b_l1hdr.b_state;
arc_buf_contents_t type = arc_buf_type(hdr);
/* protected by hash lock, if in the hash table */
if (multilist_link_active(&hdr->b_l1hdr.b_arc_node)) {
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
ASSERT(state != arc_anon && state != arc_l2c_only);
(void) refcount_remove_many(&state->arcs_esize[type],
size, tag);
}
(void) refcount_remove_many(&state->arcs_size, size, tag);
VERIFY3U(hdr->b_type, ==, type);
if (type == ARC_BUFC_METADATA) {
arc_space_return(size, ARC_SPACE_META);
} else {
ASSERT(type == ARC_BUFC_DATA);
arc_space_return(size, ARC_SPACE_DATA);
}
}
/*
* This routine is called whenever a buffer is accessed.
* NOTE: the hash lock is dropped in this function.
*/
static void
arc_access(arc_buf_hdr_t *hdr, kmutex_t *hash_lock)
{
clock_t now;
ASSERT(MUTEX_HELD(hash_lock));
ASSERT(HDR_HAS_L1HDR(hdr));
if (hdr->b_l1hdr.b_state == arc_anon) {
/*
* This buffer is not in the cache, and does not
* appear in our "ghost" list. Add the new buffer
* to the MRU state.
*/
ASSERT0(hdr->b_l1hdr.b_arc_access);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mru, hdr, hash_lock);
} else if (hdr->b_l1hdr.b_state == arc_mru) {
now = ddi_get_lbolt();
/*
* If this buffer is here because of a prefetch, then either:
* - clear the flag if this is a "referencing" read
* (any subsequent access will bump this into the MFU state).
* or
* - move the buffer to the head of the list if this is
* another prefetch (to make it less likely to be evicted).
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
if (refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
/* link protected by hash lock */
ASSERT(multilist_link_active(
&hdr->b_l1hdr.b_arc_node));
} else {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
atomic_inc_32(&hdr->b_l1hdr.b_mru_hits);
ARCSTAT_BUMP(arcstat_mru_hits);
}
hdr->b_l1hdr.b_arc_access = now;
return;
}
/*
* This buffer has been "accessed" only once so far,
* but it is still in the cache. Move it to the MFU
* state.
*/
if (ddi_time_after(now, hdr->b_l1hdr.b_arc_access +
ARC_MINTIME)) {
/*
* More than 125ms have passed since we
* instantiated this buffer. Move it to the
* most frequently used state.
*/
hdr->b_l1hdr.b_arc_access = now;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
}
atomic_inc_32(&hdr->b_l1hdr.b_mru_hits);
ARCSTAT_BUMP(arcstat_mru_hits);
} else if (hdr->b_l1hdr.b_state == arc_mru_ghost) {
arc_state_t *new_state;
/*
* This buffer has been "accessed" recently, but
* was evicted from the cache. Move it to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
new_state = arc_mru;
if (refcount_count(&hdr->b_l1hdr.b_refcnt) > 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREFETCH |
ARC_FLAG_PRESCIENT_PREFETCH);
}
DTRACE_PROBE1(new_state__mru, arc_buf_hdr_t *, hdr);
} else {
new_state = arc_mfu;
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
arc_change_state(new_state, hdr, hash_lock);
atomic_inc_32(&hdr->b_l1hdr.b_mru_ghost_hits);
ARCSTAT_BUMP(arcstat_mru_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_mfu) {
/*
* This buffer has been accessed more than once and is
* still in the cache. Keep it in the MFU state.
*
* NOTE: an add_reference() that occurred when we did
* the arc_read() will have kicked this off the list.
* If it was a prefetch, we will explicitly move it to
* the head of the list now.
*/
atomic_inc_32(&hdr->b_l1hdr.b_mfu_hits);
ARCSTAT_BUMP(arcstat_mfu_hits);
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
} else if (hdr->b_l1hdr.b_state == arc_mfu_ghost) {
arc_state_t *new_state = arc_mfu;
/*
* This buffer has been accessed more than once but has
* been evicted from the cache. Move it back to the
* MFU state.
*/
if (HDR_PREFETCH(hdr) || HDR_PRESCIENT_PREFETCH(hdr)) {
/*
* This is a prefetch access...
* move this block back to the MRU state.
*/
new_state = arc_mru;
}
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(new_state, hdr, hash_lock);
atomic_inc_32(&hdr->b_l1hdr.b_mfu_ghost_hits);
ARCSTAT_BUMP(arcstat_mfu_ghost_hits);
} else if (hdr->b_l1hdr.b_state == arc_l2c_only) {
/*
* This buffer is on the 2nd Level ARC.
*/
hdr->b_l1hdr.b_arc_access = ddi_get_lbolt();
DTRACE_PROBE1(new_state__mfu, arc_buf_hdr_t *, hdr);
arc_change_state(arc_mfu, hdr, hash_lock);
} else {
cmn_err(CE_PANIC, "invalid arc state 0x%p",
hdr->b_l1hdr.b_state);
}
}
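/*
 * Illustrative lifecycle trace through the state machine above: a
 * newly read demand buffer starts in arc_anon and is moved to arc_mru
 * on this first access. A second access within ARC_MINTIME leaves it
 * in arc_mru; a second access after ARC_MINTIME promotes it to
 * arc_mfu. If the buffer is later evicted to arc_mru_ghost or
 * arc_mfu_ghost and then hit again, a demand access brings it back in
 * under arc_mfu, while a prefetch access returns it to arc_mru
 * instead.
 */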
/*
* This routine is called by dbuf_hold() to update the arc_access() state
* which otherwise would be skipped for entries in the dbuf cache.
*/
void
arc_buf_access(arc_buf_t *buf)
{
mutex_enter(&buf->b_evict_lock);
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* Avoid taking the hash_lock when possible as an optimization.
* The header must be checked again under the hash_lock in order
* to handle the case where it is concurrently being released.
*/
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(&buf->b_evict_lock);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
if (hdr->b_l1hdr.b_state == arc_anon || HDR_EMPTY(hdr)) {
mutex_exit(hash_lock);
mutex_exit(&buf->b_evict_lock);
ARCSTAT_BUMP(arcstat_access_skip);
return;
}
mutex_exit(&buf->b_evict_lock);
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr) && !HDR_PRESCIENT_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr), data, metadata, hits);
}
/* a generic arc_read_done_func_t which you can use */
/* ARGSUSED */
void
arc_bcopy_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
if (buf == NULL)
return;
bcopy(buf->b_data, arg, arc_buf_size(buf));
arc_buf_destroy(buf, arg);
}
/* a generic arc_read_done_func_t */
/* ARGSUSED */
void
arc_getbuf_func(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *arg)
{
arc_buf_t **bufp = arg;
if (buf == NULL) {
*bufp = NULL;
} else {
*bufp = buf;
ASSERT(buf->b_data);
}
}
static void
arc_hdr_verify(arc_buf_hdr_t *hdr, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
ASSERT3U(HDR_GET_PSIZE(hdr), ==, 0);
ASSERT3U(arc_hdr_get_compress(hdr), ==, ZIO_COMPRESS_OFF);
} else {
if (HDR_COMPRESSION_ENABLED(hdr)) {
ASSERT3U(arc_hdr_get_compress(hdr), ==,
BP_GET_COMPRESS(bp));
}
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
ASSERT3U(HDR_GET_PSIZE(hdr), ==, BP_GET_PSIZE(bp));
ASSERT3U(!!HDR_PROTECTED(hdr), ==, BP_IS_PROTECTED(bp));
}
}
static void
arc_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
arc_buf_hdr_t *hdr = zio->io_private;
kmutex_t *hash_lock = NULL;
arc_callback_t *callback_list;
arc_callback_t *acb;
boolean_t freeable = B_FALSE;
/*
* The hdr was inserted into hash-table and removed from lists
* prior to starting I/O. We should find this header, since
* it's in the hash table, and it should be legit since it's
* not possible to evict it during the I/O. The only possible
* reason for it not to be found is if we were freed during the
* read.
*/
if (HDR_IN_HASH_TABLE(hdr)) {
arc_buf_hdr_t *found;
ASSERT3U(hdr->b_birth, ==, BP_PHYSICAL_BIRTH(zio->io_bp));
ASSERT3U(hdr->b_dva.dva_word[0], ==,
BP_IDENTITY(zio->io_bp)->dva_word[0]);
ASSERT3U(hdr->b_dva.dva_word[1], ==,
BP_IDENTITY(zio->io_bp)->dva_word[1]);
found = buf_hash_find(hdr->b_spa, zio->io_bp, &hash_lock);
ASSERT((found == hdr &&
DVA_EQUAL(&hdr->b_dva, BP_IDENTITY(zio->io_bp))) ||
(found == hdr && HDR_L2_READING(hdr)));
ASSERT3P(hash_lock, !=, NULL);
}
if (BP_IS_PROTECTED(bp)) {
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
if (BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG) {
void *tmpbuf;
tmpbuf = abd_borrow_buf_copy(zio->io_abd,
sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmpbuf,
hdr->b_crypt_hdr.b_mac);
abd_return_buf(zio->io_abd, tmpbuf,
sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
}
if (zio->io_error == 0) {
/* byteswap if necessary */
if (BP_SHOULD_BYTESWAP(zio->io_bp)) {
if (BP_GET_LEVEL(zio->io_bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(zio->io_bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_EVICTED);
if (l2arc_noprefetch && HDR_PREFETCH(hdr))
arc_hdr_clear_flags(hdr, ARC_FLAG_L2CACHE);
callback_list = hdr->b_l1hdr.b_acb;
ASSERT3P(callback_list, !=, NULL);
if (hash_lock && zio->io_error == 0 &&
hdr->b_l1hdr.b_state == arc_anon) {
/*
* Only call arc_access on anonymous buffers. This is because
* if we've issued an I/O for an evicted buffer, we've already
* called arc_access (to prevent any simultaneous readers from
* getting confused).
*/
arc_access(hdr, hash_lock);
}
/*
* If a read request has a callback (i.e. acb_done is not NULL), then we
* make a buf containing the data according to the parameters which were
* passed in. The implementation of arc_buf_alloc_impl() ensures that we
* aren't needlessly decompressing the data multiple times.
*/
int callback_cnt = 0;
for (acb = callback_list; acb != NULL; acb = acb->acb_next) {
if (!acb->acb_done)
continue;
callback_cnt++;
if (zio->io_error != 0)
continue;
int error = arc_buf_alloc_impl(hdr, zio->io_spa,
acb->acb_dsobj, acb->acb_private, acb->acb_encrypted,
acb->acb_compressed, acb->acb_noauth, B_TRUE,
&acb->acb_buf);
if (error != 0) {
arc_buf_destroy(acb->acb_buf, acb->acb_private);
acb->acb_buf = NULL;
}
/*
* Assert non-speculative zios didn't fail because an
* encryption key wasn't loaded
*/
ASSERT((zio->io_flags & ZIO_FLAG_SPECULATIVE) ||
error != ENOENT);
/*
* If we failed to decrypt, report an error now (as the zio
* layer would have done if it had done the transforms).
*/
if (error == ECKSUM) {
ASSERT(BP_IS_PROTECTED(bp));
error = SET_ERROR(EIO);
spa_log_error(zio->io_spa, &zio->io_bookmark);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
zio->io_spa, NULL, &zio->io_bookmark, zio,
0, 0);
}
}
if (zio->io_error == 0)
zio->io_error = error;
}
hdr->b_l1hdr.b_acb = NULL;
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (callback_cnt == 0)
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT(refcount_is_zero(&hdr->b_l1hdr.b_refcnt) ||
callback_list != NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
} else {
arc_hdr_set_flags(hdr, ARC_FLAG_IO_ERROR);
if (hdr->b_l1hdr.b_state != arc_anon)
arc_change_state(arc_anon, hdr, hash_lock);
if (HDR_IN_HASH_TABLE(hdr))
buf_hash_remove(hdr);
freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/*
* Broadcast before we drop the hash_lock to avoid the possibility
* that the hdr (and hence the cv) might be freed before we get to
* the cv_broadcast().
*/
cv_broadcast(&hdr->b_l1hdr.b_cv);
if (hash_lock != NULL) {
mutex_exit(hash_lock);
} else {
/*
* This block was freed while we waited for the read to
* complete. It has been removed from the hash table and
* moved to the anonymous state (so that it won't show up
* in the cache).
*/
ASSERT3P(hdr->b_l1hdr.b_state, ==, arc_anon);
freeable = refcount_is_zero(&hdr->b_l1hdr.b_refcnt);
}
/* execute each callback and free its structure */
while ((acb = callback_list) != NULL) {
if (acb->acb_done) {
acb->acb_done(zio, &zio->io_bookmark, zio->io_bp,
acb->acb_buf, acb->acb_private);
}
if (acb->acb_zio_dummy != NULL) {
acb->acb_zio_dummy->io_error = zio->io_error;
zio_nowait(acb->acb_zio_dummy);
}
callback_list = acb->acb_next;
kmem_free(acb, sizeof (arc_callback_t));
}
if (freeable)
arc_hdr_destroy(hdr);
}
/*
* "Read" the block at the specified DVA (in bp) via the
* cache. If the block is found in the cache, invoke the provided
* callback immediately and return. Note that the `zio' parameter
* in the callback will be NULL in this case, since no IO was
* required. If the block is not in the cache, pass the read request
* on to the spa with a substitute callback function, so that the
* requested block will be added to the cache.
*
* If a read request arrives for a block that has a read in-progress,
* either wait for the in-progress read to complete (and return the
* results); or, if this is a read with a "done" func, add a record
* to the read to invoke the "done" func when the read completes,
* and return; or just return.
*
* arc_read_done() will invoke all the requested "done" functions
* for readers of this block.
*/
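/*
 * A minimal usage sketch for the interface described above
 * (hypothetical caller, shown only for illustration): a synchronous
 * read can pass arc_getbuf_func as the "done" callback together with
 * ARC_FLAG_WAIT, e.g.
 *
 *	arc_flags_t aflags = ARC_FLAG_WAIT;
 *	arc_buf_t *abuf = NULL;
 *	int err = arc_read(NULL, spa, bp, arc_getbuf_func, &abuf,
 *	    ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, zb);
 *
 * On success abuf points at a buffer holding the block's data and
 * must eventually be released with arc_buf_destroy().
 */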
int
arc_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
arc_read_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, arc_flags_t *arc_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = NULL;
kmutex_t *hash_lock = NULL;
zio_t *rzio;
uint64_t guid = spa_load_guid(spa);
boolean_t compressed_read = (zio_flags & ZIO_FLAG_RAW_COMPRESS) != 0;
boolean_t encrypted_read = BP_IS_ENCRYPTED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
boolean_t noauth_read = BP_IS_AUTHENTICATED(bp) &&
(zio_flags & ZIO_FLAG_RAW_ENCRYPT) != 0;
int rc = 0;
ASSERT(!BP_IS_EMBEDDED(bp) ||
BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA);
top:
if (!BP_IS_EMBEDDED(bp)) {
/*
* Embedded BP's have no DVA and require no I/O to "read";
* an anonymous arc buf is created for them below, so skip the
* hash lookup here.
*/
hdr = buf_hash_find(guid, bp, &hash_lock);
}
/*
* Determine if we have an L1 cache hit or a cache miss. For simplicity
* we maintain encrypted data separately from compressed / uncompressed
* data. If the user is requesting raw encrypted data and we don't have
* that in the header we will read from disk to guarantee that we can
* get it even if the encryption keys aren't loaded.
*/
if (hdr != NULL && HDR_HAS_L1HDR(hdr) && (HDR_HAS_RABD(hdr) ||
(hdr->b_l1hdr.b_pabd != NULL && !encrypted_read))) {
arc_buf_t *buf = NULL;
*arc_flags |= ARC_FLAG_CACHED;
if (HDR_IO_IN_PROGRESS(hdr)) {
zio_t *head_zio = hdr->b_l1hdr.b_acb->acb_zio_head;
ASSERT3P(head_zio, !=, NULL);
if ((hdr->b_flags & ARC_FLAG_PRIO_ASYNC_READ) &&
priority == ZIO_PRIORITY_SYNC_READ) {
/*
* This is a sync read that needs to wait for
* an in-flight async read. Request that the
* zio have its priority upgraded.
*/
zio_change_priority(head_zio, priority);
DTRACE_PROBE1(arc__async__upgrade__sync,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_async_upgrade_sync);
}
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
if (*arc_flags & ARC_FLAG_WAIT) {
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
if (done) {
arc_callback_t *acb = NULL;
acb = kmem_zalloc(sizeof (arc_callback_t),
KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_dsobj = zb->zb_objset;
if (pio != NULL)
acb->acb_zio_dummy = zio_null(pio,
spa, NULL, NULL, NULL, zio_flags);
ASSERT3P(acb->acb_done, !=, NULL);
acb->acb_zio_head = head_zio;
acb->acb_next = hdr->b_l1hdr.b_acb;
hdr->b_l1hdr.b_acb = acb;
mutex_exit(hash_lock);
goto out;
}
mutex_exit(hash_lock);
goto out;
}
ASSERT(hdr->b_l1hdr.b_state == arc_mru ||
hdr->b_l1hdr.b_state == arc_mfu);
if (done) {
if (hdr->b_flags & ARC_FLAG_PREDICTIVE_PREFETCH) {
/*
* This is a demand read which does not have to
* wait for i/o because we did a predictive
* prefetch i/o for it, which has completed.
*/
DTRACE_PROBE1(
arc__demand__hit__predictive__prefetch,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(
arcstat_demand_hit_predictive_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PREDICTIVE_PREFETCH);
}
if (hdr->b_flags & ARC_FLAG_PRESCIENT_PREFETCH) {
ARCSTAT_BUMP(
arcstat_demand_hit_prescient_prefetch);
arc_hdr_clear_flags(hdr,
ARC_FLAG_PRESCIENT_PREFETCH);
}
ASSERT(!BP_IS_EMBEDDED(bp) || !BP_IS_HOLE(bp));
/* Get a buf with the desired data in it. */
rc = arc_buf_alloc_impl(hdr, spa, zb->zb_objset,
private, encrypted_read, compressed_read,
noauth_read, B_TRUE, &buf);
if (rc == ECKSUM) {
/*
* Convert authentication and decryption errors
* to EIO (and generate an ereport) before
* leaving the ARC.
*/
rc = SET_ERROR(EIO);
zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, zb, NULL, 0, 0);
}
if (rc != 0) {
arc_buf_destroy(buf, private);
buf = NULL;
}
/* assert any errors weren't due to unloaded keys */
ASSERT((zio_flags & ZIO_FLAG_SPECULATIVE) ||
rc != ENOENT);
} else if (*arc_flags & ARC_FLAG_PREFETCH &&
refcount_count(&hdr->b_l1hdr.b_refcnt) == 0) {
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
}
DTRACE_PROBE1(arc__hit, arc_buf_hdr_t *, hdr);
arc_access(hdr, hash_lock);
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
mutex_exit(hash_lock);
ARCSTAT_BUMP(arcstat_hits);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
data, metadata, hits);
if (done)
done(NULL, zb, bp, buf, private);
} else {
uint64_t lsize = BP_GET_LSIZE(bp);
uint64_t psize = BP_GET_PSIZE(bp);
arc_callback_t *acb;
vdev_t *vd = NULL;
uint64_t addr = 0;
boolean_t devw = B_FALSE;
uint64_t size;
abd_t *hdr_abd;
/*
* Gracefully handle a damaged logical block size as a
* checksum error.
*/
if (lsize > spa_maxblocksize(spa)) {
rc = SET_ERROR(ECKSUM);
goto out;
}
if (hdr == NULL) {
/* this block is not in the cache */
arc_buf_hdr_t *exists = NULL;
arc_buf_contents_t type = BP_GET_BUFC_TYPE(bp);
hdr = arc_hdr_alloc(spa_load_guid(spa), psize, lsize,
BP_IS_PROTECTED(bp), BP_GET_COMPRESS(bp), type,
encrypted_read);
if (!BP_IS_EMBEDDED(bp)) {
hdr->b_dva = *BP_IDENTITY(bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(bp);
exists = buf_hash_insert(hdr, &hash_lock);
}
if (exists != NULL) {
/* somebody beat us to the hash insert */
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_hdr_destroy(hdr);
goto top; /* restart the IO request */
}
} else {
/*
* This block is in the ghost cache or encrypted data
* was requested and we didn't have it. If it was
* L2-only (and thus didn't have an L1 hdr),
* we realloc the header to add an L1 hdr.
*/
if (!HDR_HAS_L1HDR(hdr)) {
hdr = arc_hdr_realloc(hdr, hdr_l2only_cache,
hdr_full_cache);
}
if (GHOST_STATE(hdr->b_l1hdr.b_state)) {
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT0(refcount_count(&hdr->b_l1hdr.b_refcnt));
ASSERT3P(hdr->b_l1hdr.b_buf, ==, NULL);
ASSERT3P(hdr->b_l1hdr.b_freeze_cksum, ==, NULL);
} else if (HDR_IO_IN_PROGRESS(hdr)) {
/*
* If this header already had an IO in progress
* and we are performing another IO to fetch
* encrypted data we must wait until the first
* IO completes so as not to confuse
* arc_read_done(). This should be very rare
* and so the performance impact shouldn't
* matter.
*/
cv_wait(&hdr->b_l1hdr.b_cv, hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* This is a delicate dance that we play here.
* This hdr might be in the ghost list so we access
* it to move it out of the ghost list before we
* initiate the read. If it's a prefetch then
* it won't have a callback so we'll remove the
* reference that arc_buf_alloc_impl() created. We
* do this after we've called arc_access() to
* avoid hitting an assert in remove_reference().
*/
arc_access(hdr, hash_lock);
arc_hdr_alloc_abd(hdr, encrypted_read);
}
if (encrypted_read) {
ASSERT(HDR_HAS_RABD(hdr));
size = HDR_GET_PSIZE(hdr);
hdr_abd = hdr->b_crypt_hdr.b_rabd;
zio_flags |= ZIO_FLAG_RAW;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
size = arc_hdr_size(hdr);
hdr_abd = hdr->b_l1hdr.b_pabd;
if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF) {
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
/*
* For authenticated bp's, we do not ask the ZIO layer
* to authenticate them since this will cause the entire
* IO to fail if the key isn't loaded. Instead, we
* defer authentication until arc_buf_fill(), which will
* verify the data when the key is available.
*/
if (BP_IS_AUTHENTICATED(bp))
zio_flags |= ZIO_FLAG_RAW_ENCRYPT;
}
if (*arc_flags & ARC_FLAG_PREFETCH &&
refcount_is_zero(&hdr->b_l1hdr.b_refcnt))
arc_hdr_set_flags(hdr, ARC_FLAG_PREFETCH);
if (*arc_flags & ARC_FLAG_PRESCIENT_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PRESCIENT_PREFETCH);
if (*arc_flags & ARC_FLAG_L2CACHE)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (BP_IS_AUTHENTICATED(bp))
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
if (BP_GET_LEVEL(bp) > 0)
arc_hdr_set_flags(hdr, ARC_FLAG_INDIRECT);
if (*arc_flags & ARC_FLAG_PREDICTIVE_PREFETCH)
arc_hdr_set_flags(hdr, ARC_FLAG_PREDICTIVE_PREFETCH);
ASSERT(!GHOST_STATE(hdr->b_l1hdr.b_state));
acb = kmem_zalloc(sizeof (arc_callback_t), KM_SLEEP);
acb->acb_done = done;
acb->acb_private = private;
acb->acb_compressed = compressed_read;
acb->acb_encrypted = encrypted_read;
acb->acb_noauth = noauth_read;
acb->acb_dsobj = zb->zb_objset;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
hdr->b_l1hdr.b_acb = acb;
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (HDR_HAS_L2HDR(hdr) &&
(vd = hdr->b_l2hdr.b_dev->l2ad_vdev) != NULL) {
devw = hdr->b_l2hdr.b_dev->l2ad_writing;
addr = hdr->b_l2hdr.b_daddr;
/*
- * Lock out device removal.
+ * Lock out L2ARC device removal.
*/
if (vdev_is_dead(vd) ||
!spa_config_tryenter(spa, SCL_L2ARC, vd, RW_READER))
vd = NULL;
}
/*
* We count both async reads and scrub IOs as asynchronous so
* that both can be upgraded in the event of a cache hit while
* the read IO is still in-flight.
*/
if (priority == ZIO_PRIORITY_ASYNC_READ ||
priority == ZIO_PRIORITY_SCRUB)
arc_hdr_set_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
else
arc_hdr_clear_flags(hdr, ARC_FLAG_PRIO_ASYNC_READ);
/*
* At this point, we have a level 1 cache miss. Try again in
* L2ARC if possible.
*/
ASSERT3U(HDR_GET_LSIZE(hdr), ==, lsize);
DTRACE_PROBE4(arc__miss, arc_buf_hdr_t *, hdr, blkptr_t *, bp,
uint64_t, lsize, zbookmark_phys_t *, zb);
ARCSTAT_BUMP(arcstat_misses);
ARCSTAT_CONDSTAT(!HDR_PREFETCH(hdr),
demand, prefetch, !HDR_ISTYPE_METADATA(hdr),
data, metadata, misses);
if (vd != NULL && l2arc_ndev != 0 && !(l2arc_norw && devw)) {
/*
* Read from the L2ARC if the following are true:
* 1. The L2ARC vdev was previously cached.
* 2. This buffer still has L2ARC metadata.
* 3. This buffer isn't currently writing to the L2ARC.
* 4. The L2ARC entry wasn't evicted, which may
* also have invalidated the vdev.
* 5. This isn't a prefetch that l2arc_noprefetch tells us to skip.
*/
if (HDR_HAS_L2HDR(hdr) &&
!HDR_L2_WRITING(hdr) && !HDR_L2_EVICTED(hdr) &&
!(l2arc_noprefetch && HDR_PREFETCH(hdr))) {
l2arc_read_callback_t *cb;
abd_t *abd;
uint64_t asize;
DTRACE_PROBE1(l2arc__hit, arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_hits);
atomic_inc_32(&hdr->b_l2hdr.b_hits);
cb = kmem_zalloc(sizeof (l2arc_read_callback_t),
KM_SLEEP);
cb->l2rcb_hdr = hdr;
cb->l2rcb_bp = *bp;
cb->l2rcb_zb = *zb;
cb->l2rcb_flags = zio_flags;
asize = vdev_psize_to_asize(vd, size);
if (asize != size) {
abd = abd_alloc_for_io(asize,
HDR_ISTYPE_METADATA(hdr));
cb->l2rcb_abd = abd;
} else {
abd = hdr_abd;
}
ASSERT(addr >= VDEV_LABEL_START_SIZE &&
addr + asize <= vd->vdev_psize -
VDEV_LABEL_END_SIZE);
/*
* l2arc read. The SCL_L2ARC lock will be
* released by l2arc_read_done().
* Issue a null zio if the underlying buffer
* was squashed to zero size by compression.
*/
ASSERT3U(arc_hdr_get_compress(hdr), !=,
ZIO_COMPRESS_EMPTY);
rzio = zio_read_phys(pio, vd, addr,
asize, abd,
ZIO_CHECKSUM_OFF,
l2arc_read_done, cb, priority,
zio_flags | ZIO_FLAG_DONT_CACHE |
ZIO_FLAG_CANFAIL |
ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_DONT_RETRY, B_FALSE);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
DTRACE_PROBE2(l2arc__read, vdev_t *, vd,
zio_t *, rzio);
ARCSTAT_INCR(arcstat_l2_read_bytes,
HDR_GET_PSIZE(hdr));
if (*arc_flags & ARC_FLAG_NOWAIT) {
zio_nowait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_WAIT);
if (zio_wait(rzio) == 0)
goto out;
/* l2arc read error; goto zio_read() */
if (hash_lock != NULL)
mutex_enter(hash_lock);
} else {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
if (HDR_L2_WRITING(hdr))
ARCSTAT_BUMP(arcstat_l2_rw_clash);
spa_config_exit(spa, SCL_L2ARC, vd);
}
} else {
if (vd != NULL)
spa_config_exit(spa, SCL_L2ARC, vd);
if (l2arc_ndev != 0) {
DTRACE_PROBE1(l2arc__miss,
arc_buf_hdr_t *, hdr);
ARCSTAT_BUMP(arcstat_l2_misses);
}
}
rzio = zio_read(pio, spa, bp, hdr_abd, size,
arc_read_done, hdr, priority, zio_flags, zb);
acb->acb_zio_head = rzio;
if (hash_lock != NULL)
mutex_exit(hash_lock);
if (*arc_flags & ARC_FLAG_WAIT) {
rc = zio_wait(rzio);
goto out;
}
ASSERT(*arc_flags & ARC_FLAG_NOWAIT);
zio_nowait(rzio);
}
out:
/* embedded bps don't actually go to disk */
if (!BP_IS_EMBEDDED(bp))
spa_read_history_add(spa, zb, *arc_flags);
return (rc);
}
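/*
 * Illustrative sketch (hypothetical helper, not existing code): the
 * caller-facing contract of the ARC_FLAG_WAIT / ARC_FLAG_NOWAIT handling
 * above.  Exactly one of the two flags must be set; WAIT makes a miss
 * block in zio_wait(), while NOWAIT issues the read and returns
 * immediately, leaving completion to the 'done' callback.  The helper
 * name and its arc_flags_t pointer argument are assumptions made for
 * the example only.
 */
static void
example_set_wait_mode(arc_flags_t *flags, boolean_t synchronous)
{
	/* clear any previous wait-mode choice, then pick exactly one */
	*flags &= ~(ARC_FLAG_WAIT | ARC_FLAG_NOWAIT);
	*flags |= (synchronous ? ARC_FLAG_WAIT : ARC_FLAG_NOWAIT);
}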
arc_prune_t *
arc_add_prune_callback(arc_prune_func_t *func, void *private)
{
arc_prune_t *p;
p = kmem_alloc(sizeof (*p), KM_SLEEP);
p->p_pfunc = func;
p->p_private = private;
list_link_init(&p->p_node);
refcount_create(&p->p_refcnt);
mutex_enter(&arc_prune_mtx);
refcount_add(&p->p_refcnt, &arc_prune_list);
list_insert_head(&arc_prune_list, p);
mutex_exit(&arc_prune_mtx);
return (p);
}
void
arc_remove_prune_callback(arc_prune_t *p)
{
boolean_t wait = B_FALSE;
mutex_enter(&arc_prune_mtx);
list_remove(&arc_prune_list, p);
if (refcount_remove(&p->p_refcnt, &arc_prune_list) > 0)
wait = B_TRUE;
mutex_exit(&arc_prune_mtx);
/* wait for arc_prune_task to finish */
if (wait)
taskq_wait_outstanding(arc_prune_taskq, 0);
ASSERT0(refcount_count(&p->p_refcnt));
refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
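/*
 * Illustrative sketch (hypothetical consumer, not existing code): how a
 * caller might register and later tear down a prune callback.  The
 * callback signature is assumed to be (int64_t bytes, void *priv);
 * example_prune_cb and example_prune_lifecycle are invented names.
 */
static void
example_prune_cb(int64_t bytes, void *priv)
{
	/* release up to 'bytes' worth of metadata pinned by 'priv' */
}

static void
example_prune_lifecycle(void *priv)
{
	arc_prune_t *p = arc_add_prune_callback(example_prune_cb, priv);

	/* ... normal operation; the ARC may invoke example_prune_cb ... */

	arc_remove_prune_callback(p);
}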
/*
* Notify the arc that a block was freed, and thus will never be used again.
*/
void
arc_freed(spa_t *spa, const blkptr_t *bp)
{
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
uint64_t guid = spa_load_guid(spa);
ASSERT(!BP_IS_EMBEDDED(bp));
hdr = buf_hash_find(guid, bp, &hash_lock);
if (hdr == NULL)
return;
/*
* We might be trying to free a block that is still doing I/O
* (i.e. prefetch) or has a reference (i.e. a dedup-ed,
* dmu_sync-ed block). If this block is being prefetched, then it
* would still have the ARC_FLAG_IO_IN_PROGRESS flag set on the hdr
* until the I/O completes. A block may also have a reference if it is
* part of a dedup-ed, dmu_synced write. The dmu_sync() function would
* have written the new block to its final resting place on disk but
* without the dedup flag set. This would have left the hdr in the MRU
* state and discoverable. When the txg finally syncs it detects that
* the block was overridden in open context and issues an override I/O.
* Since this is a dedup block, the override I/O will determine if the
* block is already in the DDT. If so, then it will replace the io_bp
* with the bp from the DDT and allow the I/O to finish. When the I/O
* reaches the done callback, dbuf_write_override_done, it will
* check to see if the io_bp and io_bp_override are identical.
* If they are not, then it indicates that the bp was replaced with
* the bp in the DDT and the override bp is freed. This allows
* us to arrive here with a reference on a block that is being
* freed. So if we have an I/O in progress, or a reference to
* this hdr, then we don't destroy the hdr.
*/
if (!HDR_HAS_L1HDR(hdr) || (!HDR_IO_IN_PROGRESS(hdr) &&
refcount_is_zero(&hdr->b_l1hdr.b_refcnt))) {
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
mutex_exit(hash_lock);
} else {
mutex_exit(hash_lock);
}
}
/*
* Release this buffer from the cache, making it an anonymous buffer. This
* must be done after a read and prior to modifying the buffer contents.
* If the buffer has more than one reference, we must make
* a new hdr for the buffer.
*/
void
arc_release(arc_buf_t *buf, void *tag)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
/*
* It would be nice to assert that if it's DMU metadata (level >
* 0 || it's the dnode file), then it must be syncing context.
* But we don't know that information at this level.
*/
mutex_enter(&buf->b_evict_lock);
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* We don't grab the hash lock prior to this check, because if
* the buffer's header is in the arc_anon state, it won't be
* linked into the hash table.
*/
if (hdr->b_l1hdr.b_state == arc_anon) {
mutex_exit(&buf->b_evict_lock);
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT(!HDR_IN_HASH_TABLE(hdr));
ASSERT(!HDR_HAS_L2HDR(hdr));
ASSERT(HDR_EMPTY(hdr));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), ==, 1);
ASSERT(!list_link_active(&hdr->b_l1hdr.b_arc_node));
hdr->b_l1hdr.b_arc_access = 0;
/*
* If the buf is being overridden then it may already
* have a hdr that is not empty.
*/
buf_discard_identity(hdr);
arc_buf_thaw(buf);
return;
}
kmutex_t *hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
/*
* This assignment is only valid as long as the hash_lock is
* held, we must be careful not to reference state or the
* b_state field after dropping the lock.
*/
arc_state_t *state = hdr->b_l1hdr.b_state;
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
ASSERT3P(state, !=, arc_anon);
/* this buffer is not on any list */
ASSERT3S(refcount_count(&hdr->b_l1hdr.b_refcnt), >, 0);
if (HDR_HAS_L2HDR(hdr)) {
mutex_enter(&hdr->b_l2hdr.b_dev->l2ad_mtx);
/*
* We have to recheck this conditional again now that
* we're holding the l2ad_mtx to prevent a race with
* another thread which might be concurrently calling
* l2arc_evict(). In that case, l2arc_evict() might have
* destroyed the header's L2 portion as we were waiting
* to acquire the l2ad_mtx.
*/
if (HDR_HAS_L2HDR(hdr))
arc_hdr_l2hdr_destroy(hdr);
mutex_exit(&hdr->b_l2hdr.b_dev->l2ad_mtx);
}
/*
* Do we have more than one buf?
*/
if (hdr->b_l1hdr.b_bufcnt > 1) {
arc_buf_hdr_t *nhdr;
uint64_t spa = hdr->b_spa;
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t lsize = HDR_GET_LSIZE(hdr);
boolean_t protected = HDR_PROTECTED(hdr);
enum zio_compress compress = arc_hdr_get_compress(hdr);
arc_buf_contents_t type = arc_buf_type(hdr);
VERIFY3U(hdr->b_type, ==, type);
ASSERT(hdr->b_l1hdr.b_buf != buf || buf->b_next != NULL);
(void) remove_reference(hdr, hash_lock, tag);
if (arc_buf_is_shared(buf) && !ARC_BUF_COMPRESSED(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
ASSERT(ARC_BUF_LAST(buf));
}
/*
* Pull the data off of this hdr and attach it to
* a new anonymous hdr. Also find the last buffer
* in the hdr's buffer list.
*/
arc_buf_t *lastbuf = arc_buf_remove(hdr, buf);
ASSERT3P(lastbuf, !=, NULL);
/*
* If the current arc_buf_t and the hdr are sharing their data
* buffer, then we must stop sharing that block.
*/
if (arc_buf_is_shared(buf)) {
ASSERT3P(hdr->b_l1hdr.b_buf, !=, buf);
VERIFY(!arc_buf_is_shared(lastbuf));
/*
* First, sever the block sharing relationship between
* buf and the arc_buf_hdr_t.
*/
arc_unshare_buf(hdr, buf);
/*
* Now we need to recreate the hdr's b_pabd. Since we
* have lastbuf handy, we try to share with it, but if
* we can't then we allocate a new b_pabd and copy the
* data from buf into it.
*/
if (arc_can_share(hdr, lastbuf)) {
arc_share_buf(hdr, lastbuf);
} else {
arc_hdr_alloc_abd(hdr, B_FALSE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd,
buf->b_data, psize);
}
VERIFY3P(lastbuf->b_data, !=, NULL);
} else if (HDR_SHARED_DATA(hdr)) {
/*
* Uncompressed shared buffers are always at the end
* of the list. Compressed buffers don't have the
* same requirements. This makes it hard to
* simply assert that the lastbuf is shared so
* we rely on the hdr's compression flags to determine
* if we have a compressed, shared buffer.
*/
ASSERT(arc_buf_is_shared(lastbuf) ||
arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF);
ASSERT(!ARC_BUF_SHARED(buf));
}
ASSERT(hdr->b_l1hdr.b_pabd != NULL || HDR_HAS_RABD(hdr));
ASSERT3P(state, !=, arc_l2c_only);
(void) refcount_remove_many(&state->arcs_size,
arc_buf_size(buf), buf);
if (refcount_is_zero(&hdr->b_l1hdr.b_refcnt)) {
ASSERT3P(state, !=, arc_l2c_only);
(void) refcount_remove_many(&state->arcs_esize[type],
arc_buf_size(buf), buf);
}
hdr->b_l1hdr.b_bufcnt -= 1;
if (ARC_BUF_ENCRYPTED(buf))
hdr->b_crypt_hdr.b_ebufcnt -= 1;
arc_cksum_verify(buf);
arc_buf_unwatch(buf);
/* if this is the last uncompressed buf free the checksum */
if (!arc_hdr_has_uncompressed_buf(hdr))
arc_cksum_free(hdr);
mutex_exit(hash_lock);
/*
* Allocate a new hdr. The new hdr will contain a b_pabd
* buffer which will be freed in arc_write().
*/
nhdr = arc_hdr_alloc(spa, psize, lsize, protected,
compress, type, HDR_HAS_RABD(hdr));
ASSERT3P(nhdr->b_l1hdr.b_buf, ==, NULL);
ASSERT0(nhdr->b_l1hdr.b_bufcnt);
ASSERT0(refcount_count(&nhdr->b_l1hdr.b_refcnt));
VERIFY3U(nhdr->b_type, ==, type);
ASSERT(!HDR_SHARED_DATA(nhdr));
nhdr->b_l1hdr.b_buf = buf;
nhdr->b_l1hdr.b_bufcnt = 1;
if (ARC_BUF_ENCRYPTED(buf))
nhdr->b_crypt_hdr.b_ebufcnt = 1;
nhdr->b_l1hdr.b_mru_hits = 0;
nhdr->b_l1hdr.b_mru_ghost_hits = 0;
nhdr->b_l1hdr.b_mfu_hits = 0;
nhdr->b_l1hdr.b_mfu_ghost_hits = 0;
nhdr->b_l1hdr.b_l2_hits = 0;
(void) refcount_add(&nhdr->b_l1hdr.b_refcnt, tag);
buf->b_hdr = nhdr;
mutex_exit(&buf->b_evict_lock);
(void) refcount_add_many(&arc_anon->arcs_size,
HDR_GET_LSIZE(nhdr), buf);
} else {
mutex_exit(&buf->b_evict_lock);
ASSERT(refcount_count(&hdr->b_l1hdr.b_refcnt) == 1);
/* protected by hash lock, or hdr is on arc_anon */
ASSERT(!multilist_link_active(&hdr->b_l1hdr.b_arc_node));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
hdr->b_l1hdr.b_mru_hits = 0;
hdr->b_l1hdr.b_mru_ghost_hits = 0;
hdr->b_l1hdr.b_mfu_hits = 0;
hdr->b_l1hdr.b_mfu_ghost_hits = 0;
hdr->b_l1hdr.b_l2_hits = 0;
arc_change_state(arc_anon, hdr, hash_lock);
hdr->b_l1hdr.b_arc_access = 0;
mutex_exit(hash_lock);
buf_discard_identity(hdr);
arc_buf_thaw(buf);
}
}
int
arc_released(arc_buf_t *buf)
{
int released;
mutex_enter(&buf->b_evict_lock);
released = (buf->b_data != NULL &&
buf->b_hdr->b_l1hdr.b_state == arc_anon);
mutex_exit(&buf->b_evict_lock);
return (released);
}
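/*
 * Illustrative sketch (hypothetical, not existing code): the intended
 * ordering around arc_release().  A caller that read a buffer and wants
 * to modify it in place releases it first, so the hashed, cached identity
 * is discarded before the data changes; 'buf' and 'tag' are whatever the
 * real caller (e.g. the dbuf layer) already holds.
 */
static void
example_modify_buf(arc_buf_t *buf, void *tag)
{
	if (!arc_released(buf))
		arc_release(buf, tag);
	ASSERT(arc_released(buf));

	/* ... modify buf->b_data, then reissue it via arc_write() ... */
}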
#ifdef ZFS_DEBUG
int
arc_referenced(arc_buf_t *buf)
{
int referenced;
mutex_enter(&buf->b_evict_lock);
referenced = (refcount_count(&buf->b_hdr->b_l1hdr.b_refcnt));
mutex_exit(&buf->b_evict_lock);
return (referenced);
}
#endif
static void
arc_write_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_IS_HOLE(bp) ? 0 : BP_GET_PSIZE(bp);
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT(!refcount_is_zero(&buf->b_hdr->b_l1hdr.b_refcnt));
ASSERT(hdr->b_l1hdr.b_bufcnt > 0);
/*
* If we're reexecuting this zio because the pool suspended, then
* cleanup any state that was previously set the first time the
* callback was invoked.
*/
if (zio->io_flags & ZIO_FLAG_REEXECUTED) {
arc_cksum_free(hdr);
arc_buf_unwatch(buf);
if (hdr->b_l1hdr.b_pabd != NULL) {
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
}
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
ASSERT(!HDR_HAS_RABD(hdr));
ASSERT(!HDR_SHARED_DATA(hdr));
ASSERT(!arc_buf_is_shared(buf));
callback->awcb_ready(zio, buf, callback->awcb_private);
if (HDR_IO_IN_PROGRESS(hdr))
ASSERT(zio->io_flags & ZIO_FLAG_REEXECUTED);
arc_hdr_set_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
if (BP_IS_PROTECTED(bp) != !!HDR_PROTECTED(hdr))
hdr = arc_hdr_realloc_crypt(hdr, BP_IS_PROTECTED(bp));
if (BP_IS_PROTECTED(bp)) {
/* ZIL blocks are written through zio_rewrite */
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(HDR_PROTECTED(hdr));
if (BP_SHOULD_BYTESWAP(bp)) {
if (BP_GET_LEVEL(bp) > 0) {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_UINT64;
} else {
hdr->b_l1hdr.b_byteswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
}
} else {
hdr->b_l1hdr.b_byteswap = DMU_BSWAP_NUMFUNCS;
}
hdr->b_crypt_hdr.b_ot = BP_GET_TYPE(bp);
hdr->b_crypt_hdr.b_dsobj = zio->io_bookmark.zb_objset;
zio_crypt_decode_params_bp(bp, hdr->b_crypt_hdr.b_salt,
hdr->b_crypt_hdr.b_iv);
zio_crypt_decode_mac_bp(bp, hdr->b_crypt_hdr.b_mac);
}
/*
* If this block was written for raw encryption but the zio layer
* ended up only authenticating it, adjust the buffer flags now.
*/
if (BP_IS_AUTHENTICATED(bp) && ARC_BUF_ENCRYPTED(buf)) {
arc_hdr_set_flags(hdr, ARC_FLAG_NOAUTH);
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
if (BP_GET_COMPRESS(bp) == ZIO_COMPRESS_OFF)
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
} else if (BP_IS_HOLE(bp) && ARC_BUF_ENCRYPTED(buf)) {
buf->b_flags &= ~ARC_BUF_FLAG_ENCRYPTED;
buf->b_flags &= ~ARC_BUF_FLAG_COMPRESSED;
}
/* this must be done after the buffer flags are adjusted */
arc_cksum_compute(buf);
enum zio_compress compress;
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp)) {
compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(HDR_GET_LSIZE(hdr), ==, BP_GET_LSIZE(bp));
compress = BP_GET_COMPRESS(bp);
}
HDR_SET_PSIZE(hdr, psize);
arc_hdr_set_compress(hdr, compress);
if (zio->io_error != 0 || psize == 0)
goto out;
/*
* Fill the hdr with data. If the buffer is encrypted we have no choice
* but to copy the data into b_rabd. If the hdr is compressed, the data
* we want is available from the zio, otherwise we can take it from
* the buf.
*
* We might be able to share the buf's data with the hdr here. However,
* doing so would cause the ARC to be full of linear ABDs if we write a
* lot of shareable data. As a compromise, we check whether scattered
* ABDs are allowed, and assume that if they are then the user wants
* the ARC to be primarily filled with them regardless of the data being
* written. Therefore, if they're allowed then we allocate one and copy
* the data into it; otherwise, we share the data directly if we can.
*/
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT3U(psize, >, 0);
ASSERT(ARC_BUF_COMPRESSED(buf));
arc_hdr_alloc_abd(hdr, B_TRUE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (zfs_abd_scatter_enabled || !arc_can_share(hdr, buf)) {
/*
* Ideally, we would always copy the io_abd into b_pabd, but the
* user may have disabled compressed ARC, thus we must check the
* hdr's compression setting rather than the io_bp's.
*/
if (BP_IS_ENCRYPTED(bp)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, B_TRUE);
abd_copy(hdr->b_crypt_hdr.b_rabd, zio->io_abd, psize);
} else if (arc_hdr_get_compress(hdr) != ZIO_COMPRESS_OFF &&
!ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(psize, >, 0);
arc_hdr_alloc_abd(hdr, B_FALSE);
abd_copy(hdr->b_l1hdr.b_pabd, zio->io_abd, psize);
} else {
ASSERT3U(zio->io_orig_size, ==, arc_hdr_size(hdr));
arc_hdr_alloc_abd(hdr, B_FALSE);
abd_copy_from_buf(hdr->b_l1hdr.b_pabd, buf->b_data,
arc_buf_size(buf));
}
} else {
ASSERT3P(buf->b_data, ==, abd_to_buf(zio->io_orig_abd));
ASSERT3U(zio->io_orig_size, ==, arc_buf_size(buf));
ASSERT3U(hdr->b_l1hdr.b_bufcnt, ==, 1);
arc_share_buf(hdr, buf);
}
out:
arc_hdr_verify(hdr, bp);
spl_fstrans_unmark(cookie);
}
static void
arc_write_children_ready(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
callback->awcb_children_ready(zio, buf, callback->awcb_private);
}
/*
* The SPA calls this callback for each physical write that happens on behalf
* of a logical write. See the comment in dbuf_write_physdone() for details.
*/
static void
arc_write_physdone(zio_t *zio)
{
arc_write_callback_t *cb = zio->io_private;
if (cb->awcb_physdone != NULL)
cb->awcb_physdone(zio, cb->awcb_buf, cb->awcb_private);
}
static void
arc_write_done(zio_t *zio)
{
arc_write_callback_t *callback = zio->io_private;
arc_buf_t *buf = callback->awcb_buf;
arc_buf_hdr_t *hdr = buf->b_hdr;
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
if (zio->io_error == 0) {
arc_hdr_verify(hdr, zio->io_bp);
if (BP_IS_HOLE(zio->io_bp) || BP_IS_EMBEDDED(zio->io_bp)) {
buf_discard_identity(hdr);
} else {
hdr->b_dva = *BP_IDENTITY(zio->io_bp);
hdr->b_birth = BP_PHYSICAL_BIRTH(zio->io_bp);
}
} else {
ASSERT(HDR_EMPTY(hdr));
}
/*
* If the block to be written was all-zero or compressed enough to be
* embedded in the BP, no write was performed so there will be no
* dva/birth/checksum. The buffer must therefore remain anonymous
* (and uncached).
*/
if (!HDR_EMPTY(hdr)) {
arc_buf_hdr_t *exists;
kmutex_t *hash_lock;
ASSERT3U(zio->io_error, ==, 0);
arc_cksum_verify(buf);
exists = buf_hash_insert(hdr, &hash_lock);
if (exists != NULL) {
/*
* This can only happen if we overwrite for
* sync-to-convergence, because we remove
* buffers from the hash table when we arc_free().
*/
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad overwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
ASSERT(refcount_is_zero(
&exists->b_l1hdr.b_refcnt));
arc_change_state(arc_anon, exists, hash_lock);
mutex_exit(hash_lock);
arc_hdr_destroy(exists);
exists = buf_hash_insert(hdr, &hash_lock);
ASSERT3P(exists, ==, NULL);
} else if (zio->io_flags & ZIO_FLAG_NOPWRITE) {
/* nopwrite */
ASSERT(zio->io_prop.zp_nopwrite);
if (!BP_EQUAL(&zio->io_bp_orig, zio->io_bp))
panic("bad nopwrite, hdr=%p exists=%p",
(void *)hdr, (void *)exists);
} else {
/* Dedup */
ASSERT(hdr->b_l1hdr.b_bufcnt == 1);
ASSERT(hdr->b_l1hdr.b_state == arc_anon);
ASSERT(BP_GET_DEDUP(zio->io_bp));
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
}
}
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
/* if it's not anon, we are doing a scrub */
if (exists == NULL && hdr->b_l1hdr.b_state == arc_anon)
arc_access(hdr, hash_lock);
mutex_exit(hash_lock);
} else {
arc_hdr_clear_flags(hdr, ARC_FLAG_IO_IN_PROGRESS);
}
ASSERT(!refcount_is_zero(&hdr->b_l1hdr.b_refcnt));
callback->awcb_done(zio, buf, callback->awcb_private);
abd_put(zio->io_abd);
kmem_free(callback, sizeof (arc_write_callback_t));
}
zio_t *
arc_write(zio_t *pio, spa_t *spa, uint64_t txg,
blkptr_t *bp, arc_buf_t *buf, boolean_t l2arc,
const zio_prop_t *zp, arc_write_done_func_t *ready,
arc_write_done_func_t *children_ready, arc_write_done_func_t *physdone,
arc_write_done_func_t *done, void *private, zio_priority_t priority,
int zio_flags, const zbookmark_phys_t *zb)
{
arc_buf_hdr_t *hdr = buf->b_hdr;
arc_write_callback_t *callback;
zio_t *zio;
zio_prop_t localprop = *zp;
ASSERT3P(ready, !=, NULL);
ASSERT3P(done, !=, NULL);
ASSERT(!HDR_IO_ERROR(hdr));
ASSERT(!HDR_IO_IN_PROGRESS(hdr));
ASSERT3P(hdr->b_l1hdr.b_acb, ==, NULL);
ASSERT3U(hdr->b_l1hdr.b_bufcnt, >, 0);
if (l2arc)
arc_hdr_set_flags(hdr, ARC_FLAG_L2CACHE);
if (ARC_BUF_ENCRYPTED(buf)) {
ASSERT(ARC_BUF_COMPRESSED(buf));
localprop.zp_encrypt = B_TRUE;
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
localprop.zp_byteorder =
(hdr->b_l1hdr.b_byteswap == DMU_BSWAP_NUMFUNCS) ?
ZFS_HOST_BYTEORDER : !ZFS_HOST_BYTEORDER;
bcopy(hdr->b_crypt_hdr.b_salt, localprop.zp_salt,
ZIO_DATA_SALT_LEN);
bcopy(hdr->b_crypt_hdr.b_iv, localprop.zp_iv,
ZIO_DATA_IV_LEN);
bcopy(hdr->b_crypt_hdr.b_mac, localprop.zp_mac,
ZIO_DATA_MAC_LEN);
if (DMU_OT_IS_ENCRYPTED(localprop.zp_type)) {
localprop.zp_nopwrite = B_FALSE;
localprop.zp_copies =
MIN(localprop.zp_copies, SPA_DVAS_PER_BP - 1);
}
zio_flags |= ZIO_FLAG_RAW;
} else if (ARC_BUF_COMPRESSED(buf)) {
ASSERT3U(HDR_GET_LSIZE(hdr), !=, arc_buf_size(buf));
localprop.zp_compress = HDR_GET_COMPRESS(hdr);
zio_flags |= ZIO_FLAG_RAW_COMPRESS;
}
callback = kmem_zalloc(sizeof (arc_write_callback_t), KM_SLEEP);
callback->awcb_ready = ready;
callback->awcb_children_ready = children_ready;
callback->awcb_physdone = physdone;
callback->awcb_done = done;
callback->awcb_private = private;
callback->awcb_buf = buf;
/*
* The hdr's b_pabd is now stale, free it now. A new data block
* will be allocated when the zio pipeline calls arc_write_ready().
*/
if (hdr->b_l1hdr.b_pabd != NULL) {
/*
* If the buf is currently sharing the data block with
* the hdr then we need to break that relationship here.
* The hdr will remain with a NULL data pointer and the
* buf will take sole ownership of the block.
*/
if (arc_buf_is_shared(buf)) {
arc_unshare_buf(hdr, buf);
} else {
arc_hdr_free_abd(hdr, B_FALSE);
}
VERIFY3P(buf->b_data, !=, NULL);
}
if (HDR_HAS_RABD(hdr))
arc_hdr_free_abd(hdr, B_TRUE);
if (!(zio_flags & ZIO_FLAG_RAW))
arc_hdr_set_compress(hdr, ZIO_COMPRESS_OFF);
ASSERT(!arc_buf_is_shared(buf));
ASSERT3P(hdr->b_l1hdr.b_pabd, ==, NULL);
zio = zio_write(pio, spa, txg, bp,
abd_get_from_buf(buf->b_data, HDR_GET_LSIZE(hdr)),
HDR_GET_LSIZE(hdr), arc_buf_size(buf), &localprop, arc_write_ready,
(children_ready != NULL) ? arc_write_children_ready : NULL,
arc_write_physdone, arc_write_done, callback,
priority, zio_flags, zb);
return (zio);
}
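/*
 * Illustrative sketch (hypothetical caller, not existing code): the shape
 * of an arc_write() call and which callbacks it wires up.  example_ready
 * fires from arc_write_ready() once the data is finalized; example_done
 * fires from arc_write_done() after the block has its DVA and birth txg.
 * All example_* names, the flags, and the priority are assumptions; real
 * callers live in the dbuf layer and supply their own zio_prop_t.
 */
static void
example_ready(zio_t *zio, arc_buf_t *buf, void *priv)
{
	/* finalize the buffer contents before the data is copied/shared */
}

static void
example_done(zio_t *zio, arc_buf_t *buf, void *priv)
{
	/* the write completed; 'buf' is now hashed (unless HOLE/EMBEDDED) */
}

static zio_t *
example_issue_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    arc_buf_t *buf, const zio_prop_t *zp, const zbookmark_phys_t *zb)
{
	return (arc_write(pio, spa, txg, bp, buf, B_TRUE /* l2arc */, zp,
	    example_ready, NULL /* children_ready */, NULL /* physdone */,
	    example_done, NULL /* private */, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_CANFAIL, zb));
}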
static int
arc_memory_throttle(uint64_t reserve, uint64_t txg)
{
#ifdef _KERNEL
uint64_t available_memory = arc_free_memory();
static uint64_t page_load = 0;
static uint64_t last_txg = 0;
#if defined(_ILP32)
available_memory =
MIN(available_memory, vmem_size(heap_arena, VMEM_FREE));
#endif
if (available_memory > arc_all_memory() * arc_lotsfree_percent / 100)
return (0);
if (txg > last_txg) {
last_txg = txg;
page_load = 0;
}
/*
* If we are in pageout, we know that memory is already tight,
* and the arc is already going to be evicting, so we just want to
* continue to let page writes occur as quickly as possible.
*/
if (current_is_kswapd()) {
if (page_load > MAX(arc_sys_free / 4, available_memory) / 4) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
return (SET_ERROR(ERESTART));
}
/* Note: reserve is inflated, so we deflate */
page_load += reserve / 8;
return (0);
} else if (page_load > 0 && arc_reclaim_needed()) {
/* memory is low, delay before restarting */
ARCSTAT_INCR(arcstat_memory_throttle_count, 1);
DMU_TX_STAT_BUMP(dmu_tx_memory_reclaim);
return (SET_ERROR(EAGAIN));
}
page_load = 0;
#endif
return (0);
}
void
arc_tempreserve_clear(uint64_t reserve)
{
atomic_add_64(&arc_tempreserve, -reserve);
ASSERT((int64_t)arc_tempreserve >= 0);
}
int
arc_tempreserve_space(uint64_t reserve, uint64_t txg)
{
int error;
uint64_t anon_size;
if (!arc_no_grow &&
reserve > arc_c/4 &&
reserve * 4 > (2ULL << SPA_MAXBLOCKSHIFT))
arc_c = MIN(arc_c_max, reserve * 4);
/*
* Throttle when the calculated memory footprint for the TXG
* exceeds the target ARC size.
*/
if (reserve > arc_c) {
DMU_TX_STAT_BUMP(dmu_tx_memory_reserve);
return (SET_ERROR(ERESTART));
}
/*
* Don't count loaned bufs as in flight dirty data to prevent long
* network delays from blocking transactions that are ready to be
* assigned to a txg.
*/
/* assert that it has not wrapped around */
ASSERT3S(atomic_add_64_nv(&arc_loaned_bytes, 0), >=, 0);
anon_size = MAX((int64_t)(refcount_count(&arc_anon->arcs_size) -
arc_loaned_bytes), 0);
/*
* Writes will, almost always, require additional memory allocations
* in order to compress/encrypt/etc the data. We therefore need to
* make sure that there is sufficient available memory for this.
*/
error = arc_memory_throttle(reserve, txg);
if (error != 0)
return (error);
/*
* Throttle writes when the amount of dirty data in the cache
* gets too large. We try to keep the cache less than half full
* of dirty blocks so that our sync times don't grow too large.
* Note: if two requests come in concurrently, we might let them
* both succeed, when one of them should fail. Not a huge deal.
*/
if (reserve + arc_tempreserve + anon_size > arc_c / 2 &&
anon_size > arc_c / 4) {
#ifdef ZFS_DEBUG
uint64_t meta_esize =
refcount_count(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
uint64_t data_esize =
refcount_count(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
dprintf("failing, arc_tempreserve=%lluK anon_meta=%lluK "
"anon_data=%lluK tempreserve=%lluK arc_c=%lluK\n",
arc_tempreserve >> 10, meta_esize >> 10,
data_esize >> 10, reserve >> 10, arc_c >> 10);
#endif
DMU_TX_STAT_BUMP(dmu_tx_dirty_throttle);
return (SET_ERROR(ERESTART));
}
atomic_add_64(&arc_tempreserve, reserve);
return (0);
}
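/*
 * Illustrative sketch (hypothetical, not existing code): how a DMU-side
 * caller is expected to pair arc_tempreserve_space() with
 * arc_tempreserve_clear(), backing off and retrying when the ARC returns
 * ERESTART or EAGAIN.  'dirty_bytes' and 'txg' are stand-in inputs.
 */
static int
example_tempreserve(uint64_t dirty_bytes, uint64_t txg)
{
	int err = arc_tempreserve_space(dirty_bytes, txg);

	if (err != 0)
		return (err);	/* ERESTART/EAGAIN: caller waits and retries */

	/* ... dirty the buffers that belong to 'txg' ... */

	arc_tempreserve_clear(dirty_bytes);
	return (0);
}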
static void
arc_kstat_update_state(arc_state_t *state, kstat_named_t *size,
kstat_named_t *evict_data, kstat_named_t *evict_metadata)
{
size->value.ui64 = refcount_count(&state->arcs_size);
evict_data->value.ui64 =
refcount_count(&state->arcs_esize[ARC_BUFC_DATA]);
evict_metadata->value.ui64 =
refcount_count(&state->arcs_esize[ARC_BUFC_METADATA]);
}
static int
arc_kstat_update(kstat_t *ksp, int rw)
{
arc_stats_t *as = ksp->ks_data;
if (rw == KSTAT_WRITE) {
return (SET_ERROR(EACCES));
} else {
arc_kstat_update_state(arc_anon,
&as->arcstat_anon_size,
&as->arcstat_anon_evictable_data,
&as->arcstat_anon_evictable_metadata);
arc_kstat_update_state(arc_mru,
&as->arcstat_mru_size,
&as->arcstat_mru_evictable_data,
&as->arcstat_mru_evictable_metadata);
arc_kstat_update_state(arc_mru_ghost,
&as->arcstat_mru_ghost_size,
&as->arcstat_mru_ghost_evictable_data,
&as->arcstat_mru_ghost_evictable_metadata);
arc_kstat_update_state(arc_mfu,
&as->arcstat_mfu_size,
&as->arcstat_mfu_evictable_data,
&as->arcstat_mfu_evictable_metadata);
arc_kstat_update_state(arc_mfu_ghost,
&as->arcstat_mfu_ghost_size,
&as->arcstat_mfu_ghost_evictable_data,
&as->arcstat_mfu_ghost_evictable_metadata);
as->arcstat_memory_all_bytes.value.ui64 =
arc_all_memory();
as->arcstat_memory_free_bytes.value.ui64 =
arc_free_memory();
as->arcstat_memory_available_bytes.value.i64 =
arc_available_memory();
}
return (0);
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the ARC eviction
* code is laid out; arc_evict_state() assumes ARC buffers are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
unsigned int
arc_state_multilist_index_func(multilist_t *ml, void *obj)
{
arc_buf_hdr_t *hdr = obj;
/*
* We rely on b_dva to generate evenly distributed index
* numbers using buf_hash below. So, as an added precaution,
* let's make sure we never add empty buffers to the arc lists.
*/
ASSERT(!HDR_EMPTY(hdr));
/*
* The assumption here is that the hash value for a given
* arc_buf_hdr_t will remain constant throughout its lifetime
* (i.e. its b_spa, b_dva, and b_birth fields don't change).
* Thus, we don't need to store the header's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed.
*/
return (buf_hash(hdr->b_spa, &hdr->b_dva, hdr->b_birth) %
multilist_get_num_sublists(ml));
}
/*
* Called during module initialization and periodically thereafter to
* apply reasonable changes to the exposed performance tunings. Non-zero
* zfs_* values which differ from the currently set values will be applied.
*/
static void
arc_tuning_update(void)
{
uint64_t allmem = arc_all_memory();
unsigned long limit;
/* Valid range: 64M - <all physical memory> */
if ((zfs_arc_max) && (zfs_arc_max != arc_c_max) &&
(zfs_arc_max >= 64 << 20) && (zfs_arc_max < allmem) &&
(zfs_arc_max > arc_c_min)) {
arc_c_max = zfs_arc_max;
arc_c = arc_c_max;
arc_p = (arc_c >> 1);
if (arc_meta_limit > arc_c_max)
arc_meta_limit = arc_c_max;
if (arc_dnode_limit > arc_meta_limit)
arc_dnode_limit = arc_meta_limit;
}
/* Valid range: 32M - <arc_c_max> */
if ((zfs_arc_min) && (zfs_arc_min != arc_c_min) &&
(zfs_arc_min >= 2ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_min <= arc_c_max)) {
arc_c_min = zfs_arc_min;
arc_c = MAX(arc_c, arc_c_min);
}
/* Valid range: 16M - <arc_c_max> */
if ((zfs_arc_meta_min) && (zfs_arc_meta_min != arc_meta_min) &&
(zfs_arc_meta_min >= 1ULL << SPA_MAXBLOCKSHIFT) &&
(zfs_arc_meta_min <= arc_c_max)) {
arc_meta_min = zfs_arc_meta_min;
if (arc_meta_limit < arc_meta_min)
arc_meta_limit = arc_meta_min;
if (arc_dnode_limit < arc_meta_min)
arc_dnode_limit = arc_meta_min;
}
/* Valid range: <arc_meta_min> - <arc_c_max> */
limit = zfs_arc_meta_limit ? zfs_arc_meta_limit :
MIN(zfs_arc_meta_limit_percent, 100) * arc_c_max / 100;
if ((limit != arc_meta_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_c_max))
arc_meta_limit = limit;
/* Valid range: <arc_meta_min> - <arc_meta_limit> */
limit = zfs_arc_dnode_limit ? zfs_arc_dnode_limit :
MIN(zfs_arc_dnode_limit_percent, 100) * arc_meta_limit / 100;
if ((limit != arc_dnode_limit) &&
(limit >= arc_meta_min) &&
(limit <= arc_meta_limit))
arc_dnode_limit = limit;
/* Valid range: 1 - N */
if (zfs_arc_grow_retry)
arc_grow_retry = zfs_arc_grow_retry;
/* Valid range: 1 - N */
if (zfs_arc_shrink_shift) {
arc_shrink_shift = zfs_arc_shrink_shift;
arc_no_grow_shift = MIN(arc_no_grow_shift, arc_shrink_shift - 1);
}
/* Valid range: 1 - N */
if (zfs_arc_p_min_shift)
arc_p_min_shift = zfs_arc_p_min_shift;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prefetch_ms)
arc_min_prefetch_ms = zfs_arc_min_prefetch_ms;
/* Valid range: 1 - N ms */
if (zfs_arc_min_prescient_prefetch_ms) {
arc_min_prescient_prefetch_ms =
zfs_arc_min_prescient_prefetch_ms;
}
/* Valid range: 0 - 100 */
if ((zfs_arc_lotsfree_percent >= 0) &&
(zfs_arc_lotsfree_percent <= 100))
arc_lotsfree_percent = zfs_arc_lotsfree_percent;
/* Valid range: 0 - <all physical memory> */
if ((zfs_arc_sys_free) && (zfs_arc_sys_free != arc_sys_free))
arc_sys_free = MIN(MAX(zfs_arc_sys_free, 0), allmem);
}
static void
arc_state_init(void)
{
arc_anon = &ARC_anon;
arc_mru = &ARC_mru;
arc_mru_ghost = &ARC_mru_ghost;
arc_mfu = &ARC_mfu;
arc_mfu_ghost = &ARC_mfu_ghost;
arc_l2c_only = &ARC_l2c_only;
arc_mru->arcs_list[ARC_BUFC_METADATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
arc_mru->arcs_list[ARC_BUFC_DATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
arc_mru_ghost->arcs_list[ARC_BUFC_METADATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
arc_mru_ghost->arcs_list[ARC_BUFC_DATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
arc_mfu->arcs_list[ARC_BUFC_METADATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
arc_mfu->arcs_list[ARC_BUFC_DATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
arc_mfu_ghost->arcs_list[ARC_BUFC_DATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
arc_l2c_only->arcs_list[ARC_BUFC_METADATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
arc_l2c_only->arcs_list[ARC_BUFC_DATA] =
multilist_create(sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l1hdr.b_arc_node),
arc_state_multilist_index_func);
refcount_create(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
refcount_create(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
refcount_create(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
refcount_create(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
refcount_create(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
refcount_create(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
refcount_create(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
refcount_create(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
refcount_create(&arc_anon->arcs_size);
refcount_create(&arc_mru->arcs_size);
refcount_create(&arc_mru_ghost->arcs_size);
refcount_create(&arc_mfu->arcs_size);
refcount_create(&arc_mfu_ghost->arcs_size);
refcount_create(&arc_l2c_only->arcs_size);
arc_anon->arcs_state = ARC_STATE_ANON;
arc_mru->arcs_state = ARC_STATE_MRU;
arc_mru_ghost->arcs_state = ARC_STATE_MRU_GHOST;
arc_mfu->arcs_state = ARC_STATE_MFU;
arc_mfu_ghost->arcs_state = ARC_STATE_MFU_GHOST;
arc_l2c_only->arcs_state = ARC_STATE_L2C_ONLY;
}
static void
arc_state_fini(void)
{
refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_METADATA]);
refcount_destroy(&arc_anon->arcs_esize[ARC_BUFC_DATA]);
refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_METADATA]);
refcount_destroy(&arc_mru->arcs_esize[ARC_BUFC_DATA]);
refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_METADATA]);
refcount_destroy(&arc_mru_ghost->arcs_esize[ARC_BUFC_DATA]);
refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_METADATA]);
refcount_destroy(&arc_mfu->arcs_esize[ARC_BUFC_DATA]);
refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_METADATA]);
refcount_destroy(&arc_mfu_ghost->arcs_esize[ARC_BUFC_DATA]);
refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_METADATA]);
refcount_destroy(&arc_l2c_only->arcs_esize[ARC_BUFC_DATA]);
refcount_destroy(&arc_anon->arcs_size);
refcount_destroy(&arc_mru->arcs_size);
refcount_destroy(&arc_mru_ghost->arcs_size);
refcount_destroy(&arc_mfu->arcs_size);
refcount_destroy(&arc_mfu_ghost->arcs_size);
refcount_destroy(&arc_l2c_only->arcs_size);
multilist_destroy(arc_mru->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(arc_mru->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(arc_mru_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(arc_mfu->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(arc_mfu_ghost->arcs_list[ARC_BUFC_DATA]);
multilist_destroy(arc_l2c_only->arcs_list[ARC_BUFC_METADATA]);
multilist_destroy(arc_l2c_only->arcs_list[ARC_BUFC_DATA]);
}
uint64_t
arc_target_bytes(void)
{
return (arc_c);
}
void
arc_init(void)
{
uint64_t percent, allmem = arc_all_memory();
mutex_init(&arc_reclaim_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&arc_reclaim_thread_cv, NULL, CV_DEFAULT, NULL);
cv_init(&arc_reclaim_waiters_cv, NULL, CV_DEFAULT, NULL);
arc_min_prefetch_ms = 1000;
arc_min_prescient_prefetch_ms = 6000;
#ifdef _KERNEL
/*
* Register a shrinker to support synchronous (direct) memory
* reclaim from the arc. This is done to prevent kswapd from
* swapping out pages when it is preferable to shrink the arc.
*/
spl_register_shrinker(&arc_shrinker);
/* Set to 1/64 of all memory or a minimum of 512K */
arc_sys_free = MAX(allmem / 64, (512 * 1024));
arc_need_free = 0;
#endif
/* Set max to 1/2 of all memory */
arc_c_max = allmem / 2;
#ifdef _KERNEL
/* Set min cache to 1/32 of all memory, or 32MB, whichever is more */
arc_c_min = MAX(allmem / 32, 2ULL << SPA_MAXBLOCKSHIFT);
#else
/*
* In userland, there's only the memory pressure that we artificially
* create (see arc_available_memory()). Don't let arc_c get too
* small, because it can cause transactions to be larger than
* arc_c, causing arc_tempreserve_space() to fail.
*/
arc_c_min = MAX(arc_c_max / 2, 2ULL << SPA_MAXBLOCKSHIFT);
#endif
arc_c = arc_c_max;
arc_p = (arc_c >> 1);
arc_size = 0;
/* Set arc_meta_min to a floor of one maximum-sized block (16M) */
arc_meta_min = 1ULL << SPA_MAXBLOCKSHIFT;
/* Initialize maximum observed usage to zero */
arc_meta_max = 0;
/*
* Set arc_meta_limit to a percent of arc_c_max with a floor of
* arc_meta_min, and a ceiling of arc_c_max.
*/
percent = MIN(zfs_arc_meta_limit_percent, 100);
arc_meta_limit = MAX(arc_meta_min, (percent * arc_c_max) / 100);
percent = MIN(zfs_arc_dnode_limit_percent, 100);
arc_dnode_limit = (percent * arc_meta_limit) / 100;
/* Apply user specified tunings */
arc_tuning_update();
/* if kmem_flags are set, lets try to use less memory */
if (kmem_debugging())
arc_c = arc_c / 2;
if (arc_c < arc_c_min)
arc_c = arc_c_min;
arc_state_init();
buf_init();
list_create(&arc_prune_list, sizeof (arc_prune_t),
offsetof(arc_prune_t, p_node));
mutex_init(&arc_prune_mtx, NULL, MUTEX_DEFAULT, NULL);
arc_prune_taskq = taskq_create("arc_prune", max_ncpus, defclsyspri,
max_ncpus, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
arc_reclaim_thread_exit = B_FALSE;
arc_ksp = kstat_create("zfs", 0, "arcstats", "misc", KSTAT_TYPE_NAMED,
sizeof (arc_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (arc_ksp != NULL) {
arc_ksp->ks_data = &arc_stats;
arc_ksp->ks_update = arc_kstat_update;
kstat_install(arc_ksp);
}
(void) thread_create(NULL, 0, arc_reclaim_thread, NULL, 0, &p0,
TS_RUN, defclsyspri);
arc_dead = B_FALSE;
arc_warm = B_FALSE;
/*
* Calculate maximum amount of dirty data per pool.
*
* If it has been set by a module parameter, take that.
* Otherwise, use a percentage of physical memory defined by
* zfs_dirty_data_max_percent (default 10%) with a cap at
* zfs_dirty_data_max_max (default 4G or 25% of physical memory).
*/
if (zfs_dirty_data_max_max == 0)
zfs_dirty_data_max_max = MIN(4ULL * 1024 * 1024 * 1024,
allmem * zfs_dirty_data_max_max_percent / 100);
if (zfs_dirty_data_max == 0) {
zfs_dirty_data_max = allmem *
zfs_dirty_data_max_percent / 100;
zfs_dirty_data_max = MIN(zfs_dirty_data_max,
zfs_dirty_data_max_max);
}
}
void
arc_fini(void)
{
arc_prune_t *p;
#ifdef _KERNEL
spl_unregister_shrinker(&arc_shrinker);
#endif /* _KERNEL */
mutex_enter(&arc_reclaim_lock);
arc_reclaim_thread_exit = B_TRUE;
/*
* The reclaim thread will set arc_reclaim_thread_exit back to
* B_FALSE when it is finished exiting; we're waiting for that.
*/
while (arc_reclaim_thread_exit) {
cv_signal(&arc_reclaim_thread_cv);
cv_wait(&arc_reclaim_thread_cv, &arc_reclaim_lock);
}
mutex_exit(&arc_reclaim_lock);
/* Use B_TRUE to ensure *all* buffers are evicted */
arc_flush(NULL, B_TRUE);
arc_dead = B_TRUE;
if (arc_ksp != NULL) {
kstat_delete(arc_ksp);
arc_ksp = NULL;
}
taskq_wait(arc_prune_taskq);
taskq_destroy(arc_prune_taskq);
mutex_enter(&arc_prune_mtx);
while ((p = list_head(&arc_prune_list)) != NULL) {
list_remove(&arc_prune_list, p);
refcount_remove(&p->p_refcnt, &arc_prune_list);
refcount_destroy(&p->p_refcnt);
kmem_free(p, sizeof (*p));
}
mutex_exit(&arc_prune_mtx);
list_destroy(&arc_prune_list);
mutex_destroy(&arc_prune_mtx);
mutex_destroy(&arc_reclaim_lock);
cv_destroy(&arc_reclaim_thread_cv);
cv_destroy(&arc_reclaim_waiters_cv);
arc_state_fini();
buf_fini();
ASSERT0(arc_loaned_bytes);
}
/*
* Level 2 ARC
*
* The level 2 ARC (L2ARC) is a cache layer in-between main memory and disk.
* It uses dedicated storage devices to hold cached data, which are populated
* using large infrequent writes. The main role of this cache is to boost
* the performance of random read workloads. The intended L2ARC devices
* include short-stroked disks, solid state disks, and other media with
* substantially faster read latency than disk.
*
* +-----------------------+
* | ARC |
* +-----------------------+
* | ^ ^
* | | |
* l2arc_feed_thread() arc_read()
* | | |
* | l2arc read |
* V | |
* +---------------+ |
* | L2ARC | |
* +---------------+ |
* | ^ |
* l2arc_write() | |
* | | |
* V | |
* +-------+ +-------+
* | vdev | | vdev |
* | cache | | cache |
* +-------+ +-------+
* +=========+ .-----.
* : L2ARC : |-_____-|
* : devices : | Disks |
* +=========+ `-_____-'
*
* Read requests are satisfied from the following sources, in order:
*
* 1) ARC
* 2) vdev cache of L2ARC devices
* 3) L2ARC devices
* 4) vdev cache of disks
* 5) disks
*
* Some L2ARC device types exhibit extremely slow write performance.
* To accommodate this there are some significant differences between
* the L2ARC and traditional cache design:
*
* 1. There is no eviction path from the ARC to the L2ARC. Evictions from
* the ARC behave as usual, freeing buffers and placing headers on ghost
* lists. The ARC does not send buffers to the L2ARC during eviction as
* this would add inflated write latencies for all ARC memory pressure.
*
* 2. The L2ARC attempts to cache data from the ARC before it is evicted.
* It does this by periodically scanning buffers from the eviction-end of
* the MFU and MRU ARC lists, copying them to the L2ARC devices if they are
* not already there. It scans until a headroom of buffers is satisfied,
* which itself is a buffer for ARC eviction. If a compressible buffer is
* found during scanning and selected for writing to an L2ARC device, we
* temporarily boost scanning headroom during the next scan cycle to make
* sure we adapt to compression effects (which might significantly reduce
* the data volume we write to L2ARC). The thread that does this is
* l2arc_feed_thread(), illustrated below; example sizes are included to
* provide a better sense of ratio than this diagram:
*
* head --> tail
* +---------------------+----------+
* ARC_mfu |:::::#:::::::::::::::|o#o###o###|-->. # already on L2ARC
* +---------------------+----------+ | o L2ARC eligible
* ARC_mru |:#:::::::::::::::::::|#o#ooo####|-->| : ARC buffer
* +---------------------+----------+ |
* 15.9 Gbytes ^ 32 Mbytes |
* headroom |
* l2arc_feed_thread()
* |
* l2arc write hand <--[oooo]--'
* | 8 Mbyte
* | write max
* V
* +==============================+
* L2ARC dev |####|#|###|###| |####| ... |
* +==============================+
* 32 Gbytes
*
* 3. If an ARC buffer is copied to the L2ARC but then hit instead of
* evicted, then the L2ARC has cached a buffer much sooner than it probably
* needed to, potentially wasting L2ARC device bandwidth and storage. It is
* safe to say that this is an uncommon case, since buffers at the end of
* the ARC lists have moved there due to inactivity.
*
* 4. If the ARC evicts faster than the L2ARC can maintain a headroom,
* then the L2ARC simply misses copying some buffers. This serves as a
* pressure valve to prevent heavy read workloads from both stalling the ARC
* with waits and clogging the L2ARC with writes. This also helps prevent
* the potential for the L2ARC to churn if it attempts to cache content too
* quickly, such as during backups of the entire pool.
*
* 5. After system boot and before the ARC has filled main memory, there are
* no evictions from the ARC and so the tails of the ARC_mfu and ARC_mru
* lists can remain mostly static. Instead of searching from the tail of these
* lists as pictured, the l2arc_feed_thread() will search from the list heads
* for eligible buffers, greatly increasing its chance of finding them.
*
* The L2ARC device write speed is also boosted during this time so that
* the L2ARC warms up faster. Since there have been no ARC evictions yet,
* there are no L2ARC reads, and no fear of degrading read performance
* through increased writes.
*
* 6. Writes to the L2ARC devices are grouped and sent in-sequence, so that
* the vdev queue can aggregate them into larger and fewer writes. Each
* device is written to in a rotor fashion, sweeping writes through
* available space then repeating.
*
* 7. The L2ARC does not store dirty content. It never needs to flush
* write buffers back to disk based storage.
*
* 8. If an ARC buffer is written (and dirtied) which also exists in the
* L2ARC, the now stale L2ARC buffer is immediately dropped.
*
* The performance of the L2ARC can be tweaked by a number of tunables, which
* may be necessary for different workloads:
*
* l2arc_write_max max write bytes per interval
* l2arc_write_boost extra write bytes during device warmup
* l2arc_noprefetch skip caching prefetched buffers
* l2arc_headroom number of max device writes to precache
* l2arc_headroom_boost when we find compressed buffers during ARC
* scanning, we multiply headroom by this
* percentage factor for the next scan cycle,
* since more compressed buffers are likely to
* be present
* l2arc_feed_secs seconds between L2ARC writing
*
* Tunables may be removed or added as future performance improvements are
* integrated, and also may become zpool properties.
*
* There are three key functions that control how the L2ARC warms up:
*
* l2arc_write_eligible() check if a buffer is eligible to cache
* l2arc_write_size() calculate how much to write
* l2arc_write_interval() calculate sleep delay between writes
*
* These three functions determine what to write, how much, and how quickly
* to send writes.
*/
static boolean_t
l2arc_write_eligible(uint64_t spa_guid, arc_buf_hdr_t *hdr)
{
/*
* A buffer is *not* eligible for the L2ARC if it:
* 1. belongs to a different spa.
* 2. is already cached on the L2ARC.
* 3. has an I/O in progress (it may be an incomplete read).
* 4. is flagged not eligible (zfs property).
*/
if (hdr->b_spa != spa_guid || HDR_HAS_L2HDR(hdr) ||
HDR_IO_IN_PROGRESS(hdr) || !HDR_L2CACHE(hdr))
return (B_FALSE);
return (B_TRUE);
}
static uint64_t
l2arc_write_size(void)
{
uint64_t size;
/*
* Make sure our globals have meaningful values in case the user
* altered them.
*/
size = l2arc_write_max;
if (size == 0) {
cmn_err(CE_NOTE, "Bad value for l2arc_write_max, value must "
"be greater than zero, resetting it to the default (%d)",
L2ARC_WRITE_SIZE);
size = l2arc_write_max = L2ARC_WRITE_SIZE;
}
if (arc_warm == B_FALSE)
size += l2arc_write_boost;
return (size);
}
static clock_t
l2arc_write_interval(clock_t began, uint64_t wanted, uint64_t wrote)
{
clock_t interval, next, now;
/*
* If the ARC lists are busy, increase our write rate; if the
* lists are stale, idle back. This is achieved by checking
* how much we previously wrote - if it was more than half of
* what we wanted, schedule the next write much sooner.
*/
if (l2arc_feed_again && wrote > (wanted / 2))
interval = (hz * l2arc_feed_min_ms) / 1000;
else
interval = hz * l2arc_feed_secs;
now = ddi_get_lbolt();
next = MAX(now, MIN(now + interval, began + interval));
return (next);
}
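/*
 * Illustrative sketch (hypothetical wrapper, not existing code): how the
 * feed loop combines the two helpers above.  The real loop lives in
 * l2arc_feed_thread(); 'wrote' stands in for the byte count actually
 * issued by the preceding write pass.
 */
static clock_t
example_next_feed(clock_t began, uint64_t wrote)
{
	uint64_t wanted = l2arc_write_size();

	/* write more than half of what was wanted => feed again sooner */
	return (l2arc_write_interval(began, wanted, wrote));
}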
/*
* Cycle through L2ARC devices. This is how L2ARC load balances.
* If a device is returned, this also returns holding the spa config lock.
*/
static l2arc_dev_t *
l2arc_dev_get_next(void)
{
l2arc_dev_t *first, *next = NULL;
/*
* Lock out the removal of spas (spa_namespace_lock), then removal
* of cache devices (l2arc_dev_mtx). Once a device has been selected,
* both locks will be dropped and a spa config lock held instead.
*/
mutex_enter(&spa_namespace_lock);
mutex_enter(&l2arc_dev_mtx);
/* if there are no vdevs, there is nothing to do */
if (l2arc_ndev == 0)
goto out;
first = NULL;
next = l2arc_dev_last;
do {
/* loop around the list looking for a non-faulted vdev */
if (next == NULL) {
next = list_head(l2arc_dev_list);
} else {
next = list_next(l2arc_dev_list, next);
if (next == NULL)
next = list_head(l2arc_dev_list);
}
/* if we have come back to the start, bail out */
if (first == NULL)
first = next;
else if (next == first)
break;
} while (vdev_is_dead(next->l2ad_vdev));
/* if we were unable to find any usable vdevs, return NULL */
if (vdev_is_dead(next->l2ad_vdev))
next = NULL;
l2arc_dev_last = next;
out:
mutex_exit(&l2arc_dev_mtx);
/*
* Grab the config lock to prevent the 'next' device from being
* removed while we are writing to it.
*/
if (next != NULL)
spa_config_enter(next->l2ad_spa, SCL_L2ARC, next, RW_READER);
mutex_exit(&spa_namespace_lock);
return (next);
}
/*
* Free buffers that were tagged for destruction.
*/
static void
l2arc_do_free_on_write(void)
{
list_t *buflist;
l2arc_data_free_t *df, *df_prev;
mutex_enter(&l2arc_free_on_write_mtx);
buflist = l2arc_free_on_write;
for (df = list_tail(buflist); df; df = df_prev) {
df_prev = list_prev(buflist, df);
ASSERT3P(df->l2df_abd, !=, NULL);
abd_free(df->l2df_abd);
list_remove(buflist, df);
kmem_free(df, sizeof (l2arc_data_free_t));
}
mutex_exit(&l2arc_free_on_write_mtx);
}
/*
* A write to a cache device has completed. Update all headers to allow
* reads from these buffers to begin.
*/
static void
l2arc_write_done(zio_t *zio)
{
l2arc_write_callback_t *cb;
l2arc_dev_t *dev;
list_t *buflist;
arc_buf_hdr_t *head, *hdr, *hdr_prev;
kmutex_t *hash_lock;
int64_t bytes_dropped = 0;
cb = zio->io_private;
ASSERT3P(cb, !=, NULL);
dev = cb->l2wcb_dev;
ASSERT3P(dev, !=, NULL);
head = cb->l2wcb_head;
ASSERT3P(head, !=, NULL);
buflist = &dev->l2ad_buflist;
ASSERT3P(buflist, !=, NULL);
DTRACE_PROBE2(l2arc__iodone, zio_t *, zio,
l2arc_write_callback_t *, cb);
if (zio->io_error != 0)
ARCSTAT_BUMP(arcstat_l2_writes_error);
/*
* All writes completed, or an error was hit.
*/
top:
mutex_enter(&dev->l2ad_mtx);
for (hdr = list_prev(buflist, head); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order in which
* the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. We must retry so we
* don't leave the ARC_FLAG_L2_WRITING bit set.
*/
ARCSTAT_BUMP(arcstat_l2_writes_lock_retry);
/*
* We don't want to rescan the headers we've
* already marked as having been written out, so
* we reinsert the head node so we can pick up
* where we left off.
*/
list_remove(buflist, head);
list_insert_after(buflist, hdr, head);
mutex_exit(&dev->l2ad_mtx);
/*
* We wait for the hash lock to become available
* to try and prevent busy waiting, and increase
* the chance we'll be able to acquire the lock
* the next time around.
*/
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* We could not have been moved into the arc_l2c_only
* state while in-flight due to our ARC_FLAG_L2_WRITING
* bit being set. Let's just ensure that's being enforced.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
/*
* Skipped - drop L2ARC entry and mark the header as no
* longer L2 eligible.
*/
if (zio->io_error != 0) {
/*
* Error - drop L2ARC entry.
*/
list_remove(buflist, hdr);
arc_hdr_clear_flags(hdr, ARC_FLAG_HAS_L2HDR);
ARCSTAT_INCR(arcstat_l2_psize, -arc_hdr_size(hdr));
ARCSTAT_INCR(arcstat_l2_lsize, -HDR_GET_LSIZE(hdr));
bytes_dropped += arc_hdr_size(hdr);
(void) refcount_remove_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
}
/*
* Allow ARC to begin reads and ghost list evictions to
* this L2ARC entry.
*/
arc_hdr_clear_flags(hdr, ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
}
atomic_inc_64(&l2arc_writes_done);
list_remove(buflist, head);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
mutex_exit(&dev->l2ad_mtx);
vdev_space_update(dev->l2ad_vdev, -bytes_dropped, 0, 0);
l2arc_do_free_on_write();
kmem_free(cb, sizeof (l2arc_write_callback_t));
}
static int
l2arc_untransform(zio_t *zio, l2arc_read_callback_t *cb)
{
int ret;
spa_t *spa = zio->io_spa;
arc_buf_hdr_t *hdr = cb->l2rcb_hdr;
blkptr_t *bp = zio->io_bp;
dsl_crypto_key_t *dck = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/*
* ZIL data is never written to the L2ARC, so we don't need
* special handling for its unique MAC storage.
*/
ASSERT3U(BP_GET_TYPE(bp), !=, DMU_OT_INTENT_LOG);
ASSERT(MUTEX_HELD(HDR_LOCK(hdr)));
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
/*
* If the data was encrypted, decrypt it now. Note that
* we must check the bp here and not the hdr, since the
* hdr does not have its encryption parameters updated
* until arc_read_done().
*/
if (BP_IS_ENCRYPTED(bp)) {
abd_t *eabd = arc_get_data_abd(hdr,
arc_hdr_size(hdr), hdr);
zio_crypt_decode_params_bp(bp, salt, iv);
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_keystore_lookup_key(spa,
cb->l2rcb_zb.zb_objset, FTAG, &dck);
if (ret != 0) {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
goto error;
}
ret = zio_do_crypt_abd(B_FALSE, &dck->dck_key,
salt, BP_GET_TYPE(bp), iv, mac, HDR_GET_PSIZE(hdr),
BP_SHOULD_BYTESWAP(bp), eabd, hdr->b_l1hdr.b_pabd,
&no_crypt);
if (ret != 0) {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
spa_keystore_dsl_key_rele(spa, dck, FTAG);
goto error;
}
spa_keystore_dsl_key_rele(spa, dck, FTAG);
/*
* If we actually performed decryption, replace b_pabd
* with the decrypted data. Otherwise we can just throw
* our decryption buffer away.
*/
if (!no_crypt) {
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = eabd;
zio->io_abd = eabd;
} else {
arc_free_data_abd(hdr, eabd, arc_hdr_size(hdr), hdr);
}
}
/*
* If the L2ARC block was compressed, but ARC compression
* is disabled, we decompress the data into a new buffer and
* replace the existing data.
*/
if (HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) {
abd_t *cabd = arc_get_data_abd(hdr, arc_hdr_size(hdr), hdr);
void *tmp = abd_borrow_buf(cabd, arc_hdr_size(hdr));
ret = zio_decompress_data(HDR_GET_COMPRESS(hdr),
hdr->b_l1hdr.b_pabd, tmp, HDR_GET_PSIZE(hdr),
HDR_GET_LSIZE(hdr));
if (ret != 0) {
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, cabd, arc_hdr_size(hdr), hdr);
goto error;
}
abd_return_buf_copy(cabd, tmp, arc_hdr_size(hdr));
arc_free_data_abd(hdr, hdr->b_l1hdr.b_pabd,
arc_hdr_size(hdr), hdr);
hdr->b_l1hdr.b_pabd = cabd;
zio->io_abd = cabd;
zio->io_size = HDR_GET_LSIZE(hdr);
}
return (0);
error:
return (ret);
}
/*
* A read to a cache device completed. Validate buffer contents before
* handing over to the regular ARC routines.
*/
static void
l2arc_read_done(zio_t *zio)
{
int tfm_error = 0;
l2arc_read_callback_t *cb;
arc_buf_hdr_t *hdr;
kmutex_t *hash_lock;
boolean_t valid_cksum, using_rdata;
ASSERT3P(zio->io_vd, !=, NULL);
ASSERT(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE);
spa_config_exit(zio->io_spa, SCL_L2ARC, zio->io_vd);
cb = zio->io_private;
ASSERT3P(cb, !=, NULL);
hdr = cb->l2rcb_hdr;
ASSERT3P(hdr, !=, NULL);
hash_lock = HDR_LOCK(hdr);
mutex_enter(hash_lock);
ASSERT3P(hash_lock, ==, HDR_LOCK(hdr));
/*
* If the data was read into a temporary buffer,
* move it and free the buffer.
*/
if (cb->l2rcb_abd != NULL) {
ASSERT3U(arc_hdr_size(hdr), <, zio->io_size);
if (zio->io_error == 0) {
abd_copy(hdr->b_l1hdr.b_pabd, cb->l2rcb_abd,
arc_hdr_size(hdr));
}
/*
* The following must be done regardless of whether
* there was an error:
* - free the temporary buffer
* - point zio to the real ARC buffer
* - set zio size accordingly
* These are required because zio is either re-used for
* an I/O of the block in the case of the error
* or the zio is passed to arc_read_done() and it
* needs real data.
*/
abd_free(cb->l2rcb_abd);
zio->io_size = zio->io_orig_size = arc_hdr_size(hdr);
if (BP_IS_ENCRYPTED(&cb->l2rcb_bp) &&
(cb->l2rcb_flags & ZIO_FLAG_RAW_ENCRYPT)) {
ASSERT(HDR_HAS_RABD(hdr));
zio->io_abd = zio->io_orig_abd =
hdr->b_crypt_hdr.b_rabd;
} else {
ASSERT3P(hdr->b_l1hdr.b_pabd, !=, NULL);
zio->io_abd = zio->io_orig_abd = hdr->b_l1hdr.b_pabd;
}
}
ASSERT3P(zio->io_abd, !=, NULL);
/*
* Check this survived the L2ARC journey.
*/
ASSERT(zio->io_abd == hdr->b_l1hdr.b_pabd ||
(HDR_HAS_RABD(hdr) && zio->io_abd == hdr->b_crypt_hdr.b_rabd));
zio->io_bp_copy = cb->l2rcb_bp; /* XXX fix in L2ARC 2.0 */
zio->io_bp = &zio->io_bp_copy; /* XXX fix in L2ARC 2.0 */
valid_cksum = arc_cksum_is_equal(hdr, zio);
using_rdata = (HDR_HAS_RABD(hdr) &&
zio->io_abd == hdr->b_crypt_hdr.b_rabd);
/*
* b_rabd will always match the data as it exists on disk if it is
* being used. Therefore if we are reading into b_rabd we do not
* attempt to untransform the data.
*/
if (valid_cksum && !using_rdata)
tfm_error = l2arc_untransform(zio, cb);
if (valid_cksum && tfm_error == 0 && zio->io_error == 0 &&
!HDR_L2_EVICTED(hdr)) {
mutex_exit(hash_lock);
zio->io_private = hdr;
arc_read_done(zio);
} else {
mutex_exit(hash_lock);
/*
* Buffer didn't survive caching. Increment stats and
* reissue to the original storage device.
*/
if (zio->io_error != 0) {
ARCSTAT_BUMP(arcstat_l2_io_error);
} else {
zio->io_error = SET_ERROR(EIO);
}
if (!valid_cksum || tfm_error != 0)
ARCSTAT_BUMP(arcstat_l2_cksum_bad);
/*
* If there's no waiter, issue an async i/o to the primary
* storage now. If there *is* a waiter, the caller must
* issue the i/o in a context where it's OK to block.
*/
if (zio->io_waiter == NULL) {
zio_t *pio = zio_unique_parent(zio);
void *abd = (using_rdata) ?
hdr->b_crypt_hdr.b_rabd : hdr->b_l1hdr.b_pabd;
ASSERT(!pio || pio->io_child_type == ZIO_CHILD_LOGICAL);
zio_nowait(zio_read(pio, zio->io_spa, zio->io_bp,
abd, zio->io_size, arc_read_done,
hdr, zio->io_priority, cb->l2rcb_flags,
&cb->l2rcb_zb));
}
}
kmem_free(cb, sizeof (l2arc_read_callback_t));
}
/*
* This is the list priority from which the L2ARC will search for pages to
* cache. This is used within loops (0..3) to cycle through lists in the
* desired order. This order can have a significant effect on cache
* performance.
*
* Currently the metadata lists are hit first, MFU then MRU, followed by
* the data lists. This function returns a locked sublist from the
* selected list.
*/
static multilist_sublist_t *
l2arc_sublist_lock(int list_num)
{
multilist_t *ml = NULL;
unsigned int idx;
ASSERT(list_num >= 0 && list_num < L2ARC_FEED_TYPES);
switch (list_num) {
case 0:
ml = arc_mfu->arcs_list[ARC_BUFC_METADATA];
break;
case 1:
ml = arc_mru->arcs_list[ARC_BUFC_METADATA];
break;
case 2:
ml = arc_mfu->arcs_list[ARC_BUFC_DATA];
break;
case 3:
ml = arc_mru->arcs_list[ARC_BUFC_DATA];
break;
default:
return (NULL);
}
/*
* Return a randomly-selected sublist. This is acceptable
* because the caller feeds only a little bit of data for each
* call (8MB). Subsequent calls will result in different
* sublists being selected.
*/
idx = multilist_get_random_index(ml);
return (multilist_sublist_lock(ml, idx));
}
/*
* Evict buffers from the device write hand to the distance specified in
* bytes. This distance may span populated buffers, or it may span nothing.
* This is clearing a region on the L2ARC device ready for writing.
* If the 'all' boolean is set, every buffer is evicted.
*/
static void
l2arc_evict(l2arc_dev_t *dev, uint64_t distance, boolean_t all)
{
list_t *buflist;
arc_buf_hdr_t *hdr, *hdr_prev;
kmutex_t *hash_lock;
uint64_t taddr;
buflist = &dev->l2ad_buflist;
if (!all && dev->l2ad_first) {
/*
* This is the first sweep through the device. There is
* nothing to evict.
*/
return;
}
if (dev->l2ad_hand >= (dev->l2ad_end - (2 * distance))) {
/*
* When nearing the end of the device, evict to the end
* before the device write hand jumps to the start.
*/
taddr = dev->l2ad_end;
} else {
taddr = dev->l2ad_hand + distance;
}
DTRACE_PROBE4(l2arc__evict, l2arc_dev_t *, dev, list_t *, buflist,
uint64_t, taddr, boolean_t, all);
top:
mutex_enter(&dev->l2ad_mtx);
for (hdr = list_tail(buflist); hdr; hdr = hdr_prev) {
hdr_prev = list_prev(buflist, hdr);
hash_lock = HDR_LOCK(hdr);
/*
* We cannot use mutex_enter or else we can deadlock
* with l2arc_write_buffers (due to swapping the order in
* which the hash lock and l2ad_mtx are taken).
*/
if (!mutex_tryenter(hash_lock)) {
/*
* Missed the hash lock. Retry.
*/
ARCSTAT_BUMP(arcstat_l2_evict_lock_retry);
mutex_exit(&dev->l2ad_mtx);
mutex_enter(hash_lock);
mutex_exit(hash_lock);
goto top;
}
/*
* A header can't be on this list if it doesn't have an L2 header.
*/
ASSERT(HDR_HAS_L2HDR(hdr));
/* Ensure this header has finished being written. */
ASSERT(!HDR_L2_WRITING(hdr));
ASSERT(!HDR_L2_WRITE_HEAD(hdr));
if (!all && (hdr->b_l2hdr.b_daddr >= taddr ||
hdr->b_l2hdr.b_daddr < dev->l2ad_hand)) {
/*
* We've evicted to the target address,
* or the end of the device.
*/
mutex_exit(hash_lock);
break;
}
if (!HDR_HAS_L1HDR(hdr)) {
ASSERT(!HDR_L2_READING(hdr));
/*
* This doesn't exist in the ARC. Destroy.
* arc_hdr_destroy() will call list_remove()
* and decrement arcstat_l2_lsize.
*/
arc_change_state(arc_anon, hdr, hash_lock);
arc_hdr_destroy(hdr);
} else {
ASSERT(hdr->b_l1hdr.b_state != arc_l2c_only);
ARCSTAT_BUMP(arcstat_l2_evict_l1cached);
/*
* Invalidate issued or about to be issued
* reads, since we may be about to write
* over this location.
*/
if (HDR_L2_READING(hdr)) {
ARCSTAT_BUMP(arcstat_l2_evict_reading);
arc_hdr_set_flags(hdr, ARC_FLAG_L2_EVICTED);
}
arc_hdr_l2hdr_destroy(hdr);
}
mutex_exit(hash_lock);
}
mutex_exit(&dev->l2ad_mtx);
}
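/*
 * A minimal sketch of the eviction-target arithmetic used above; the
 * helper name l2arc_evict_target() is hypothetical and exists only for
 * illustration. When the write hand is within two write distances of
 * the end of the device, the target is clamped to the device end so the
 * hand can wrap back to l2ad_start on the next feed cycle.
 */
static uint64_t
l2arc_evict_target(uint64_t hand, uint64_t end, uint64_t distance)
{
	if (hand >= (end - (2 * distance))) {
		/* Nearing the end: clear everything through the end. */
		return (end);
	}
	/* Otherwise clear only 'distance' bytes ahead of the hand. */
	return (hand + distance);
}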
/*
* Handle any abd transforms that might be required for writing to the L2ARC.
* If successful, this function will always return an abd with the data
* transformed as it is on disk in a new abd of asize bytes.
*/
static int
l2arc_apply_transforms(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t asize,
abd_t **abd_out)
{
int ret;
void *tmp = NULL;
abd_t *cabd = NULL, *eabd = NULL, *to_write = hdr->b_l1hdr.b_pabd;
enum zio_compress compress = HDR_GET_COMPRESS(hdr);
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t size = arc_hdr_size(hdr);
boolean_t ismd = HDR_ISTYPE_METADATA(hdr);
boolean_t bswap = (hdr->b_l1hdr.b_byteswap != DMU_BSWAP_NUMFUNCS);
dsl_crypto_key_t *dck = NULL;
uint8_t mac[ZIO_DATA_MAC_LEN] = { 0 };
boolean_t no_crypt = B_FALSE;
ASSERT((HDR_GET_COMPRESS(hdr) != ZIO_COMPRESS_OFF &&
!HDR_COMPRESSION_ENABLED(hdr)) ||
HDR_ENCRYPTED(hdr) || HDR_SHARED_DATA(hdr) || psize != asize);
ASSERT3U(psize, <=, asize);
/*
* If this data simply needs its own buffer, we allocate it and copy
* the data. This may be done to eliminate a dependency on a shared
* buffer or to reallocate the buffer to match asize.
*/
if (HDR_HAS_RABD(hdr) && asize != psize) {
ASSERT3U(asize, >=, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_crypt_hdr.b_rabd, psize);
if (psize != asize)
abd_zero_off(to_write, psize, asize - psize);
goto out;
}
if ((compress == ZIO_COMPRESS_OFF || HDR_COMPRESSION_ENABLED(hdr)) &&
!HDR_ENCRYPTED(hdr)) {
ASSERT3U(size, ==, psize);
to_write = abd_alloc_for_io(asize, ismd);
abd_copy(to_write, hdr->b_l1hdr.b_pabd, size);
if (size != asize)
abd_zero_off(to_write, size, asize - size);
goto out;
}
if (compress != ZIO_COMPRESS_OFF && !HDR_COMPRESSION_ENABLED(hdr)) {
cabd = abd_alloc_for_io(asize, ismd);
tmp = abd_borrow_buf(cabd, asize);
psize = zio_compress_data(compress, to_write, tmp, size);
ASSERT3U(psize, <=, HDR_GET_PSIZE(hdr));
if (psize < asize)
bzero((char *)tmp + psize, asize - psize);
psize = HDR_GET_PSIZE(hdr);
abd_return_buf_copy(cabd, tmp, asize);
to_write = cabd;
}
if (HDR_ENCRYPTED(hdr)) {
eabd = abd_alloc_for_io(asize, ismd);
/*
* If the dataset was disowned before the buffer
* made it to this point, the key to re-encrypt
* it won't be available. In this case we simply
* won't write the buffer to the L2ARC.
*/
ret = spa_keystore_lookup_key(spa, hdr->b_crypt_hdr.b_dsobj,
FTAG, &dck);
if (ret != 0)
goto error;
ret = zio_do_crypt_abd(B_TRUE, &dck->dck_key,
hdr->b_crypt_hdr.b_salt, hdr->b_crypt_hdr.b_ot,
hdr->b_crypt_hdr.b_iv, mac, psize, bswap, to_write,
eabd, &no_crypt);
if (ret != 0)
goto error;
if (no_crypt)
abd_copy(eabd, to_write, psize);
if (psize != asize)
abd_zero_off(eabd, psize, asize - psize);
/* assert that the MAC we got here matches the one we saved */
ASSERT0(bcmp(mac, hdr->b_crypt_hdr.b_mac, ZIO_DATA_MAC_LEN));
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (to_write == cabd)
abd_free(cabd);
to_write = eabd;
}
out:
ASSERT3P(to_write, !=, hdr->b_l1hdr.b_pabd);
*abd_out = to_write;
return (0);
error:
if (dck != NULL)
spa_keystore_dsl_key_rele(spa, dck, FTAG);
if (cabd != NULL)
abd_free(cabd);
if (eabd != NULL)
abd_free(eabd);
*abd_out = NULL;
return (ret);
}
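/*
 * To summarize the transform pipeline above: the data is first copied
 * (or recompressed) into a private abd, then encrypted if the header is
 * encrypted, and any slack between psize and asize is zero-filled so the
 * device write covers the full allocated size. A minimal sketch of that
 * zero-padding step follows; the helper name is hypothetical and used
 * only for illustration.
 */
static void
l2arc_pad_to_asize(abd_t *abd, uint64_t psize, uint64_t asize)
{
	/* Zero the tail between the physical and allocated sizes. */
	if (psize < asize)
		abd_zero_off(abd, psize, asize - psize);
}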
/*
* Find and write ARC buffers to the L2ARC device.
*
* An ARC_FLAG_L2_WRITING flag is set so that the L2ARC buffers are not valid
* for reading until they have completed writing.
* The l2arc_headroom and l2arc_headroom_boost tunables determine how far
* down the ARC lists this function will search for eligible buffers.
*
* Returns the number of bytes actually written (which may be smaller than
* the delta by which the device hand has changed due to alignment).
*/
static uint64_t
l2arc_write_buffers(spa_t *spa, l2arc_dev_t *dev, uint64_t target_sz)
{
arc_buf_hdr_t *hdr, *hdr_prev, *head;
uint64_t write_asize, write_psize, write_lsize, headroom;
boolean_t full;
l2arc_write_callback_t *cb;
zio_t *pio, *wzio;
uint64_t guid = spa_load_guid(spa);
ASSERT3P(dev->l2ad_vdev, !=, NULL);
pio = NULL;
write_lsize = write_asize = write_psize = 0;
full = B_FALSE;
head = kmem_cache_alloc(hdr_l2only_cache, KM_PUSHPAGE);
arc_hdr_set_flags(head, ARC_FLAG_L2_WRITE_HEAD | ARC_FLAG_HAS_L2HDR);
/*
* Copy buffers for L2ARC writing.
*/
for (int try = 0; try < L2ARC_FEED_TYPES; try++) {
multilist_sublist_t *mls = l2arc_sublist_lock(try);
uint64_t passed_sz = 0;
VERIFY3P(mls, !=, NULL);
/*
* L2ARC fast warmup.
*
* Until the ARC is warm and starts to evict, read from the
* head of the ARC lists rather than the tail.
*/
if (arc_warm == B_FALSE)
hdr = multilist_sublist_head(mls);
else
hdr = multilist_sublist_tail(mls);
headroom = target_sz * l2arc_headroom;
if (zfs_compressed_arc_enabled)
headroom = (headroom * l2arc_headroom_boost) / 100;
for (; hdr; hdr = hdr_prev) {
kmutex_t *hash_lock;
abd_t *to_write = NULL;
if (arc_warm == B_FALSE)
hdr_prev = multilist_sublist_next(mls, hdr);
else
hdr_prev = multilist_sublist_prev(mls, hdr);
hash_lock = HDR_LOCK(hdr);
if (!mutex_tryenter(hash_lock)) {
/*
* Skip this buffer rather than waiting.
*/
continue;
}
passed_sz += HDR_GET_LSIZE(hdr);
if (passed_sz > headroom) {
/*
* Searched too far.
*/
mutex_exit(hash_lock);
break;
}
if (!l2arc_write_eligible(guid, hdr)) {
mutex_exit(hash_lock);
continue;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT3U(arc_hdr_size(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
uint64_t psize = HDR_GET_PSIZE(hdr);
uint64_t asize = vdev_psize_to_asize(dev->l2ad_vdev,
psize);
if ((write_asize + asize) > target_sz) {
full = B_TRUE;
mutex_exit(hash_lock);
break;
}
/*
* We rely on the L1 portion of the header below, so
* it's invalid for this header to have been evicted out
* of the ghost cache, prior to being written out. The
* ARC_FLAG_L2_WRITING bit ensures this won't happen.
*/
arc_hdr_set_flags(hdr, ARC_FLAG_L2_WRITING);
ASSERT(HDR_HAS_L1HDR(hdr));
ASSERT3U(HDR_GET_PSIZE(hdr), >, 0);
ASSERT(hdr->b_l1hdr.b_pabd != NULL ||
HDR_HAS_RABD(hdr));
ASSERT3U(arc_hdr_size(hdr), >, 0);
/*
* If this header has b_rabd, we can use this since it
* must always match the data exactly as it exists on
* disk. Otherwise, the L2ARC can normally use the
* hdr's data, but if we're sharing data between the
* hdr and one of its bufs, L2ARC needs its own copy of
* the data so that the ZIO below can't race with the
* buf consumer. To ensure that this copy will be
* available for the lifetime of the ZIO and be cleaned
* up afterwards, we add it to the l2arc_free_on_write
* queue. If we need to apply any transforms to the
* data (compression, encryption) we will also need the
* extra buffer.
*/
if (HDR_HAS_RABD(hdr) && psize == asize) {
to_write = hdr->b_crypt_hdr.b_rabd;
} else if ((HDR_COMPRESSION_ENABLED(hdr) ||
HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
!HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) &&
psize == asize) {
to_write = hdr->b_l1hdr.b_pabd;
} else {
int ret;
arc_buf_contents_t type = arc_buf_type(hdr);
ret = l2arc_apply_transforms(spa, hdr, asize,
&to_write);
if (ret != 0) {
arc_hdr_clear_flags(hdr,
ARC_FLAG_L2_WRITING);
mutex_exit(hash_lock);
continue;
}
l2arc_free_abd_on_write(to_write, asize, type);
}
if (pio == NULL) {
/*
* Insert a dummy header on the buflist so
* l2arc_write_done() can find where the
* write buffers begin without searching.
*/
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, head);
mutex_exit(&dev->l2ad_mtx);
cb = kmem_alloc(
sizeof (l2arc_write_callback_t), KM_SLEEP);
cb->l2wcb_dev = dev;
cb->l2wcb_head = head;
pio = zio_root(spa, l2arc_write_done, cb,
ZIO_FLAG_CANFAIL);
}
hdr->b_l2hdr.b_dev = dev;
hdr->b_l2hdr.b_hits = 0;
hdr->b_l2hdr.b_daddr = dev->l2ad_hand;
arc_hdr_set_flags(hdr, ARC_FLAG_HAS_L2HDR);
mutex_enter(&dev->l2ad_mtx);
list_insert_head(&dev->l2ad_buflist, hdr);
mutex_exit(&dev->l2ad_mtx);
(void) refcount_add_many(&dev->l2ad_alloc,
arc_hdr_size(hdr), hdr);
wzio = zio_write_phys(pio, dev->l2ad_vdev,
hdr->b_l2hdr.b_daddr, asize, to_write,
ZIO_CHECKSUM_OFF, NULL, hdr,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_CANFAIL, B_FALSE);
write_lsize += HDR_GET_LSIZE(hdr);
DTRACE_PROBE2(l2arc__write, vdev_t *, dev->l2ad_vdev,
zio_t *, wzio);
write_psize += psize;
write_asize += asize;
dev->l2ad_hand += asize;
mutex_exit(hash_lock);
(void) zio_nowait(wzio);
}
multilist_sublist_unlock(mls);
if (full == B_TRUE)
break;
}
/* No buffers selected for writing? */
if (pio == NULL) {
ASSERT0(write_lsize);
ASSERT(!HDR_HAS_L1HDR(head));
kmem_cache_free(hdr_l2only_cache, head);
return (0);
}
ASSERT3U(write_asize, <=, target_sz);
ARCSTAT_BUMP(arcstat_l2_writes_sent);
ARCSTAT_INCR(arcstat_l2_write_bytes, write_psize);
ARCSTAT_INCR(arcstat_l2_lsize, write_lsize);
ARCSTAT_INCR(arcstat_l2_psize, write_psize);
vdev_space_update(dev->l2ad_vdev, write_psize, 0, 0);
/*
* Bump device hand to the device start if it is approaching the end.
* l2arc_evict() will already have evicted ahead for this case.
*/
if (dev->l2ad_hand >= (dev->l2ad_end - target_sz)) {
dev->l2ad_hand = dev->l2ad_start;
dev->l2ad_first = B_FALSE;
}
dev->l2ad_writing = B_TRUE;
(void) zio_wait(pio);
dev->l2ad_writing = B_FALSE;
return (write_asize);
}
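/*
 * The buffer-source selection inside the loop above amounts to a
 * three-way choice. The helper below is a hypothetical sketch of that
 * decision (it is not part of this file): reuse b_rabd when it already
 * matches the allocated size, reuse b_pabd when no transform or private
 * copy is needed, and otherwise build a private copy of asize bytes via
 * l2arc_apply_transforms().
 */
static int
l2arc_select_write_abd(spa_t *spa, arc_buf_hdr_t *hdr, uint64_t psize,
    uint64_t asize, abd_t **to_write)
{
	if (HDR_HAS_RABD(hdr) && psize == asize) {
		/* Raw (on-disk) data already has the right size. */
		*to_write = hdr->b_crypt_hdr.b_rabd;
		return (0);
	}
	if ((HDR_COMPRESSION_ENABLED(hdr) ||
	    HDR_GET_COMPRESS(hdr) == ZIO_COMPRESS_OFF) &&
	    !HDR_ENCRYPTED(hdr) && !HDR_SHARED_DATA(hdr) && psize == asize) {
		/* The header's own buffer can be written as-is. */
		*to_write = hdr->b_l1hdr.b_pabd;
		return (0);
	}
	/* Otherwise build a private, transformed copy of asize bytes. */
	return (l2arc_apply_transforms(spa, hdr, asize, to_write));
}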
/*
* This thread feeds the L2ARC at regular intervals. This is the beating
* heart of the L2ARC.
*/
/* ARGSUSED */
static void
l2arc_feed_thread(void *unused)
{
callb_cpr_t cpr;
l2arc_dev_t *dev;
spa_t *spa;
uint64_t size, wrote;
clock_t begin, next = ddi_get_lbolt();
fstrans_cookie_t cookie;
CALLB_CPR_INIT(&cpr, &l2arc_feed_thr_lock, callb_generic_cpr, FTAG);
mutex_enter(&l2arc_feed_thr_lock);
cookie = spl_fstrans_mark();
while (l2arc_thread_exit == 0) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_sig(&l2arc_feed_thr_cv,
&l2arc_feed_thr_lock, next);
CALLB_CPR_SAFE_END(&cpr, &l2arc_feed_thr_lock);
next = ddi_get_lbolt() + hz;
/*
* Quick check for L2ARC devices.
*/
mutex_enter(&l2arc_dev_mtx);
if (l2arc_ndev == 0) {
mutex_exit(&l2arc_dev_mtx);
continue;
}
mutex_exit(&l2arc_dev_mtx);
begin = ddi_get_lbolt();
/*
* This selects the next l2arc device to write to, and in
* doing so the next spa to feed from: dev->l2ad_spa. This
* will return NULL if there are now no l2arc devices or if
* they are all faulted.
*
* If a device is returned, its spa's config lock is also
* held to prevent device removal. l2arc_dev_get_next()
* will grab and release l2arc_dev_mtx.
*/
if ((dev = l2arc_dev_get_next()) == NULL)
continue;
spa = dev->l2ad_spa;
ASSERT3P(spa, !=, NULL);
/*
* If the pool is read-only then force the feed thread to
* sleep a little longer.
*/
if (!spa_writeable(spa)) {
next = ddi_get_lbolt() + 5 * l2arc_feed_secs * hz;
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
/*
* Avoid contributing to memory pressure.
*/
if (arc_reclaim_needed()) {
ARCSTAT_BUMP(arcstat_l2_abort_lowmem);
spa_config_exit(spa, SCL_L2ARC, dev);
continue;
}
ARCSTAT_BUMP(arcstat_l2_feeds);
size = l2arc_write_size();
/*
* Evict L2ARC buffers that will be overwritten.
*/
l2arc_evict(dev, size, B_FALSE);
/*
* Write ARC buffers.
*/
wrote = l2arc_write_buffers(spa, dev, size);
/*
* Calculate interval between writes.
*/
next = l2arc_write_interval(begin, size, wrote);
spa_config_exit(spa, SCL_L2ARC, dev);
}
spl_fstrans_unmark(cookie);
l2arc_thread_exit = 0;
cv_broadcast(&l2arc_feed_thr_cv);
CALLB_CPR_EXIT(&cpr); /* drops l2arc_feed_thr_lock */
thread_exit();
}
boolean_t
l2arc_vdev_present(vdev_t *vd)
{
l2arc_dev_t *dev;
mutex_enter(&l2arc_dev_mtx);
for (dev = list_head(l2arc_dev_list); dev != NULL;
dev = list_next(l2arc_dev_list, dev)) {
if (dev->l2ad_vdev == vd)
break;
}
mutex_exit(&l2arc_dev_mtx);
return (dev != NULL);
}
/*
* Add a vdev for use by the L2ARC. By this point the spa has already
* validated the vdev and opened it.
*/
void
l2arc_add_vdev(spa_t *spa, vdev_t *vd)
{
l2arc_dev_t *adddev;
ASSERT(!l2arc_vdev_present(vd));
/*
* Create a new l2arc device entry.
*/
adddev = kmem_zalloc(sizeof (l2arc_dev_t), KM_SLEEP);
adddev->l2ad_spa = spa;
adddev->l2ad_vdev = vd;
adddev->l2ad_start = VDEV_LABEL_START_SIZE;
adddev->l2ad_end = VDEV_LABEL_START_SIZE + vdev_get_min_asize(vd);
adddev->l2ad_hand = adddev->l2ad_start;
adddev->l2ad_first = B_TRUE;
adddev->l2ad_writing = B_FALSE;
list_link_init(&adddev->l2ad_node);
mutex_init(&adddev->l2ad_mtx, NULL, MUTEX_DEFAULT, NULL);
/*
* This is a list of all ARC buffers that are still valid on the
* device.
*/
list_create(&adddev->l2ad_buflist, sizeof (arc_buf_hdr_t),
offsetof(arc_buf_hdr_t, b_l2hdr.b_l2node));
vdev_space_update(vd, 0, 0, adddev->l2ad_end - adddev->l2ad_hand);
refcount_create(&adddev->l2ad_alloc);
/*
* Add device to global list
*/
mutex_enter(&l2arc_dev_mtx);
list_insert_head(l2arc_dev_list, adddev);
atomic_inc_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
}
/*
* Remove a vdev from the L2ARC.
*/
void
l2arc_remove_vdev(vdev_t *vd)
{
l2arc_dev_t *dev, *nextdev, *remdev = NULL;
/*
* Find the device by vdev
*/
mutex_enter(&l2arc_dev_mtx);
for (dev = list_head(l2arc_dev_list); dev; dev = nextdev) {
nextdev = list_next(l2arc_dev_list, dev);
if (vd == dev->l2ad_vdev) {
remdev = dev;
break;
}
}
ASSERT3P(remdev, !=, NULL);
/*
* Remove device from global list
*/
list_remove(l2arc_dev_list, remdev);
l2arc_dev_last = NULL; /* may have been invalidated */
atomic_dec_64(&l2arc_ndev);
mutex_exit(&l2arc_dev_mtx);
/*
* Clear all buflists and ARC references. L2ARC device flush.
*/
l2arc_evict(remdev, 0, B_TRUE);
list_destroy(&remdev->l2ad_buflist);
mutex_destroy(&remdev->l2ad_mtx);
refcount_destroy(&remdev->l2ad_alloc);
kmem_free(remdev, sizeof (l2arc_dev_t));
}
void
l2arc_init(void)
{
l2arc_thread_exit = 0;
l2arc_ndev = 0;
l2arc_writes_sent = 0;
l2arc_writes_done = 0;
mutex_init(&l2arc_feed_thr_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&l2arc_feed_thr_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&l2arc_dev_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&l2arc_free_on_write_mtx, NULL, MUTEX_DEFAULT, NULL);
l2arc_dev_list = &L2ARC_dev_list;
l2arc_free_on_write = &L2ARC_free_on_write;
list_create(l2arc_dev_list, sizeof (l2arc_dev_t),
offsetof(l2arc_dev_t, l2ad_node));
list_create(l2arc_free_on_write, sizeof (l2arc_data_free_t),
offsetof(l2arc_data_free_t, l2df_list_node));
}
void
l2arc_fini(void)
{
/*
* This is called from dmu_fini(), which is called from spa_fini();
* Because of this, we can assume that all l2arc devices have
* already been removed when the pools themselves were removed.
*/
l2arc_do_free_on_write();
mutex_destroy(&l2arc_feed_thr_lock);
cv_destroy(&l2arc_feed_thr_cv);
mutex_destroy(&l2arc_dev_mtx);
mutex_destroy(&l2arc_free_on_write_mtx);
list_destroy(l2arc_dev_list);
list_destroy(l2arc_free_on_write);
}
void
l2arc_start(void)
{
if (!(spa_mode_global & FWRITE))
return;
(void) thread_create(NULL, 0, l2arc_feed_thread, NULL, 0, &p0,
TS_RUN, defclsyspri);
}
void
l2arc_stop(void)
{
if (!(spa_mode_global & FWRITE))
return;
mutex_enter(&l2arc_feed_thr_lock);
cv_signal(&l2arc_feed_thr_cv); /* kick thread out of startup */
l2arc_thread_exit = 1;
while (l2arc_thread_exit != 0)
cv_wait(&l2arc_feed_thr_cv, &l2arc_feed_thr_lock);
mutex_exit(&l2arc_feed_thr_lock);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(arc_buf_size);
EXPORT_SYMBOL(arc_write);
EXPORT_SYMBOL(arc_read);
EXPORT_SYMBOL(arc_buf_info);
EXPORT_SYMBOL(arc_getbuf_func);
EXPORT_SYMBOL(arc_add_prune_callback);
EXPORT_SYMBOL(arc_remove_prune_callback);
/* BEGIN CSTYLED */
module_param(zfs_arc_min, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_min, "Min arc size");
module_param(zfs_arc_max, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_max, "Max arc size");
module_param(zfs_arc_meta_limit, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_meta_limit, "Meta limit for arc size");
module_param(zfs_arc_meta_limit_percent, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_meta_limit_percent,
"Percent of arc size for arc meta limit");
module_param(zfs_arc_meta_min, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_meta_min, "Min arc metadata");
module_param(zfs_arc_meta_prune, int, 0644);
MODULE_PARM_DESC(zfs_arc_meta_prune, "Meta objects to scan for prune");
module_param(zfs_arc_meta_adjust_restarts, int, 0644);
MODULE_PARM_DESC(zfs_arc_meta_adjust_restarts,
"Limit number of restarts in arc_adjust_meta");
module_param(zfs_arc_meta_strategy, int, 0644);
MODULE_PARM_DESC(zfs_arc_meta_strategy, "Meta reclaim strategy");
module_param(zfs_arc_grow_retry, int, 0644);
MODULE_PARM_DESC(zfs_arc_grow_retry, "Seconds before growing arc size");
module_param(zfs_arc_p_dampener_disable, int, 0644);
MODULE_PARM_DESC(zfs_arc_p_dampener_disable, "disable arc_p adapt dampener");
module_param(zfs_arc_shrink_shift, int, 0644);
MODULE_PARM_DESC(zfs_arc_shrink_shift, "log2(fraction of arc to reclaim)");
module_param(zfs_arc_pc_percent, uint, 0644);
MODULE_PARM_DESC(zfs_arc_pc_percent,
"Percent of pagecache to reclaim arc to");
module_param(zfs_arc_p_min_shift, int, 0644);
MODULE_PARM_DESC(zfs_arc_p_min_shift, "arc_c shift to calc min/max arc_p");
module_param(zfs_arc_average_blocksize, int, 0444);
MODULE_PARM_DESC(zfs_arc_average_blocksize, "Target average block size");
module_param(zfs_compressed_arc_enabled, int, 0644);
MODULE_PARM_DESC(zfs_compressed_arc_enabled, "Enable compressed arc buffers");
module_param(zfs_arc_min_prefetch_ms, int, 0644);
MODULE_PARM_DESC(zfs_arc_min_prefetch_ms, "Min life of prefetch block in ms");
module_param(zfs_arc_min_prescient_prefetch_ms, int, 0644);
MODULE_PARM_DESC(zfs_arc_min_prescient_prefetch_ms,
"Min life of prescient prefetched block in ms");
module_param(l2arc_write_max, ulong, 0644);
MODULE_PARM_DESC(l2arc_write_max, "Max write bytes per interval");
module_param(l2arc_write_boost, ulong, 0644);
MODULE_PARM_DESC(l2arc_write_boost, "Extra write bytes during device warmup");
module_param(l2arc_headroom, ulong, 0644);
MODULE_PARM_DESC(l2arc_headroom, "Number of max device writes to precache");
module_param(l2arc_headroom_boost, ulong, 0644);
MODULE_PARM_DESC(l2arc_headroom_boost, "Compressed l2arc_headroom multiplier");
module_param(l2arc_feed_secs, ulong, 0644);
MODULE_PARM_DESC(l2arc_feed_secs, "Seconds between L2ARC writing");
module_param(l2arc_feed_min_ms, ulong, 0644);
MODULE_PARM_DESC(l2arc_feed_min_ms, "Min feed interval in milliseconds");
module_param(l2arc_noprefetch, int, 0644);
MODULE_PARM_DESC(l2arc_noprefetch, "Skip caching prefetched buffers");
module_param(l2arc_feed_again, int, 0644);
MODULE_PARM_DESC(l2arc_feed_again, "Turbo L2ARC warmup");
module_param(l2arc_norw, int, 0644);
MODULE_PARM_DESC(l2arc_norw, "No reads during writes");
module_param(zfs_arc_lotsfree_percent, int, 0644);
MODULE_PARM_DESC(zfs_arc_lotsfree_percent,
"System free memory I/O throttle in bytes");
module_param(zfs_arc_sys_free, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_sys_free, "System free memory target size in bytes");
module_param(zfs_arc_dnode_limit, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_dnode_limit, "Minimum bytes of dnodes in arc");
module_param(zfs_arc_dnode_limit_percent, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_dnode_limit_percent,
"Percent of ARC meta buffers for dnodes");
module_param(zfs_arc_dnode_reduce_percent, ulong, 0644);
MODULE_PARM_DESC(zfs_arc_dnode_reduce_percent,
"Percentage of excess dnodes to try to unpin");
/* END CSTYLED */
#endif
diff --git a/module/zfs/bpobj.c b/module/zfs/bpobj.c
index 1708eb7109b6..66ab84df58b2 100644
--- a/module/zfs/bpobj.c
+++ b/module/zfs/bpobj.c
@@ -1,595 +1,605 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
* Copyright (c) 2017 Datto Inc.
*/
#include <sys/bpobj.h>
#include <sys/zfs_context.h>
#include <sys/refcount.h>
#include <sys/dsl_pool.h>
#include <sys/zfeature.h>
#include <sys/zap.h>
/*
* Return an empty bpobj, preferably the empty dummy one (dp_empty_bpobj).
*/
uint64_t
bpobj_alloc_empty(objset_t *os, int blocksize, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(os);
dsl_pool_t *dp = dmu_objset_pool(os);
if (spa_feature_is_enabled(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
if (!spa_feature_is_active(spa, SPA_FEATURE_EMPTY_BPOBJ)) {
ASSERT0(dp->dp_empty_bpobj);
dp->dp_empty_bpobj =
bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY(zap_add(os,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
&dp->dp_empty_bpobj, tx) == 0);
}
spa_feature_incr(spa, SPA_FEATURE_EMPTY_BPOBJ, tx);
ASSERT(dp->dp_empty_bpobj != 0);
return (dp->dp_empty_bpobj);
} else {
return (bpobj_alloc(os, blocksize, tx));
}
}
void
bpobj_decr_empty(objset_t *os, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_objset_pool(os);
spa_feature_decr(dmu_objset_spa(os), SPA_FEATURE_EMPTY_BPOBJ, tx);
if (!spa_feature_is_active(dmu_objset_spa(os),
SPA_FEATURE_EMPTY_BPOBJ)) {
VERIFY3U(0, ==, zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, tx));
VERIFY3U(0, ==, dmu_object_free(os, dp->dp_empty_bpobj, tx));
dp->dp_empty_bpobj = 0;
}
}
uint64_t
bpobj_alloc(objset_t *os, int blocksize, dmu_tx_t *tx)
{
int size;
if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_BPOBJ_ACCOUNT)
size = BPOBJ_SIZE_V0;
else if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
size = BPOBJ_SIZE_V1;
else
size = sizeof (bpobj_phys_t);
return (dmu_object_alloc(os, DMU_OT_BPOBJ, blocksize,
DMU_OT_BPOBJ_HDR, size, tx));
}
void
bpobj_free(objset_t *os, uint64_t obj, dmu_tx_t *tx)
{
int64_t i;
bpobj_t bpo;
dmu_object_info_t doi;
int epb;
dmu_buf_t *dbuf = NULL;
ASSERT(obj != dmu_objset_pool(os)->dp_empty_bpobj);
VERIFY3U(0, ==, bpobj_open(&bpo, os, obj));
mutex_enter(&bpo.bpo_lock);
if (!bpo.bpo_havesubobj || bpo.bpo_phys->bpo_subobjs == 0)
goto out;
VERIFY3U(0, ==, dmu_object_info(os, bpo.bpo_phys->bpo_subobjs, &doi));
epb = doi.doi_data_block_size / sizeof (uint64_t);
for (i = bpo.bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
uint64_t *objarray;
uint64_t offset, blkoff;
offset = i * sizeof (uint64_t);
blkoff = P2PHASE(i, epb);
if (dbuf == NULL || dbuf->db_offset > offset) {
if (dbuf)
dmu_buf_rele(dbuf, FTAG);
VERIFY3U(0, ==, dmu_buf_hold(os,
bpo.bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0));
}
ASSERT3U(offset, >=, dbuf->db_offset);
ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);
objarray = dbuf->db_data;
bpobj_free(os, objarray[blkoff], tx);
}
if (dbuf) {
dmu_buf_rele(dbuf, FTAG);
dbuf = NULL;
}
VERIFY3U(0, ==, dmu_object_free(os, bpo.bpo_phys->bpo_subobjs, tx));
out:
mutex_exit(&bpo.bpo_lock);
bpobj_close(&bpo);
VERIFY3U(0, ==, dmu_object_free(os, obj, tx));
}
int
bpobj_open(bpobj_t *bpo, objset_t *os, uint64_t object)
{
dmu_object_info_t doi;
int err;
err = dmu_object_info(os, object, &doi);
if (err)
return (err);
bzero(bpo, sizeof (*bpo));
mutex_init(&bpo->bpo_lock, NULL, MUTEX_DEFAULT, NULL);
ASSERT(bpo->bpo_dbuf == NULL);
ASSERT(bpo->bpo_phys == NULL);
ASSERT(object != 0);
ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ);
ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_BPOBJ_HDR);
err = dmu_bonus_hold(os, object, bpo, &bpo->bpo_dbuf);
if (err)
return (err);
bpo->bpo_os = os;
bpo->bpo_object = object;
bpo->bpo_epb = doi.doi_data_block_size >> SPA_BLKPTRSHIFT;
bpo->bpo_havecomp = (doi.doi_bonus_size > BPOBJ_SIZE_V0);
bpo->bpo_havesubobj = (doi.doi_bonus_size > BPOBJ_SIZE_V1);
bpo->bpo_phys = bpo->bpo_dbuf->db_data;
return (0);
}
+boolean_t
+bpobj_is_open(const bpobj_t *bpo)
+{
+ return (bpo->bpo_object != 0);
+}
+
void
bpobj_close(bpobj_t *bpo)
{
/* Lame workaround for closing a bpobj that was never opened. */
if (bpo->bpo_object == 0)
return;
dmu_buf_rele(bpo->bpo_dbuf, bpo);
if (bpo->bpo_cached_dbuf != NULL)
dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
bpo->bpo_dbuf = NULL;
bpo->bpo_phys = NULL;
bpo->bpo_cached_dbuf = NULL;
bpo->bpo_object = 0;
mutex_destroy(&bpo->bpo_lock);
}
-static boolean_t
-bpobj_hasentries(bpobj_t *bpo)
+boolean_t
+bpobj_is_empty(bpobj_t *bpo)
{
- return (bpo->bpo_phys->bpo_num_blkptrs != 0 ||
- (bpo->bpo_havesubobj && bpo->bpo_phys->bpo_num_subobjs != 0));
+ return (bpo->bpo_phys->bpo_num_blkptrs == 0 &&
+ (!bpo->bpo_havesubobj || bpo->bpo_phys->bpo_num_subobjs == 0));
}
static int
bpobj_iterate_impl(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx,
boolean_t free)
{
dmu_object_info_t doi;
int epb;
int64_t i;
int err = 0;
dmu_buf_t *dbuf = NULL;
+ ASSERT(bpobj_is_open(bpo));
mutex_enter(&bpo->bpo_lock);
- if (!bpobj_hasentries(bpo))
- goto out;
-
if (free)
dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
for (i = bpo->bpo_phys->bpo_num_blkptrs - 1; i >= 0; i--) {
blkptr_t *bparray;
blkptr_t *bp;
uint64_t offset, blkoff;
offset = i * sizeof (blkptr_t);
blkoff = P2PHASE(i, bpo->bpo_epb);
if (dbuf == NULL || dbuf->db_offset > offset) {
if (dbuf)
dmu_buf_rele(dbuf, FTAG);
err = dmu_buf_hold(bpo->bpo_os, bpo->bpo_object, offset,
FTAG, &dbuf, 0);
if (err)
break;
}
ASSERT3U(offset, >=, dbuf->db_offset);
ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);
bparray = dbuf->db_data;
bp = &bparray[blkoff];
err = func(arg, bp, tx);
if (err)
break;
if (free) {
bpo->bpo_phys->bpo_bytes -=
bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
if (bpo->bpo_havecomp) {
bpo->bpo_phys->bpo_comp -= BP_GET_PSIZE(bp);
bpo->bpo_phys->bpo_uncomp -= BP_GET_UCSIZE(bp);
}
bpo->bpo_phys->bpo_num_blkptrs--;
ASSERT3S(bpo->bpo_phys->bpo_num_blkptrs, >=, 0);
}
}
if (dbuf) {
dmu_buf_rele(dbuf, FTAG);
dbuf = NULL;
}
if (free) {
VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os, bpo->bpo_object,
(i + 1) * sizeof (blkptr_t), DMU_OBJECT_END, tx));
}
if (err || !bpo->bpo_havesubobj || bpo->bpo_phys->bpo_subobjs == 0)
goto out;
ASSERT(bpo->bpo_havecomp);
err = dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi);
if (err) {
mutex_exit(&bpo->bpo_lock);
return (err);
}
ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);
epb = doi.doi_data_block_size / sizeof (uint64_t);
for (i = bpo->bpo_phys->bpo_num_subobjs - 1; i >= 0; i--) {
uint64_t *objarray;
uint64_t offset, blkoff;
bpobj_t sublist;
uint64_t used_before, comp_before, uncomp_before;
uint64_t used_after, comp_after, uncomp_after;
offset = i * sizeof (uint64_t);
blkoff = P2PHASE(i, epb);
if (dbuf == NULL || dbuf->db_offset > offset) {
if (dbuf)
dmu_buf_rele(dbuf, FTAG);
err = dmu_buf_hold(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs, offset, FTAG, &dbuf, 0);
if (err)
break;
}
ASSERT3U(offset, >=, dbuf->db_offset);
ASSERT3U(offset, <, dbuf->db_offset + dbuf->db_size);
objarray = dbuf->db_data;
err = bpobj_open(&sublist, bpo->bpo_os, objarray[blkoff]);
if (err)
break;
if (free) {
err = bpobj_space(&sublist,
&used_before, &comp_before, &uncomp_before);
if (err != 0) {
bpobj_close(&sublist);
break;
}
}
err = bpobj_iterate_impl(&sublist, func, arg, tx, free);
if (free) {
VERIFY3U(0, ==, bpobj_space(&sublist,
&used_after, &comp_after, &uncomp_after));
bpo->bpo_phys->bpo_bytes -= used_before - used_after;
ASSERT3S(bpo->bpo_phys->bpo_bytes, >=, 0);
bpo->bpo_phys->bpo_comp -= comp_before - comp_after;
bpo->bpo_phys->bpo_uncomp -=
uncomp_before - uncomp_after;
}
bpobj_close(&sublist);
if (err)
break;
if (free) {
err = dmu_object_free(bpo->bpo_os,
objarray[blkoff], tx);
if (err)
break;
bpo->bpo_phys->bpo_num_subobjs--;
ASSERT3S(bpo->bpo_phys->bpo_num_subobjs, >=, 0);
}
}
if (dbuf) {
dmu_buf_rele(dbuf, FTAG);
dbuf = NULL;
}
if (free) {
VERIFY3U(0, ==, dmu_free_range(bpo->bpo_os,
bpo->bpo_phys->bpo_subobjs,
(i + 1) * sizeof (uint64_t), DMU_OBJECT_END, tx));
}
out:
/* If there are no entries, there should be no bytes. */
- if (!bpobj_hasentries(bpo)) {
+ if (bpobj_is_empty(bpo)) {
ASSERT0(bpo->bpo_phys->bpo_bytes);
ASSERT0(bpo->bpo_phys->bpo_comp);
ASSERT0(bpo->bpo_phys->bpo_uncomp);
}
mutex_exit(&bpo->bpo_lock);
return (err);
}
/*
* Iterate and remove the entries. If func returns nonzero, iteration
* will stop and that entry will not be removed.
*/
int
bpobj_iterate(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
return (bpobj_iterate_impl(bpo, func, arg, tx, B_TRUE));
}
/*
* Iterate the entries. If func returns nonzero, iteration will stop.
*/
int
bpobj_iterate_nofree(bpobj_t *bpo, bpobj_itor_t func, void *arg, dmu_tx_t *tx)
{
return (bpobj_iterate_impl(bpo, func, arg, tx, B_FALSE));
}
void
bpobj_enqueue_subobj(bpobj_t *bpo, uint64_t subobj, dmu_tx_t *tx)
{
bpobj_t subbpo;
uint64_t used, comp, uncomp, subsubobjs;
+ ASSERT(bpobj_is_open(bpo));
+ ASSERT(subobj != 0);
ASSERT(bpo->bpo_havesubobj);
ASSERT(bpo->bpo_havecomp);
ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);
if (subobj == dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj) {
bpobj_decr_empty(bpo->bpo_os, tx);
return;
}
VERIFY3U(0, ==, bpobj_open(&subbpo, bpo->bpo_os, subobj));
VERIFY3U(0, ==, bpobj_space(&subbpo, &used, &comp, &uncomp));
- if (!bpobj_hasentries(&subbpo)) {
+ if (bpobj_is_empty(&subbpo)) {
/* No point in having an empty subobj. */
bpobj_close(&subbpo);
bpobj_free(bpo->bpo_os, subobj, tx);
return;
}
mutex_enter(&bpo->bpo_lock);
dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
if (bpo->bpo_phys->bpo_subobjs == 0) {
bpo->bpo_phys->bpo_subobjs = dmu_object_alloc(bpo->bpo_os,
DMU_OT_BPOBJ_SUBOBJ, SPA_OLD_MAXBLOCKSIZE,
DMU_OT_NONE, 0, tx);
}
ASSERTV(dmu_object_info_t doi);
ASSERT0(dmu_object_info(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs, &doi));
ASSERT3U(doi.doi_type, ==, DMU_OT_BPOBJ_SUBOBJ);
dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
sizeof (subobj), &subobj, tx);
bpo->bpo_phys->bpo_num_subobjs++;
/*
* If subobj has only one block of subobjs, then move subobj's
* subobjs to bpo's subobj list directly. This reduces
* recursion in bpobj_iterate due to nested subobjs.
*/
subsubobjs = subbpo.bpo_phys->bpo_subobjs;
if (subsubobjs != 0) {
dmu_object_info_t doi;
VERIFY3U(0, ==, dmu_object_info(bpo->bpo_os, subsubobjs, &doi));
if (doi.doi_max_offset == doi.doi_data_block_size) {
dmu_buf_t *subdb;
uint64_t numsubsub = subbpo.bpo_phys->bpo_num_subobjs;
VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, subsubobjs,
0, FTAG, &subdb, 0));
/*
* Make sure that we are not asking dmu_write()
* to write more data than we have in our buffer.
*/
VERIFY3U(subdb->db_size, >=,
numsubsub * sizeof (subobj));
dmu_write(bpo->bpo_os, bpo->bpo_phys->bpo_subobjs,
bpo->bpo_phys->bpo_num_subobjs * sizeof (subobj),
numsubsub * sizeof (subobj), subdb->db_data, tx);
dmu_buf_rele(subdb, FTAG);
bpo->bpo_phys->bpo_num_subobjs += numsubsub;
dmu_buf_will_dirty(subbpo.bpo_dbuf, tx);
subbpo.bpo_phys->bpo_subobjs = 0;
VERIFY3U(0, ==, dmu_object_free(bpo->bpo_os,
subsubobjs, tx));
}
}
bpo->bpo_phys->bpo_bytes += used;
bpo->bpo_phys->bpo_comp += comp;
bpo->bpo_phys->bpo_uncomp += uncomp;
mutex_exit(&bpo->bpo_lock);
bpobj_close(&subbpo);
}
void
bpobj_enqueue(bpobj_t *bpo, const blkptr_t *bp, dmu_tx_t *tx)
{
blkptr_t stored_bp = *bp;
uint64_t offset;
int blkoff;
blkptr_t *bparray;
+ ASSERT(bpobj_is_open(bpo));
ASSERT(!BP_IS_HOLE(bp));
ASSERT(bpo->bpo_object != dmu_objset_pool(bpo->bpo_os)->dp_empty_bpobj);
if (BP_IS_EMBEDDED(bp)) {
/*
* The bpobj will compress better without the payload.
*
* Note that we store EMBEDDED bp's because they have an
* uncompressed size, which must be accounted for. An
* alternative would be to add their size to bpo_uncomp
* without storing the bp, but that would create additional
* complications: bpo_uncomp would be inconsistent with the
* set of BP's stored, and bpobj_iterate() wouldn't visit
* all the space accounted for in the bpobj.
*/
bzero(&stored_bp, sizeof (stored_bp));
stored_bp.blk_prop = bp->blk_prop;
stored_bp.blk_birth = bp->blk_birth;
} else if (!BP_GET_DEDUP(bp)) {
/* The bpobj will compress better without the checksum */
bzero(&stored_bp.blk_cksum, sizeof (stored_bp.blk_cksum));
}
/* We never need the fill count. */
stored_bp.blk_fill = 0;
mutex_enter(&bpo->bpo_lock);
offset = bpo->bpo_phys->bpo_num_blkptrs * sizeof (stored_bp);
blkoff = P2PHASE(bpo->bpo_phys->bpo_num_blkptrs, bpo->bpo_epb);
if (bpo->bpo_cached_dbuf == NULL ||
offset < bpo->bpo_cached_dbuf->db_offset ||
offset >= bpo->bpo_cached_dbuf->db_offset +
bpo->bpo_cached_dbuf->db_size) {
if (bpo->bpo_cached_dbuf)
dmu_buf_rele(bpo->bpo_cached_dbuf, bpo);
VERIFY3U(0, ==, dmu_buf_hold(bpo->bpo_os, bpo->bpo_object,
offset, bpo, &bpo->bpo_cached_dbuf, 0));
}
dmu_buf_will_dirty(bpo->bpo_cached_dbuf, tx);
bparray = bpo->bpo_cached_dbuf->db_data;
bparray[blkoff] = stored_bp;
dmu_buf_will_dirty(bpo->bpo_dbuf, tx);
bpo->bpo_phys->bpo_num_blkptrs++;
bpo->bpo_phys->bpo_bytes +=
bp_get_dsize_sync(dmu_objset_spa(bpo->bpo_os), bp);
if (bpo->bpo_havecomp) {
bpo->bpo_phys->bpo_comp += BP_GET_PSIZE(bp);
bpo->bpo_phys->bpo_uncomp += BP_GET_UCSIZE(bp);
}
mutex_exit(&bpo->bpo_lock);
}
struct space_range_arg {
spa_t *spa;
uint64_t mintxg;
uint64_t maxtxg;
uint64_t used;
uint64_t comp;
uint64_t uncomp;
};
/* ARGSUSED */
static int
space_range_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
struct space_range_arg *sra = arg;
if (bp->blk_birth > sra->mintxg && bp->blk_birth <= sra->maxtxg) {
if (dsl_pool_sync_context(spa_get_dsl(sra->spa)))
sra->used += bp_get_dsize_sync(sra->spa, bp);
else
sra->used += bp_get_dsize(sra->spa, bp);
sra->comp += BP_GET_PSIZE(bp);
sra->uncomp += BP_GET_UCSIZE(bp);
}
return (0);
}
int
bpobj_space(bpobj_t *bpo, uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
+ ASSERT(bpobj_is_open(bpo));
mutex_enter(&bpo->bpo_lock);
*usedp = bpo->bpo_phys->bpo_bytes;
if (bpo->bpo_havecomp) {
*compp = bpo->bpo_phys->bpo_comp;
*uncompp = bpo->bpo_phys->bpo_uncomp;
mutex_exit(&bpo->bpo_lock);
return (0);
} else {
mutex_exit(&bpo->bpo_lock);
return (bpobj_space_range(bpo, 0, UINT64_MAX,
usedp, compp, uncompp));
}
}
/*
* Return the amount of space in the bpobj which is:
* mintxg < blk_birth <= maxtxg
*/
int
bpobj_space_range(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
struct space_range_arg sra = { 0 };
int err;
+ ASSERT(bpobj_is_open(bpo));
+
/*
* As an optimization, if they want the whole txg range, just
* get bpo_bytes rather than iterating over the bps.
*/
if (mintxg < TXG_INITIAL && maxtxg == UINT64_MAX && bpo->bpo_havecomp)
return (bpobj_space(bpo, usedp, compp, uncompp));
sra.spa = dmu_objset_spa(bpo->bpo_os);
sra.mintxg = mintxg;
sra.maxtxg = maxtxg;
err = bpobj_iterate_nofree(bpo, space_range_cb, &sra, NULL);
*usedp = sra.used;
*compp = sra.comp;
*uncompp = sra.uncomp;
return (err);
}
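/*
 * A brief usage sketch of the two space queries above; the function and
 * variable names below are hypothetical and for illustration only.
 * Asking for the whole txg range lets bpobj_space_range() short-circuit
 * to the cached bpo_bytes counters, while a bounded range is computed by
 * iterating the stored block pointers.
 */
static int
bpobj_space_usage_example(bpobj_t *bpo, uint64_t mintxg, uint64_t maxtxg)
{
	uint64_t used, comp, uncomp;
	int err;

	/* Whole-object totals come straight from the cached counters. */
	err = bpobj_space(bpo, &used, &comp, &uncomp);
	if (err != 0)
		return (err);

	/* A bounded txg range walks the bps via bpobj_iterate_nofree(). */
	return (bpobj_space_range(bpo, mintxg, maxtxg,
	    &used, &comp, &uncomp));
}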
diff --git a/module/zfs/dbuf.c b/module/zfs/dbuf.c
index 1a6e560d21d6..b0ae6cc7294d 100644
--- a/module/zfs/dbuf.c
+++ b/module/zfs/dbuf.c
@@ -1,4243 +1,4382 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/arc.h>
#include <sys/dmu.h>
#include <sys/dmu_send.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_tx.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/blkptr.h>
#include <sys/range_tree.h>
#include <sys/trace_dbuf.h>
#include <sys/callb.h>
#include <sys/abd.h>
+#include <sys/vdev.h>
kstat_t *dbuf_ksp;
typedef struct dbuf_stats {
/*
* Various statistics about the size of the dbuf cache.
*/
kstat_named_t cache_count;
kstat_named_t cache_size_bytes;
kstat_named_t cache_size_bytes_max;
/*
* Statistics regarding the bounds on the dbuf cache size.
*/
kstat_named_t cache_target_bytes;
kstat_named_t cache_lowater_bytes;
kstat_named_t cache_hiwater_bytes;
/*
* Total number of dbuf cache evictions that have occurred.
*/
kstat_named_t cache_total_evicts;
/*
* The distribution of dbuf levels in the dbuf cache and
* the total size of all dbufs at each level.
*/
kstat_named_t cache_levels[DN_MAX_LEVELS];
kstat_named_t cache_levels_bytes[DN_MAX_LEVELS];
/*
* Statistics about the dbuf hash table.
*/
kstat_named_t hash_hits;
kstat_named_t hash_misses;
kstat_named_t hash_collisions;
kstat_named_t hash_elements;
kstat_named_t hash_elements_max;
/*
* Number of sublists containing more than one dbuf in the dbuf
* hash table. Keep track of the longest hash chain.
*/
kstat_named_t hash_chains;
kstat_named_t hash_chain_max;
/*
* Number of times a dbuf_create() discovers that a dbuf was
* already created and in the dbuf hash table.
*/
kstat_named_t hash_insert_race;
} dbuf_stats_t;
dbuf_stats_t dbuf_stats = {
{ "cache_count", KSTAT_DATA_UINT64 },
{ "cache_size_bytes", KSTAT_DATA_UINT64 },
{ "cache_size_bytes_max", KSTAT_DATA_UINT64 },
{ "cache_target_bytes", KSTAT_DATA_UINT64 },
{ "cache_lowater_bytes", KSTAT_DATA_UINT64 },
{ "cache_hiwater_bytes", KSTAT_DATA_UINT64 },
{ "cache_total_evicts", KSTAT_DATA_UINT64 },
{ { "cache_levels_N", KSTAT_DATA_UINT64 } },
{ { "cache_levels_bytes_N", KSTAT_DATA_UINT64 } },
{ "hash_hits", KSTAT_DATA_UINT64 },
{ "hash_misses", KSTAT_DATA_UINT64 },
{ "hash_collisions", KSTAT_DATA_UINT64 },
{ "hash_elements", KSTAT_DATA_UINT64 },
{ "hash_elements_max", KSTAT_DATA_UINT64 },
{ "hash_chains", KSTAT_DATA_UINT64 },
{ "hash_chain_max", KSTAT_DATA_UINT64 },
{ "hash_insert_race", KSTAT_DATA_UINT64 }
};
#define DBUF_STAT_INCR(stat, val) \
atomic_add_64(&dbuf_stats.stat.value.ui64, (val));
#define DBUF_STAT_DECR(stat, val) \
DBUF_STAT_INCR(stat, -(val));
#define DBUF_STAT_BUMP(stat) \
DBUF_STAT_INCR(stat, 1);
#define DBUF_STAT_BUMPDOWN(stat) \
DBUF_STAT_INCR(stat, -1);
#define DBUF_STAT_MAX(stat, v) { \
uint64_t _m; \
while ((v) > (_m = dbuf_stats.stat.value.ui64) && \
(_m != atomic_cas_64(&dbuf_stats.stat.value.ui64, _m, (v))))\
continue; \
}
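/*
 * DBUF_STAT_MAX() above maintains a lock-free running maximum: it
 * re-reads the current value and retries the compare-and-swap until
 * either the stored maximum is already at least (v) or the CAS succeeds
 * in publishing (v).
 */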
struct dbuf_hold_impl_data {
/* Function arguments */
dnode_t *dh_dn;
uint8_t dh_level;
uint64_t dh_blkid;
boolean_t dh_fail_sparse;
boolean_t dh_fail_uncached;
void *dh_tag;
dmu_buf_impl_t **dh_dbp;
/* Local variables */
dmu_buf_impl_t *dh_db;
dmu_buf_impl_t *dh_parent;
blkptr_t *dh_bp;
int dh_err;
dbuf_dirty_record_t *dh_dr;
int dh_depth;
};
static void __dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
dnode_t *dn, uint8_t level, uint64_t blkid, boolean_t fail_sparse,
boolean_t fail_uncached,
void *tag, dmu_buf_impl_t **dbp, int depth);
static int __dbuf_hold_impl(struct dbuf_hold_impl_data *dh);
uint_t zfs_dbuf_evict_key;
static boolean_t dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx);
static void dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx);
extern inline void dmu_buf_init_user(dmu_buf_user_t *dbu,
dmu_buf_evict_func_t *evict_func_sync,
dmu_buf_evict_func_t *evict_func_async,
dmu_buf_t **clear_on_evict_dbufp);
/*
* Global data structures and functions for the dbuf cache.
*/
static kmem_cache_t *dbuf_kmem_cache;
static taskq_t *dbu_evict_taskq;
static kthread_t *dbuf_cache_evict_thread;
static kmutex_t dbuf_evict_lock;
static kcondvar_t dbuf_evict_cv;
static boolean_t dbuf_evict_thread_exit;
/*
* LRU cache of dbufs. The dbuf cache maintains a list of dbufs that
* are not currently held but have been recently released. These dbufs
* are not eligible for arc eviction until they are aged out of the cache.
* Dbufs are added to the dbuf cache once the last hold is released. If a
* dbuf is later accessed and still exists in the dbuf cache, then it will
* be removed from the cache and later re-added to the head of the cache.
* Dbufs that are aged out of the cache will be immediately destroyed and
* become eligible for arc eviction.
*/
static multilist_t *dbuf_cache;
static refcount_t dbuf_cache_size;
unsigned long dbuf_cache_max_bytes = 0;
/* Set the default size of the dbuf cache to log2 fraction of arc size. */
int dbuf_cache_shift = 5;
/*
* The dbuf cache uses a three-stage eviction policy:
* - A low water marker designates when the dbuf eviction thread
* should stop evicting from the dbuf cache.
* - When we reach the maximum size (aka mid water mark), we
* signal the eviction thread to run.
* - The high water mark indicates when the eviction thread
* is unable to keep up with the incoming load and eviction must
* happen in the context of the calling thread.
*
* The dbuf cache:
* (max size)
* low water mid water hi water
* +----------------------------------------+----------+----------+
* | | | |
* | | | |
* | | | |
* | | | |
* +----------------------------------------+----------+----------+
* stop signal evict
* evicting eviction directly
* thread
*
* The high and low water marks indicate the operating range for the eviction
* thread. The low water mark is, by default, 90% of the total size of the
* cache and the high water mark is at 110% (both of these percentages can be
* changed by setting dbuf_cache_lowater_pct and dbuf_cache_hiwater_pct,
* respectively). The eviction thread will try to ensure that the cache remains
* within this range by waking up every second and checking if the cache is
* above the low water mark. The thread can also be woken up by callers adding
* elements into the cache if the cache is larger than the mid water (i.e. max
* cache size). Once the eviction thread is woken up and eviction is required,
* it will continue evicting buffers until it's able to reduce the cache size
* to the low water mark. If the cache size continues to grow and hits the high
* water mark, then callers adding elements to the cache will begin to evict
* directly from the cache until the cache is no longer above the high water
* mark.
*/
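/*
 * For example (the numbers are illustrative only): with a target size of
 * 100 MiB and the default dbuf_cache_hiwater_pct / dbuf_cache_lowater_pct
 * of 10, dbuf_cache_lowater_bytes() is 90 MiB and
 * dbuf_cache_hiwater_bytes() is 110 MiB. The eviction thread is signaled
 * once the cache exceeds 100 MiB and keeps evicting until the size drops
 * back to 90 MiB; only above 110 MiB do callers evict directly.
 */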
/*
* The percentage above and below the maximum cache size.
*/
uint_t dbuf_cache_hiwater_pct = 10;
uint_t dbuf_cache_lowater_pct = 10;
/* ARGSUSED */
static int
dbuf_cons(void *vdb, void *unused, int kmflag)
{
dmu_buf_impl_t *db = vdb;
bzero(db, sizeof (dmu_buf_impl_t));
mutex_init(&db->db_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&db->db_changed, NULL, CV_DEFAULT, NULL);
multilist_link_init(&db->db_cache_link);
refcount_create(&db->db_holds);
return (0);
}
/* ARGSUSED */
static void
dbuf_dest(void *vdb, void *unused)
{
dmu_buf_impl_t *db = vdb;
mutex_destroy(&db->db_mtx);
cv_destroy(&db->db_changed);
ASSERT(!multilist_link_active(&db->db_cache_link));
refcount_destroy(&db->db_holds);
}
/*
* dbuf hash table routines
*/
static dbuf_hash_table_t dbuf_hash_table;
static uint64_t dbuf_hash_count;
static uint64_t
dbuf_hash(void *os, uint64_t obj, uint8_t lvl, uint64_t blkid)
{
uintptr_t osv = (uintptr_t)os;
uint64_t crc = -1ULL;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (lvl)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 0)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (blkid >> 8)) & 0xFF];
crc ^= (osv>>14) ^ (obj>>16) ^ (blkid>>16);
return (crc);
}
#define DBUF_EQUAL(dbuf, os, obj, level, blkid) \
((dbuf)->db.db_object == (obj) && \
(dbuf)->db_objset == (os) && \
(dbuf)->db_level == (level) && \
(dbuf)->db_blkid == (blkid))
dmu_buf_impl_t *
dbuf_find(objset_t *os, uint64_t obj, uint8_t level, uint64_t blkid)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t hv;
uint64_t idx;
dmu_buf_impl_t *db;
hv = dbuf_hash(os, obj, level, blkid);
idx = hv & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (db = h->hash_table[idx]; db != NULL; db = db->db_hash_next) {
if (DBUF_EQUAL(db, os, obj, level, blkid)) {
mutex_enter(&db->db_mtx);
if (db->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (db);
}
mutex_exit(&db->db_mtx);
}
}
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (NULL);
}
static dmu_buf_impl_t *
dbuf_find_bonus(objset_t *os, uint64_t object)
{
dnode_t *dn;
dmu_buf_impl_t *db = NULL;
if (dnode_hold(os, object, FTAG, &dn) == 0) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus != NULL) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
return (db);
}
/*
* Insert an entry into the hash table. If there is already an element
* equal to elem in the hash table, then the already existing element
* will be returned and the new element will not be inserted.
* Otherwise returns NULL.
*/
static dmu_buf_impl_t *
dbuf_hash_insert(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
objset_t *os = db->db_objset;
uint64_t obj = db->db.db_object;
int level = db->db_level;
uint64_t blkid, hv, idx;
dmu_buf_impl_t *dbf;
uint32_t i;
blkid = db->db_blkid;
hv = dbuf_hash(os, obj, level, blkid);
idx = hv & h->hash_table_mask;
mutex_enter(DBUF_HASH_MUTEX(h, idx));
for (dbf = h->hash_table[idx], i = 0; dbf != NULL;
dbf = dbf->db_hash_next, i++) {
if (DBUF_EQUAL(dbf, os, obj, level, blkid)) {
mutex_enter(&dbf->db_mtx);
if (dbf->db_state != DB_EVICTING) {
mutex_exit(DBUF_HASH_MUTEX(h, idx));
return (dbf);
}
mutex_exit(&dbf->db_mtx);
}
}
if (i > 0) {
DBUF_STAT_BUMP(hash_collisions);
if (i == 1)
DBUF_STAT_BUMP(hash_chains);
DBUF_STAT_MAX(hash_chain_max, i);
}
mutex_enter(&db->db_mtx);
db->db_hash_next = h->hash_table[idx];
h->hash_table[idx] = db;
mutex_exit(DBUF_HASH_MUTEX(h, idx));
atomic_inc_64(&dbuf_hash_count);
DBUF_STAT_MAX(hash_elements_max, dbuf_hash_count);
return (NULL);
}
/*
* Remove an entry from the hash table. It must be in the EVICTING state.
*/
static void
dbuf_hash_remove(dmu_buf_impl_t *db)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
uint64_t hv, idx;
dmu_buf_impl_t *dbf, **dbp;
hv = dbuf_hash(db->db_objset, db->db.db_object,
db->db_level, db->db_blkid);
idx = hv & h->hash_table_mask;
/*
* We mustn't hold db_mtx to maintain lock ordering:
* DBUF_HASH_MUTEX > db_mtx.
*/
ASSERT(refcount_is_zero(&db->db_holds));
ASSERT(db->db_state == DB_EVICTING);
ASSERT(!MUTEX_HELD(&db->db_mtx));
mutex_enter(DBUF_HASH_MUTEX(h, idx));
dbp = &h->hash_table[idx];
while ((dbf = *dbp) != db) {
dbp = &dbf->db_hash_next;
ASSERT(dbf != NULL);
}
*dbp = db->db_hash_next;
db->db_hash_next = NULL;
if (h->hash_table[idx] &&
h->hash_table[idx]->db_hash_next == NULL)
DBUF_STAT_BUMPDOWN(hash_chains);
mutex_exit(DBUF_HASH_MUTEX(h, idx));
atomic_dec_64(&dbuf_hash_count);
}
typedef enum {
DBVU_EVICTING,
DBVU_NOT_EVICTING
} dbvu_verify_type_t;
static void
dbuf_verify_user(dmu_buf_impl_t *db, dbvu_verify_type_t verify_type)
{
#ifdef ZFS_DEBUG
int64_t holds;
if (db->db_user == NULL)
return;
/* Only data blocks support the attachment of user data. */
ASSERT(db->db_level == 0);
/* Clients must resolve a dbuf before attaching user data. */
ASSERT(db->db.db_data != NULL);
ASSERT3U(db->db_state, ==, DB_CACHED);
holds = refcount_count(&db->db_holds);
if (verify_type == DBVU_EVICTING) {
/*
* Immediate eviction occurs when holds == dirtycnt.
* For normal eviction buffers, holds is zero on
* eviction, except when dbuf_fix_old_data() calls
* dbuf_clear_data(). However, the hold count can grow
* during eviction even though db_mtx is held (see
* dmu_bonus_hold() for an example), so we can only
* test the generic invariant that holds >= dirtycnt.
*/
ASSERT3U(holds, >=, db->db_dirtycnt);
} else {
if (db->db_user_immediate_evict == TRUE)
ASSERT3U(holds, >=, db->db_dirtycnt);
else
ASSERT3U(holds, >, 0);
}
#endif
}
static void
dbuf_evict_user(dmu_buf_impl_t *db)
{
dmu_buf_user_t *dbu = db->db_user;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (dbu == NULL)
return;
dbuf_verify_user(db, DBVU_EVICTING);
db->db_user = NULL;
#ifdef ZFS_DEBUG
if (dbu->dbu_clear_on_evict_dbufp != NULL)
*dbu->dbu_clear_on_evict_dbufp = NULL;
#endif
/*
* There are two eviction callbacks - one that we call synchronously
* and one that we invoke via a taskq. The async one is useful for
* avoiding lock order reversals and limiting stack depth.
*
* Note that if we have a sync callback but no async callback,
* it's likely that the sync callback will free the structure
* containing the dbu. In that case we need to take care to not
* dereference dbu after calling the sync evict func.
*/
boolean_t has_async = (dbu->dbu_evict_func_async != NULL);
if (dbu->dbu_evict_func_sync != NULL)
dbu->dbu_evict_func_sync(dbu);
if (has_async) {
taskq_dispatch_ent(dbu_evict_taskq, dbu->dbu_evict_func_async,
dbu, 0, &dbu->dbu_tqent);
}
}
boolean_t
dbuf_is_metadata(dmu_buf_impl_t *db)
{
/*
* Consider indirect blocks and spill blocks to be meta data.
*/
if (db->db_level > 0 || db->db_blkid == DMU_SPILL_BLKID) {
return (B_TRUE);
} else {
boolean_t is_metadata;
DB_DNODE_ENTER(db);
is_metadata = DMU_OT_IS_METADATA(DB_DNODE(db)->dn_type);
DB_DNODE_EXIT(db);
return (is_metadata);
}
}
/*
* This function *must* return indices evenly distributed between all
* sublists of the multilist. This is needed due to how the dbuf eviction
* code is laid out; dbuf_evict_thread() assumes dbufs are evenly
* distributed between all sublists and uses this assumption when
* deciding which sublist to evict from and how much to evict from it.
*/
unsigned int
dbuf_cache_multilist_index_func(multilist_t *ml, void *obj)
{
dmu_buf_impl_t *db = obj;
/*
* The assumption here is that the hash value for a given
* dmu_buf_impl_t will remain constant throughout its lifetime
* (i.e. its objset, object, level and blkid fields don't change).
* Thus, we don't need to store the dbuf's sublist index
* on insertion, as this index can be recalculated on removal.
*
* Also, the low order bits of the hash value are thought to be
* distributed evenly. Otherwise, in the case that the multilist
* has a power of two number of sublists, each sublist's usage
* would not be evenly distributed.
*/
return (dbuf_hash(db->db_objset, db->db.db_object,
db->db_level, db->db_blkid) %
multilist_get_num_sublists(ml));
}
static inline unsigned long
dbuf_cache_target_bytes(void)
{
return (MIN(dbuf_cache_max_bytes,
arc_target_bytes() >> dbuf_cache_shift));
}
static inline uint64_t
dbuf_cache_hiwater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target +
(dbuf_cache_target * dbuf_cache_hiwater_pct) / 100);
}
static inline uint64_t
dbuf_cache_lowater_bytes(void)
{
uint64_t dbuf_cache_target = dbuf_cache_target_bytes();
return (dbuf_cache_target -
(dbuf_cache_target * dbuf_cache_lowater_pct) / 100);
}
static inline boolean_t
dbuf_cache_above_hiwater(void)
{
return (refcount_count(&dbuf_cache_size) > dbuf_cache_hiwater_bytes());
}
static inline boolean_t
dbuf_cache_above_lowater(void)
{
return (refcount_count(&dbuf_cache_size) > dbuf_cache_lowater_bytes());
}
/*
* Evict the oldest eligible dbuf from the dbuf cache.
*/
static void
dbuf_evict_one(void)
{
int idx = multilist_get_random_index(dbuf_cache);
multilist_sublist_t *mls = multilist_sublist_lock(dbuf_cache, idx);
ASSERT(!MUTEX_HELD(&dbuf_evict_lock));
/*
* Set the thread's tsd to indicate that it's processing evictions.
* Once a thread stops evicting from the dbuf cache it will
* reset its tsd to NULL.
*/
ASSERT3P(tsd_get(zfs_dbuf_evict_key), ==, NULL);
(void) tsd_set(zfs_dbuf_evict_key, (void *)B_TRUE);
dmu_buf_impl_t *db = multilist_sublist_tail(mls);
while (db != NULL && mutex_tryenter(&db->db_mtx) == 0) {
db = multilist_sublist_prev(mls, db);
}
DTRACE_PROBE2(dbuf__evict__one, dmu_buf_impl_t *, db,
multilist_sublist_t *, mls);
if (db != NULL) {
multilist_sublist_remove(mls, db);
multilist_sublist_unlock(mls);
(void) refcount_remove_many(&dbuf_cache_size,
db->db.db_size, db);
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
dbuf_destroy(db);
DBUF_STAT_MAX(cache_size_bytes_max,
refcount_count(&dbuf_cache_size));
DBUF_STAT_BUMP(cache_total_evicts);
} else {
multilist_sublist_unlock(mls);
}
(void) tsd_set(zfs_dbuf_evict_key, NULL);
}
/*
* The dbuf evict thread is responsible for aging out dbufs from the
* cache. Once the cache has reached its maximum size, dbufs are removed
* and destroyed. The eviction thread will continue running until the size
* of the dbuf cache is at or below the maximum size. Once the dbuf is aged
* out of the cache it is destroyed and becomes eligible for arc eviction.
*/
/* ARGSUSED */
static void
dbuf_evict_thread(void *unused)
{
callb_cpr_t cpr;
CALLB_CPR_INIT(&cpr, &dbuf_evict_lock, callb_generic_cpr, FTAG);
mutex_enter(&dbuf_evict_lock);
while (!dbuf_evict_thread_exit) {
while (!dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_sig_hires(&dbuf_evict_cv,
&dbuf_evict_lock, SEC2NSEC(1), MSEC2NSEC(1), 0);
CALLB_CPR_SAFE_END(&cpr, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
/*
* Keep evicting as long as we're above the low water mark
* for the cache. We do this without holding the locks to
* minimize lock contention.
*/
while (dbuf_cache_above_lowater() && !dbuf_evict_thread_exit) {
dbuf_evict_one();
}
mutex_enter(&dbuf_evict_lock);
}
dbuf_evict_thread_exit = B_FALSE;
cv_broadcast(&dbuf_evict_cv);
CALLB_CPR_EXIT(&cpr); /* drops dbuf_evict_lock */
thread_exit();
}
/*
* Wake up the dbuf eviction thread if the dbuf cache is at its max size.
* If the dbuf cache is at its high water mark, then evict a dbuf from the
* dbuf cache using the caller's context.
*/
static void
dbuf_evict_notify(void)
{
/*
* We use thread specific data to track when a thread has
* started processing evictions. This allows us to avoid deeply
* nested stacks that would have a call flow similar to this:
*
* dbuf_rele()-->dbuf_rele_and_unlock()-->dbuf_evict_notify()
* ^ |
* | |
* +-----dbuf_destroy()<--dbuf_evict_one()<--------+
*
* The dbuf_eviction_thread will always have its tsd set until
* that thread exits. All other threads will only set their tsd
* if they are participating in the eviction process. This only
* happens if the eviction thread is unable to process evictions
* fast enough. To keep the dbuf cache size in check, other threads
* can evict from the dbuf cache directly. Those threads will set
* their tsd values so that we ensure that they only evict one dbuf
* from the dbuf cache.
*/
if (tsd_get(zfs_dbuf_evict_key) != NULL)
return;
/*
* We check if we should evict without holding the dbuf_evict_lock,
* because it's OK to occasionally make the wrong decision here,
* and grabbing the lock results in massive lock contention.
*/
if (refcount_count(&dbuf_cache_size) > dbuf_cache_target_bytes()) {
if (dbuf_cache_above_hiwater())
dbuf_evict_one();
cv_signal(&dbuf_evict_cv);
}
}
static int
dbuf_kstat_update(kstat_t *ksp, int rw)
{
dbuf_stats_t *ds = ksp->ks_data;
if (rw == KSTAT_WRITE) {
return (SET_ERROR(EACCES));
} else {
ds->cache_size_bytes.value.ui64 =
refcount_count(&dbuf_cache_size);
ds->cache_target_bytes.value.ui64 = dbuf_cache_target_bytes();
ds->cache_hiwater_bytes.value.ui64 = dbuf_cache_hiwater_bytes();
ds->cache_lowater_bytes.value.ui64 = dbuf_cache_lowater_bytes();
ds->hash_elements.value.ui64 = dbuf_hash_count;
}
return (0);
}
void
dbuf_init(void)
{
uint64_t hsize = 1ULL << 16;
dbuf_hash_table_t *h = &dbuf_hash_table;
int i;
/*
* The hash table is big enough to fill all of physical memory
* with an average block size of zfs_arc_average_blocksize (default 8K).
* By default, the table will take up
* totalmem * sizeof(void*) / 8K (1MB per GB with 8-byte pointers).
*/
while (hsize * zfs_arc_average_blocksize < physmem * PAGESIZE)
hsize <<= 1;
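/*
 * Illustrative sizing example (assumed values, not normative): with
 * 16 GiB of physical memory and the default zfs_arc_average_blocksize
 * of 8K, the loop above grows hsize from 2^16 until
 * hsize * 8K >= 16 GiB, i.e. hsize = 2^21 buckets. With 8-byte
 * pointers the bucket array then consumes 2^21 * sizeof (void *) =
 * 16 MiB, matching the "1MB per GB" estimate above.
 */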
retry:
h->hash_table_mask = hsize - 1;
#if defined(_KERNEL) && defined(HAVE_SPL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_alloc() in the linux kernel
*/
h->hash_table = vmem_zalloc(hsize * sizeof (void *), KM_SLEEP);
#else
h->hash_table = kmem_zalloc(hsize * sizeof (void *), KM_NOSLEEP);
#endif
if (h->hash_table == NULL) {
/* XXX - we should really return an error instead of assert */
ASSERT(hsize > (1ULL << 10));
hsize >>= 1;
goto retry;
}
dbuf_kmem_cache = kmem_cache_create("dmu_buf_impl_t",
sizeof (dmu_buf_impl_t),
0, dbuf_cons, dbuf_dest, NULL, NULL, NULL, 0);
for (i = 0; i < DBUF_MUTEXES; i++)
mutex_init(&h->hash_mutexes[i], NULL, MUTEX_DEFAULT, NULL);
dbuf_stats_init(h);
/*
* Setup the parameters for the dbuf cache. We set the size of the
* dbuf cache to 1/32nd (default) of the target size of the ARC. If
* the value has been specified as a module option and it's less
* than the target size of the ARC, then we honor that value.
*/
if (dbuf_cache_max_bytes == 0 ||
dbuf_cache_max_bytes >= arc_target_bytes()) {
dbuf_cache_max_bytes = arc_target_bytes() >> dbuf_cache_shift;
}
/*
* All entries are queued via taskq_dispatch_ent(), so min/maxalloc
* configuration is not required.
*/
dbu_evict_taskq = taskq_create("dbu_evict", 1, defclsyspri, 0, 0, 0);
dbuf_cache = multilist_create(sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_cache_link),
dbuf_cache_multilist_index_func);
refcount_create(&dbuf_cache_size);
tsd_create(&zfs_dbuf_evict_key, NULL);
dbuf_evict_thread_exit = B_FALSE;
mutex_init(&dbuf_evict_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dbuf_evict_cv, NULL, CV_DEFAULT, NULL);
dbuf_cache_evict_thread = thread_create(NULL, 0, dbuf_evict_thread,
NULL, 0, &p0, TS_RUN, minclsyspri);
dbuf_ksp = kstat_create("zfs", 0, "dbufstats", "misc",
KSTAT_TYPE_NAMED, sizeof (dbuf_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dbuf_ksp != NULL) {
dbuf_ksp->ks_data = &dbuf_stats;
dbuf_ksp->ks_update = dbuf_kstat_update;
kstat_install(dbuf_ksp);
for (i = 0; i < DN_MAX_LEVELS; i++) {
snprintf(dbuf_stats.cache_levels[i].name,
KSTAT_STRLEN, "cache_level_%d", i);
dbuf_stats.cache_levels[i].data_type =
KSTAT_DATA_UINT64;
snprintf(dbuf_stats.cache_levels_bytes[i].name,
KSTAT_STRLEN, "cache_level_%d_bytes", i);
dbuf_stats.cache_levels_bytes[i].data_type =
KSTAT_DATA_UINT64;
}
}
}
void
dbuf_fini(void)
{
dbuf_hash_table_t *h = &dbuf_hash_table;
int i;
dbuf_stats_destroy();
for (i = 0; i < DBUF_MUTEXES; i++)
mutex_destroy(&h->hash_mutexes[i]);
#if defined(_KERNEL) && defined(HAVE_SPL)
/*
* Large allocations which do not require contiguous pages
* should be using vmem_free() in the linux kernel
*/
vmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#else
kmem_free(h->hash_table, (h->hash_table_mask + 1) * sizeof (void *));
#endif
kmem_cache_destroy(dbuf_kmem_cache);
taskq_destroy(dbu_evict_taskq);
mutex_enter(&dbuf_evict_lock);
dbuf_evict_thread_exit = B_TRUE;
while (dbuf_evict_thread_exit) {
cv_signal(&dbuf_evict_cv);
cv_wait(&dbuf_evict_cv, &dbuf_evict_lock);
}
mutex_exit(&dbuf_evict_lock);
tsd_destroy(&zfs_dbuf_evict_key);
mutex_destroy(&dbuf_evict_lock);
cv_destroy(&dbuf_evict_cv);
refcount_destroy(&dbuf_cache_size);
multilist_destroy(dbuf_cache);
if (dbuf_ksp != NULL) {
kstat_delete(dbuf_ksp);
dbuf_ksp = NULL;
}
}
/*
* Other stuff.
*/
#ifdef ZFS_DEBUG
static void
dbuf_verify(dmu_buf_impl_t *db)
{
dnode_t *dn;
dbuf_dirty_record_t *dr;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (!(zfs_flags & ZFS_DEBUG_DBUF_VERIFY))
return;
ASSERT(db->db_objset != NULL);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn == NULL) {
ASSERT(db->db_parent == NULL);
ASSERT(db->db_blkptr == NULL);
} else {
ASSERT3U(db->db.db_object, ==, dn->dn_object);
ASSERT3P(db->db_objset, ==, dn->dn_objset);
ASSERT3U(db->db_level, <, dn->dn_nlevels);
ASSERT(db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID ||
!avl_is_empty(&dn->dn_dbufs));
}
if (db->db_blkid == DMU_BONUS_BLKID) {
ASSERT(dn != NULL);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
ASSERT3U(db->db.db_offset, ==, DMU_BONUS_BLKID);
} else if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn != NULL);
ASSERT0(db->db.db_offset);
} else {
ASSERT3U(db->db.db_offset, ==, db->db_blkid * db->db.db_size);
}
for (dr = db->db_data_pending; dr != NULL; dr = dr->dr_next)
ASSERT(dr->dr_dbuf == db);
for (dr = db->db_last_dirty; dr != NULL; dr = dr->dr_next)
ASSERT(dr->dr_dbuf == db);
/*
* We can't assert that db_size matches dn_datablksz because it
* can be momentarily different when another thread is doing
* dnode_set_blksz().
*/
if (db->db_level == 0 && db->db.db_object == DMU_META_DNODE_OBJECT) {
dr = db->db_data_pending;
/*
* It should only be modified in syncing context, so
* make sure we only have one copy of the data.
*/
ASSERT(dr == NULL || dr->dt.dl.dr_data == db->db_buf);
}
/* verify db->db_blkptr */
if (db->db_blkptr) {
if (db->db_parent == dn->dn_dbuf) {
/* db is pointed to by the dnode */
/* ASSERT3U(db->db_blkid, <, dn->dn_nblkptr); */
if (DMU_OBJECT_IS_SPECIAL(db->db.db_object))
ASSERT(db->db_parent == NULL);
else
ASSERT(db->db_parent != NULL);
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
} else {
/* db is pointed to by an indirect block */
ASSERTV(int epb = db->db_parent->db.db_size >>
SPA_BLKPTRSHIFT);
ASSERT3U(db->db_parent->db_level, ==, db->db_level+1);
ASSERT3U(db->db_parent->db.db_object, ==,
db->db.db_object);
/*
* dnode_grow_indblksz() can make this fail if we don't
* have the struct_rwlock. XXX indblksz no longer
* grows. safe to do this now?
*/
if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
ASSERT3P(db->db_blkptr, ==,
((blkptr_t *)db->db_parent->db.db_data +
db->db_blkid % epb));
}
}
}
if ((db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr)) &&
(db->db_buf == NULL || db->db_buf->b_data) &&
db->db.db_data && db->db_blkid != DMU_BONUS_BLKID &&
db->db_state != DB_FILL && !dn->dn_free_txg) {
/*
* If the blkptr isn't set but they have nonzero data,
* it had better be dirty, otherwise we'll lose that
* data when we evict this buffer.
*
* There is an exception to this rule for indirect blocks; in
* this case, if the indirect block is a hole, we fill in a few
* fields on each of the child blocks (importantly, birth time)
* to prevent hole birth times from being lost when you
* partially fill in a hole.
*/
if (db->db_dirtycnt == 0) {
if (db->db_level == 0) {
uint64_t *buf = db->db.db_data;
int i;
for (i = 0; i < db->db.db_size >> 3; i++) {
ASSERT(buf[i] == 0);
}
} else {
blkptr_t *bps = db->db.db_data;
ASSERT3U(1 << DB_DNODE(db)->dn_indblkshift, ==,
db->db.db_size);
/*
* We want to verify that all the blkptrs in the
* indirect block are holes, but we may have
* automatically set up a few fields for them.
* We iterate through each blkptr and verify
* they only have those fields set.
*/
for (int i = 0;
i < db->db.db_size / sizeof (blkptr_t);
i++) {
blkptr_t *bp = &bps[i];
ASSERT(ZIO_CHECKSUM_IS_ZERO(
&bp->blk_cksum));
ASSERT(
DVA_IS_EMPTY(&bp->blk_dva[0]) &&
DVA_IS_EMPTY(&bp->blk_dva[1]) &&
DVA_IS_EMPTY(&bp->blk_dva[2]));
ASSERT0(bp->blk_fill);
ASSERT0(bp->blk_pad[0]);
ASSERT0(bp->blk_pad[1]);
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(BP_IS_HOLE(bp));
ASSERT0(bp->blk_phys_birth);
}
}
}
}
DB_DNODE_EXIT(db);
}
#endif
static void
dbuf_clear_data(dmu_buf_impl_t *db)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
dbuf_evict_user(db);
ASSERT3P(db->db_buf, ==, NULL);
db->db.db_data = NULL;
if (db->db_state != DB_NOFILL)
db->db_state = DB_UNCACHED;
}
static void
dbuf_set_data(dmu_buf_impl_t *db, arc_buf_t *buf)
{
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(buf != NULL);
db->db_buf = buf;
ASSERT(buf->b_data != NULL);
db->db.db_data = buf->b_data;
}
/*
* Loan out an arc_buf for read. Return the loaned arc_buf.
*/
arc_buf_t *
dbuf_loan_arcbuf(dmu_buf_impl_t *db)
{
arc_buf_t *abuf;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
if (arc_released(db->db_buf) || refcount_count(&db->db_holds) > 1) {
int blksz = db->db.db_size;
spa_t *spa = db->db_objset->os_spa;
mutex_exit(&db->db_mtx);
abuf = arc_loan_buf(spa, B_FALSE, blksz);
bcopy(db->db.db_data, abuf->b_data, blksz);
} else {
abuf = db->db_buf;
arc_loan_inuse_buf(abuf, db);
db->db_buf = NULL;
dbuf_clear_data(db);
mutex_exit(&db->db_mtx);
}
return (abuf);
}
/*
* Calculate which level n block references the data at the level 0 offset
* provided.
*/
uint64_t
dbuf_whichblock(const dnode_t *dn, const int64_t level, const uint64_t offset)
{
if (dn->dn_datablkshift != 0 && dn->dn_indblkshift != 0) {
/*
* The level n blkid is equal to the level 0 blkid divided by
* the number of level 0s in a level n block.
*
* The level 0 blkid is offset >> datablkshift =
* offset / 2^datablkshift.
*
* The number of level 0s in a level n is the number of block
* pointers in an indirect block, raised to the power of level.
* This is 2^(indblkshift - SPA_BLKPTRSHIFT)^level =
* 2^(level*(indblkshift - SPA_BLKPTRSHIFT)).
*
* Thus, the level n blkid is: offset /
* ((2^datablkshift)*(2^(level*(indblkshift - SPA_BLKPTRSHIFT)))
* = offset / 2^(datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
* = offset >> (datablkshift + level *
* (indblkshift - SPA_BLKPTRSHIFT))
*/
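/*
 * A concrete (hypothetical) example of the formula above: with 128K
 * data blocks (datablkshift = 17) and 128K indirect blocks
 * (indblkshift = 17, so each indirect block holds
 * 2^(17 - SPA_BLKPTRSHIFT) = 2^10 block pointers), the level 1 blkid
 * for offset 2^30 (1 GiB) is
 * 2^30 >> (17 + 1 * 10) = 2^30 >> 27 = 8, i.e. the 9th level 1
 * block, which covers level 0 blkids 8192..9215.
 */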
const unsigned exp = dn->dn_datablkshift +
level * (dn->dn_indblkshift - SPA_BLKPTRSHIFT);
if (exp >= 8 * sizeof (offset)) {
/* This only happens on the highest indirection level */
ASSERT3U(level, ==, dn->dn_nlevels - 1);
return (0);
}
ASSERT3U(exp, <, 8 * sizeof (offset));
return (offset >> exp);
} else {
ASSERT3U(offset, <, dn->dn_datablksz);
return (0);
}
}
static void
dbuf_read_done(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *vdb)
{
dmu_buf_impl_t *db = vdb;
mutex_enter(&db->db_mtx);
ASSERT3U(db->db_state, ==, DB_READ);
/*
* All reads are synchronous, so we must have a hold on the dbuf
*/
ASSERT(refcount_count(&db->db_holds) > 0);
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
if (db->db_level == 0 && db->db_freed_in_flight) {
/* we were freed in flight; disregard any error */
if (buf == NULL) {
buf = arc_alloc_buf(db->db_objset->os_spa,
db, DBUF_GET_BUFC_TYPE(db), db->db.db_size);
}
arc_release(buf, db);
bzero(buf->b_data, db->db.db_size);
arc_buf_freeze(buf);
db->db_freed_in_flight = FALSE;
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
} else if (buf != NULL) {
dbuf_set_data(db, buf);
db->db_state = DB_CACHED;
} else {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT3P(db->db_buf, ==, NULL);
db->db_state = DB_UNCACHED;
}
cv_broadcast(&db->db_changed);
dbuf_rele_and_unlock(db, NULL);
}
static int
dbuf_read_impl(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
dnode_t *dn;
zbookmark_phys_t zb;
uint32_t aflags = ARC_FLAG_NOWAIT;
int err, zio_flags = 0;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
ASSERT(!refcount_is_zero(&db->db_holds));
/* We need the struct_rwlock to prevent db_blkptr from changing. */
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db_state == DB_UNCACHED);
ASSERT(db->db_buf == NULL);
if (db->db_blkid == DMU_BONUS_BLKID) {
/*
* The bonus length stored in the dnode may be less than
* the maximum available space in the bonus buffer.
*/
int bonuslen = MIN(dn->dn_bonuslen, dn->dn_phys->dn_bonuslen);
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
arc_buf_t *dn_buf = (dn->dn_dbuf != NULL) ?
dn->dn_dbuf->db_buf : NULL;
/* if the underlying dnode block is encrypted, decrypt it */
if (dn_buf != NULL && dn->dn_objset->os_encrypted &&
DMU_OT_IS_ENCRYPTED(dn->dn_bonustype) &&
(flags & DB_RF_NO_DECRYPT) == 0 &&
arc_is_encrypted(dn_buf)) {
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
DMU_META_DNODE_OBJECT, 0, dn->dn_dbuf->db_blkid);
err = arc_untransform(dn_buf, dn->dn_objset->os_spa,
&zb, B_TRUE);
if (err != 0) {
DB_DNODE_EXIT(db);
mutex_exit(&db->db_mtx);
return (err);
}
}
ASSERT3U(bonuslen, <=, db->db.db_size);
db->db.db_data = kmem_alloc(max_bonuslen, KM_SLEEP);
arc_space_consume(max_bonuslen, ARC_SPACE_BONUS);
if (bonuslen < max_bonuslen)
bzero(db->db.db_data, max_bonuslen);
if (bonuslen)
bcopy(DN_BONUS(dn->dn_phys), db->db.db_data, bonuslen);
DB_DNODE_EXIT(db);
db->db_state = DB_CACHED;
mutex_exit(&db->db_mtx);
return (0);
}
/*
* Recheck BP_IS_HOLE() after dnode_block_freed() in case dnode_sync()
* processes the delete record and clears the bp while we are waiting
* for the dn_mtx (resulting in a "no" from block_freed).
*/
if (db->db_blkptr == NULL || BP_IS_HOLE(db->db_blkptr) ||
(db->db_level == 0 && (dnode_block_freed(dn, db->db_blkid) ||
BP_IS_HOLE(db->db_blkptr)))) {
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
dbuf_set_data(db, arc_alloc_buf(db->db_objset->os_spa, db, type,
db->db.db_size));
bzero(db->db.db_data, db->db.db_size);
if (db->db_blkptr != NULL && db->db_level > 0 &&
BP_IS_HOLE(db->db_blkptr) &&
db->db_blkptr->blk_birth != 0) {
blkptr_t *bps = db->db.db_data;
for (int i = 0; i < ((1 <<
DB_DNODE(db)->dn_indblkshift) / sizeof (blkptr_t));
i++) {
blkptr_t *bp = &bps[i];
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
1 << dn->dn_indblkshift);
BP_SET_LSIZE(bp,
BP_GET_LEVEL(db->db_blkptr) == 1 ?
dn->dn_datablksz :
BP_GET_LSIZE(db->db_blkptr));
BP_SET_TYPE(bp, BP_GET_TYPE(db->db_blkptr));
BP_SET_LEVEL(bp,
BP_GET_LEVEL(db->db_blkptr) - 1);
BP_SET_BIRTH(bp, db->db_blkptr->blk_birth, 0);
}
}
DB_DNODE_EXIT(db);
db->db_state = DB_CACHED;
mutex_exit(&db->db_mtx);
return (0);
}
DB_DNODE_EXIT(db);
db->db_state = DB_READ;
mutex_exit(&db->db_mtx);
if (DBUF_IS_L2CACHEABLE(db))
aflags |= ARC_FLAG_L2CACHE;
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
/*
* All bps of an encrypted os should have the encryption bit set.
* If this is not true it indicates tampering and we report an error.
*/
if (db->db_objset->os_encrypted && !BP_USES_CRYPT(db->db_blkptr)) {
spa_log_error(db->db_objset->os_spa, &zb);
zfs_panic_recover("unencrypted block in encrypted "
"object set %llu", dmu_objset_id(db->db_objset));
return (SET_ERROR(EIO));
}
dbuf_add_ref(db, NULL);
zio_flags = (flags & DB_RF_CANFAIL) ?
ZIO_FLAG_CANFAIL : ZIO_FLAG_MUSTSUCCEED;
if ((flags & DB_RF_NO_DECRYPT) && BP_IS_PROTECTED(db->db_blkptr))
zio_flags |= ZIO_FLAG_RAW;
err = arc_read(zio, db->db_objset->os_spa, db->db_blkptr,
dbuf_read_done, db, ZIO_PRIORITY_SYNC_READ, zio_flags,
&aflags, &zb);
return (err);
}
/*
* This is our just-in-time copy function. It makes a copy of buffers that
* have been modified in a previous transaction group before we access them in
* the current active group.
*
* This function is used in three places: when we are dirtying a buffer for the
* first time in a txg, when we are freeing a range in a dnode that includes
* this buffer, and when we are accessing a buffer which was received compressed
* and later referenced in a WRITE_BYREF record.
*
* Note that when we are called from dbuf_free_range() we do not put a hold on
* the buffer, we just traverse the active dbuf list for the dnode.
*/
static void
dbuf_fix_old_data(dmu_buf_impl_t *db, uint64_t txg)
{
dbuf_dirty_record_t *dr = db->db_last_dirty;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(db->db.db_data != NULL);
ASSERT(db->db_level == 0);
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT);
if (dr == NULL ||
(dr->dt.dl.dr_data !=
((db->db_blkid == DMU_BONUS_BLKID) ? db->db.db_data : db->db_buf)))
return;
/*
* If the last dirty record for this dbuf has not yet synced
* and it's referencing the dbuf data, either:
* reset the reference to point to a new copy,
* or (if there are no active holders)
* just null out the current db_data pointer.
*/
ASSERT3U(dr->dr_txg, >=, txg - 2);
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn = DB_DNODE(db);
int bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
dr->dt.dl.dr_data = kmem_alloc(bonuslen, KM_SLEEP);
arc_space_consume(bonuslen, ARC_SPACE_BONUS);
bcopy(db->db.db_data, dr->dt.dl.dr_data, bonuslen);
} else if (refcount_count(&db->db_holds) > db->db_dirtycnt) {
dnode_t *dn = DB_DNODE(db);
int size = arc_buf_size(db->db_buf);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
spa_t *spa = db->db_objset->os_spa;
enum zio_compress compress_type =
arc_get_compression(db->db_buf);
if (arc_is_encrypted(db->db_buf)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(db->db_buf, &byteorder, salt,
iv, mac);
dr->dt.dl.dr_data = arc_alloc_raw_buf(spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv,
mac, dn->dn_type, size, arc_buf_lsize(db->db_buf),
compress_type);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
dr->dt.dl.dr_data = arc_alloc_compressed_buf(spa, db,
size, arc_buf_lsize(db->db_buf), compress_type);
} else {
dr->dt.dl.dr_data = arc_alloc_buf(spa, db, type, size);
}
bcopy(db->db.db_data, dr->dt.dl.dr_data->b_data, size);
} else {
db->db_buf = NULL;
dbuf_clear_data(db);
}
}
int
dbuf_read(dmu_buf_impl_t *db, zio_t *zio, uint32_t flags)
{
int err = 0;
boolean_t prefetch;
dnode_t *dn;
/*
* We don't have to hold the mutex to check db_state because it
* can't be freed while we have a hold on the buffer.
*/
ASSERT(!refcount_is_zero(&db->db_holds));
if (db->db_state == DB_NOFILL)
return (SET_ERROR(EIO));
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_enter(&dn->dn_struct_rwlock, RW_READER);
prefetch = db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
(flags & DB_RF_NOPREFETCH) == 0 && dn != NULL &&
DBUF_IS_CACHEABLE(db);
mutex_enter(&db->db_mtx);
if (db->db_state == DB_CACHED) {
spa_t *spa = dn->dn_objset->os_spa;
/*
* If the arc buf is compressed or encrypted, we need to
* untransform it to read the data. This could happen during
* the "zfs receive" of a stream which is deduplicated and
* either raw or compressed. We do not need to do this if the
* caller wants raw encrypted data.
*/
if (db->db_buf != NULL && (flags & DB_RF_NO_DECRYPT) == 0 &&
(arc_is_encrypted(db->db_buf) ||
arc_get_compression(db->db_buf) != ZIO_COMPRESS_OFF)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
dbuf_fix_old_data(db, spa_syncing_txg(spa));
err = arc_untransform(db->db_buf, spa, &zb, B_FALSE);
dbuf_set_data(db, db->db_buf);
}
mutex_exit(&db->db_mtx);
if (prefetch)
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_hits);
} else if (db->db_state == DB_UNCACHED) {
spa_t *spa = dn->dn_objset->os_spa;
boolean_t need_wait = B_FALSE;
if (zio == NULL &&
db->db_blkptr != NULL && !BP_IS_HOLE(db->db_blkptr)) {
zio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
need_wait = B_TRUE;
}
err = dbuf_read_impl(db, zio, flags);
/* dbuf_read_impl has dropped db_mtx for us */
if (!err && prefetch)
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
if (!err && need_wait)
err = zio_wait(zio);
} else {
/*
* Another reader came in while the dbuf was in flight
* between UNCACHED and CACHED. Either a writer will finish
* writing the buffer (sending the dbuf to CACHED) or the
* first reader's request will reach the read_done callback
* and send the dbuf to CACHED. Otherwise, a failure
* occurred and the dbuf went to UNCACHED.
*/
mutex_exit(&db->db_mtx);
if (prefetch)
dmu_zfetch(&dn->dn_zfetch, db->db_blkid, 1, B_TRUE);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
DB_DNODE_EXIT(db);
DBUF_STAT_BUMP(hash_misses);
/* Skip the wait per the caller's request. */
mutex_enter(&db->db_mtx);
if ((flags & DB_RF_NEVERWAIT) == 0) {
while (db->db_state == DB_READ ||
db->db_state == DB_FILL) {
ASSERT(db->db_state == DB_READ ||
(flags & DB_RF_HAVESTRUCT) == 0);
DTRACE_PROBE2(blocked__read, dmu_buf_impl_t *,
db, zio_t *, zio);
cv_wait(&db->db_changed, &db->db_mtx);
}
if (db->db_state == DB_UNCACHED)
err = SET_ERROR(EIO);
}
mutex_exit(&db->db_mtx);
}
return (err);
}
static void
dbuf_noread(dmu_buf_impl_t *db)
{
ASSERT(!refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED) {
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
spa_t *spa = db->db_objset->os_spa;
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
dbuf_set_data(db, arc_alloc_buf(spa, db, type, db->db.db_size));
db->db_state = DB_FILL;
} else if (db->db_state == DB_NOFILL) {
dbuf_clear_data(db);
} else {
ASSERT3U(db->db_state, ==, DB_CACHED);
}
mutex_exit(&db->db_mtx);
}
void
dbuf_unoverride(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *bp = &dr->dt.dl.dr_overridden_by;
uint64_t txg = dr->dr_txg;
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* This assert is valid because dmu_sync() expects to be called by
* a zilog's get_data while holding a range lock. This call only
* comes from dbuf_dirty() callers who must also hold a range lock.
*/
ASSERT(dr->dt.dl.dr_override_state != DR_IN_DMU_SYNC);
ASSERT(db->db_level == 0);
if (db->db_blkid == DMU_BONUS_BLKID ||
dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN)
return;
ASSERT(db->db_data_pending != dr);
/* free this block */
if (!BP_IS_HOLE(bp) && !dr->dt.dl.dr_nopwrite)
zio_free(db->db_objset->os_spa, txg, bp);
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
dr->dt.dl.dr_nopwrite = B_FALSE;
dr->dt.dl.dr_raw = B_FALSE;
/*
* Release the already-written buffer, so we leave it in
* a consistent dirty state. Note that all callers are
* modifying the buffer, so they will immediately do
* another (redundant) arc_release(). Therefore, leave
* the buf thawed to save the effort of freezing &
* immediately re-thawing it.
*/
arc_release(dr->dt.dl.dr_data, db);
}
/*
* Evict (if it's unreferenced) or clear (if it's referenced) any level-0
* data blocks in the free range, so that any future readers will find
* empty blocks.
*/
void
dbuf_free_range(dnode_t *dn, uint64_t start_blkid, uint64_t end_blkid,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db_search;
dmu_buf_impl_t *db, *db_next;
uint64_t txg = tx->tx_txg;
avl_index_t where;
if (end_blkid > dn->dn_maxblkid &&
!(start_blkid == DMU_SPILL_BLKID || end_blkid == DMU_SPILL_BLKID))
end_blkid = dn->dn_maxblkid;
dprintf_dnode(dn, "start=%llu end=%llu\n", start_blkid, end_blkid);
db_search = kmem_alloc(sizeof (dmu_buf_impl_t), KM_SLEEP);
db_search->db_level = 0;
db_search->db_blkid = start_blkid;
db_search->db_state = DB_SEARCH;
mutex_enter(&dn->dn_dbufs_mtx);
db = avl_find(&dn->dn_dbufs, db_search, &where);
ASSERT3P(db, ==, NULL);
db = avl_nearest(&dn->dn_dbufs, where, AVL_AFTER);
for (; db != NULL; db = db_next) {
db_next = AVL_NEXT(&dn->dn_dbufs, db);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
if (db->db_level != 0 || db->db_blkid > end_blkid) {
break;
}
ASSERT3U(db->db_blkid, >=, start_blkid);
/* found a level 0 buffer in the range */
mutex_enter(&db->db_mtx);
if (dbuf_undirty(db, tx)) {
/* mutex has been dropped and dbuf destroyed */
continue;
}
if (db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL ||
db->db_state == DB_EVICTING) {
ASSERT(db->db.db_data == NULL);
mutex_exit(&db->db_mtx);
continue;
}
if (db->db_state == DB_READ || db->db_state == DB_FILL) {
/* will be handled in dbuf_read_done or dbuf_rele */
db->db_freed_in_flight = TRUE;
mutex_exit(&db->db_mtx);
continue;
}
if (refcount_count(&db->db_holds) == 0) {
ASSERT(db->db_buf);
dbuf_destroy(db);
continue;
}
/* The dbuf is referenced */
if (db->db_last_dirty != NULL) {
dbuf_dirty_record_t *dr = db->db_last_dirty;
if (dr->dr_txg == txg) {
/*
* This buffer is "in-use", re-adjust the file
* size to reflect that this buffer may
* contain new data when we sync.
*/
if (db->db_blkid != DMU_SPILL_BLKID &&
db->db_blkid > dn->dn_maxblkid)
dn->dn_maxblkid = db->db_blkid;
dbuf_unoverride(dr);
} else {
/*
* This dbuf is not dirty in the open context.
* Either uncache it (if it's not referenced in
* the open context) or reset its contents to
* empty.
*/
dbuf_fix_old_data(db, txg);
}
}
/* clear the contents if it's cached */
if (db->db_state == DB_CACHED) {
ASSERT(db->db.db_data != NULL);
arc_release(db->db_buf, db);
bzero(db->db.db_data, db->db.db_size);
arc_buf_freeze(db->db_buf);
}
mutex_exit(&db->db_mtx);
}
kmem_free(db_search, sizeof (dmu_buf_impl_t));
mutex_exit(&dn->dn_dbufs_mtx);
}
void
dbuf_new_size(dmu_buf_impl_t *db, int size, dmu_tx_t *tx)
{
arc_buf_t *buf, *obuf;
int osize = db->db.db_size;
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
dnode_t *dn;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/* XXX does *this* func really need the lock? */
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
/*
* This call to dmu_buf_will_dirty() with the dn_struct_rwlock held
* is OK, because there can be no other references to the db
* when we are changing its size, so no concurrent DB_FILL can
* be happening.
*/
/*
* XXX we should be doing a dbuf_read, checking the return
* value and returning that up to our callers
*/
dmu_buf_will_dirty(&db->db, tx);
/* create the data buffer for the new block */
buf = arc_alloc_buf(dn->dn_objset->os_spa, db, type, size);
/* copy old block data to the new block */
obuf = db->db_buf;
bcopy(obuf->b_data, buf->b_data, MIN(osize, size));
/* zero the remainder */
if (size > osize)
bzero((uint8_t *)buf->b_data + osize, size - osize);
mutex_enter(&db->db_mtx);
dbuf_set_data(db, buf);
arc_buf_destroy(obuf, db);
db->db.db_size = size;
if (db->db_level == 0) {
ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
db->db_last_dirty->dt.dl.dr_data = buf;
}
mutex_exit(&db->db_mtx);
dmu_objset_willuse_space(dn->dn_objset, size - osize, tx);
DB_DNODE_EXIT(db);
}
void
dbuf_release_bp(dmu_buf_impl_t *db)
{
ASSERTV(objset_t *os = db->db_objset);
ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
ASSERT(arc_released(os->os_phys_buf) ||
list_link_active(&os->os_dsl_dataset->ds_synced_link));
ASSERT(db->db_parent == NULL || arc_released(db->db_parent->db_buf));
(void) arc_release(db->db_buf, db);
}
/*
* We already have a dirty record for this TXG, and we are being
* dirtied again.
*/
static void
dbuf_redirty(dbuf_dirty_record_t *dr)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID) {
/*
* If this buffer has already been written out,
* we now need to reset its state.
*/
dbuf_unoverride(dr);
if (db->db.db_object != DMU_META_DNODE_OBJECT &&
db->db_state != DB_NOFILL) {
/* Already released on initial dirty, so just thaw. */
ASSERT(arc_released(db->db_buf));
arc_buf_thaw(db->db_buf);
}
}
}
dbuf_dirty_record_t *
dbuf_dirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dnode_t *dn;
objset_t *os;
dbuf_dirty_record_t **drp, *dr;
int drop_struct_lock = FALSE;
int txgoff = tx->tx_txg & TXG_MASK;
ASSERT(tx->tx_txg != 0);
ASSERT(!refcount_is_zero(&db->db_holds));
DMU_TX_DIRTY_BUF(tx, db);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/*
* Shouldn't dirty a regular buffer in syncing context. Private
* objects may be dirtied in syncing context, but only if they
* were already pre-dirtied in open context.
*/
#ifdef DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
RW_READER, FTAG);
}
ASSERT(!dmu_tx_is_syncing(tx) ||
BP_IS_HOLE(dn->dn_objset->os_rootbp) ||
DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
dn->dn_objset->os_dsl_dataset == NULL);
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
/*
* We make this assert for private objects as well, but after we
* check if we're already dirty. They are allowed to re-dirty
* in syncing context.
*/
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
mutex_enter(&db->db_mtx);
/*
* XXX make this true for indirects too? The problem is that
* transactions created with dmu_tx_create_assigned() from
* syncing context don't bother holding ahead.
*/
ASSERT(db->db_level != 0 ||
db->db_state == DB_CACHED || db->db_state == DB_FILL ||
db->db_state == DB_NOFILL);
mutex_enter(&dn->dn_mtx);
/*
* Don't set dirtyctx to SYNC if we're just modifying this as we
* initialize the objset.
*/
if (dn->dn_dirtyctx == DN_UNDIRTIED) {
if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_enter(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
RW_READER, FTAG);
}
if (!BP_IS_HOLE(dn->dn_objset->os_rootbp)) {
dn->dn_dirtyctx = (dmu_tx_is_syncing(tx) ?
DN_DIRTY_SYNC : DN_DIRTY_OPEN);
ASSERT(dn->dn_dirtyctx_firstset == NULL);
dn->dn_dirtyctx_firstset = kmem_alloc(1, KM_SLEEP);
}
if (dn->dn_objset->os_dsl_dataset != NULL) {
rrw_exit(&dn->dn_objset->os_dsl_dataset->ds_bp_rwlock,
FTAG);
}
}
if (tx->tx_txg > dn->dn_dirty_txg)
dn->dn_dirty_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
if (db->db_blkid == DMU_SPILL_BLKID)
dn->dn_have_spill = B_TRUE;
/*
* If this buffer is already dirty, we're done.
*/
drp = &db->db_last_dirty;
ASSERT(*drp == NULL || (*drp)->dr_txg <= tx->tx_txg ||
db->db.db_object == DMU_META_DNODE_OBJECT);
while ((dr = *drp) != NULL && dr->dr_txg > tx->tx_txg)
drp = &dr->dr_next;
if (dr && dr->dr_txg == tx->tx_txg) {
DB_DNODE_EXIT(db);
dbuf_redirty(dr);
mutex_exit(&db->db_mtx);
return (dr);
}
/*
* Only valid if not already dirty.
*/
ASSERT(dn->dn_object == 0 ||
dn->dn_dirtyctx == DN_UNDIRTIED || dn->dn_dirtyctx ==
(dmu_tx_is_syncing(tx) ? DN_DIRTY_SYNC : DN_DIRTY_OPEN));
ASSERT3U(dn->dn_nlevels, >, db->db_level);
/*
* We should only be dirtying in syncing context if it's the
* mos or we're initializing the os or it's a special object.
* However, we are allowed to dirty in syncing context provided
* we already dirtied it in open context. Hence we must make
* this assertion only if we're not already dirty.
*/
os = dn->dn_objset;
VERIFY3U(tx->tx_txg, <=, spa_final_dirty_txg(os->os_spa));
#ifdef DEBUG
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_READER, FTAG);
ASSERT(!dmu_tx_is_syncing(tx) || DMU_OBJECT_IS_SPECIAL(dn->dn_object) ||
os->os_dsl_dataset == NULL || BP_IS_HOLE(os->os_rootbp));
if (dn->dn_objset->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
#endif
ASSERT(db->db.db_size != 0);
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
if (db->db_blkid != DMU_BONUS_BLKID) {
dmu_objset_willuse_space(os, db->db.db_size, tx);
}
/*
* If this buffer is dirty in an old transaction group we need
* to make a copy of it so that the changes we make in this
* transaction group won't leak out when we sync the older txg.
*/
dr = kmem_zalloc(sizeof (dbuf_dirty_record_t), KM_SLEEP);
list_link_init(&dr->dr_dirty_node);
if (db->db_level == 0) {
void *data_old = db->db_buf;
if (db->db_state != DB_NOFILL) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db.db_data;
} else if (db->db.db_object != DMU_META_DNODE_OBJECT) {
/*
* Release the data buffer from the cache so
* that we can modify it without impacting
* possible other users of this cached data
* block. Note that indirect blocks and
* private objects are not released until the
* syncing state (since they are only modified
* then).
*/
arc_release(db->db_buf, db);
dbuf_fix_old_data(db, tx->tx_txg);
data_old = db->db_buf;
}
ASSERT(data_old != NULL);
}
dr->dt.dl.dr_data = data_old;
} else {
mutex_init(&dr->dt.di.dr_mtx, NULL, MUTEX_NOLOCKDEP, NULL);
list_create(&dr->dt.di.dr_children,
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
if (db->db_blkid != DMU_BONUS_BLKID && os->os_dsl_dataset != NULL)
dr->dr_accounted = db->db.db_size;
dr->dr_dbuf = db;
dr->dr_txg = tx->tx_txg;
dr->dr_next = *drp;
*drp = dr;
/*
* We could have been freed_in_flight between the dbuf_noread
* and dbuf_dirty. We win, as though the dbuf_noread() had
* happened after the free.
*/
if (db->db_level == 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_free_ranges[txgoff] != NULL) {
range_tree_clear(dn->dn_free_ranges[txgoff],
db->db_blkid, 1);
}
mutex_exit(&dn->dn_mtx);
db->db_freed_in_flight = FALSE;
}
/*
* This buffer is now part of this txg
*/
dbuf_add_ref(db, (void *)(uintptr_t)tx->tx_txg);
db->db_dirtycnt += 1;
ASSERT3U(db->db_dirtycnt, <=, 3);
mutex_exit(&db->db_mtx);
if (db->db_blkid == DMU_BONUS_BLKID ||
db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
/*
* The dn_struct_rwlock prevents db_blkptr from changing
* due to a write from syncing context completing
* while we are running, so we want to acquire it before
* looking at db_blkptr.
*/
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
/*
* We need to hold the dn_struct_rwlock to make this assertion,
* because it protects dn_phys / dn_next_nlevels from changing.
*/
ASSERT((dn->dn_phys->dn_nlevels == 0 && db->db_level == 0) ||
dn->dn_phys->dn_nlevels > db->db_level ||
dn->dn_next_nlevels[txgoff] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-1) & TXG_MASK] > db->db_level ||
dn->dn_next_nlevels[(tx->tx_txg-2) & TXG_MASK] > db->db_level);
/*
* If we are overwriting a dedup BP, then unless it is snapshotted,
* when we get to syncing context we will need to decrement its
* refcount in the DDT. Prefetch the relevant DDT block so that
* syncing context won't have to wait for the i/o.
*/
ddt_prefetch(os->os_spa, db->db_blkptr);
if (db->db_level == 0) {
dnode_new_blkid(dn, db->db_blkid, tx, drop_struct_lock);
ASSERT(dn->dn_maxblkid >= db->db_blkid);
}
if (db->db_level+1 < dn->dn_nlevels) {
dmu_buf_impl_t *parent = db->db_parent;
dbuf_dirty_record_t *di;
int parent_held = FALSE;
if (db->db_parent == NULL || db->db_parent == dn->dn_dbuf) {
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
parent = dbuf_hold_level(dn, db->db_level+1,
db->db_blkid >> epbs, FTAG);
ASSERT(parent != NULL);
parent_held = TRUE;
}
if (drop_struct_lock)
rw_exit(&dn->dn_struct_rwlock);
ASSERT3U(db->db_level+1, ==, parent->db_level);
di = dbuf_dirty(parent, tx);
if (parent_held)
dbuf_rele(parent, FTAG);
mutex_enter(&db->db_mtx);
/*
* Since we've dropped the mutex, it's possible that
* dbuf_undirty() might have changed this out from under us.
*/
if (db->db_last_dirty == dr ||
dn->dn_object == DMU_META_DNODE_OBJECT) {
mutex_enter(&di->dt.di.dr_mtx);
ASSERT3U(di->dr_txg, ==, tx->tx_txg);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&di->dt.di.dr_children, dr);
mutex_exit(&di->dt.di.dr_mtx);
dr->dr_parent = di;
}
mutex_exit(&db->db_mtx);
} else {
ASSERT(db->db_level+1 == dn->dn_nlevels);
ASSERT(db->db_blkid < dn->dn_nblkptr);
ASSERT(db->db_parent == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
list_insert_tail(&dn->dn_dirty_records[txgoff], dr);
mutex_exit(&dn->dn_mtx);
if (drop_struct_lock)
rw_exit(&dn->dn_struct_rwlock);
}
dnode_setdirty(dn, tx);
DB_DNODE_EXIT(db);
return (dr);
}
/*
* Undirty a buffer in the transaction group referenced by the given
* transaction. Return whether this evicted the dbuf.
*/
static boolean_t
dbuf_undirty(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dnode_t *dn;
uint64_t txg = tx->tx_txg;
dbuf_dirty_record_t *dr, **drp;
ASSERT(txg != 0);
/*
* Due to our use of dn_nlevels below, this can only be called
* in open context, unless we are operating on the MOS.
* From syncing context, dn_nlevels may be different from the
* dn_nlevels used when dbuf was dirtied.
*/
ASSERT(db->db_objset ==
dmu_objset_pool(db->db_objset)->dp_meta_objset ||
txg != spa_syncing_txg(dmu_objset_spa(db->db_objset)));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT0(db->db_level);
ASSERT(MUTEX_HELD(&db->db_mtx));
/*
* If this buffer is not dirty, we're done.
*/
for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
if (dr->dr_txg <= txg)
break;
if (dr == NULL || dr->dr_txg < txg)
return (B_FALSE);
ASSERT(dr->dr_txg == txg);
ASSERT(dr->dr_dbuf == db);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dprintf_dbuf(db, "size=%llx\n", (u_longlong_t)db->db.db_size);
ASSERT(db->db.db_size != 0);
dsl_pool_undirty_space(dmu_objset_pool(dn->dn_objset),
dr->dr_accounted, txg);
*drp = dr->dr_next;
/*
* Note that there are three places in dbuf_dirty()
* where this dirty record may be put on a list.
* Make sure to do a list_remove corresponding to
* every one of those list_insert calls.
*/
if (dr->dr_parent) {
mutex_enter(&dr->dr_parent->dt.di.dr_mtx);
list_remove(&dr->dr_parent->dt.di.dr_children, dr);
mutex_exit(&dr->dr_parent->dt.di.dr_mtx);
} else if (db->db_blkid == DMU_SPILL_BLKID ||
db->db_level + 1 == dn->dn_nlevels) {
ASSERT(db->db_blkptr == NULL || db->db_parent == dn->dn_dbuf);
mutex_enter(&dn->dn_mtx);
list_remove(&dn->dn_dirty_records[txg & TXG_MASK], dr);
mutex_exit(&dn->dn_mtx);
}
DB_DNODE_EXIT(db);
if (db->db_state != DB_NOFILL) {
dbuf_unoverride(dr);
ASSERT(db->db_buf != NULL);
ASSERT(dr->dt.dl.dr_data != NULL);
if (dr->dt.dl.dr_data != db->db_buf)
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
if (refcount_remove(&db->db_holds, (void *)(uintptr_t)txg) == 0) {
ASSERT(db->db_state == DB_NOFILL || arc_released(db->db_buf));
dbuf_destroy(db);
return (B_TRUE);
}
return (B_FALSE);
}
static void
dmu_buf_will_dirty_impl(dmu_buf_t *db_fake, int flags, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT(tx->tx_txg != 0);
ASSERT(!refcount_is_zero(&db->db_holds));
/*
* Quick check for dirtiness. For already dirty blocks, this
* reduces runtime of this function by >90%, and overall performance
* by 50% for some workloads (e.g. file deletion with indirect blocks
* cached).
*/
mutex_enter(&db->db_mtx);
dbuf_dirty_record_t *dr;
for (dr = db->db_last_dirty;
dr != NULL && dr->dr_txg >= tx->tx_txg; dr = dr->dr_next) {
/*
* It's possible that it is already dirty but not cached,
* because there are some calls to dbuf_dirty() that don't
* go through dmu_buf_will_dirty().
*/
if (dr->dr_txg == tx->tx_txg && db->db_state == DB_CACHED) {
/* This dbuf is already dirty and cached. */
dbuf_redirty(dr);
mutex_exit(&db->db_mtx);
return;
}
}
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
if (RW_WRITE_HELD(&DB_DNODE(db)->dn_struct_rwlock))
flags |= DB_RF_HAVESTRUCT;
DB_DNODE_EXIT(db);
(void) dbuf_read(db, NULL, flags);
(void) dbuf_dirty(db, tx);
}
void
dmu_buf_will_dirty(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH, tx);
}
void
dmu_buf_will_not_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
db->db_state = DB_NOFILL;
dmu_buf_will_fill(db_fake, tx);
}
void
dmu_buf_will_fill(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(tx->tx_txg != 0);
ASSERT(db->db_level == 0);
ASSERT(!refcount_is_zero(&db->db_holds));
ASSERT(db->db.db_object != DMU_META_DNODE_OBJECT ||
dmu_tx_private_ok(tx));
dbuf_noread(db);
(void) dbuf_dirty(db, tx);
}
/*
* This function is effectively the same as dmu_buf_will_dirty(), but
* indicates the caller expects raw encrypted data in the db. It will
* also set the raw flag on the created dirty record.
*/
void
dmu_buf_will_change_crypt_params(dmu_buf_t *db_fake, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_dirty_record_t *dr;
dmu_buf_will_dirty_impl(db_fake,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_NO_DECRYPT, tx);
dr = db->db_last_dirty;
while (dr != NULL && dr->dr_txg > tx->tx_txg)
dr = dr->dr_next;
ASSERT3P(dr, !=, NULL);
ASSERT3U(dr->dr_txg, ==, tx->tx_txg);
dr->dt.dl.dr_raw = B_TRUE;
db->db_objset->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
}
#pragma weak dmu_buf_fill_done = dbuf_fill_done
/* ARGSUSED */
void
dbuf_fill_done(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
if (db->db_state == DB_FILL) {
if (db->db_level == 0 && db->db_freed_in_flight) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
/* we were freed while filling */
/* XXX dbuf_undirty? */
bzero(db->db.db_data, db->db.db_size);
db->db_freed_in_flight = FALSE;
}
db->db_state = DB_CACHED;
cv_broadcast(&db->db_changed);
}
mutex_exit(&db->db_mtx);
}
void
dmu_buf_write_embedded(dmu_buf_t *dbuf, void *data,
bp_embedded_type_t etype, enum zio_compress comp,
int uncompressed_size, int compressed_size, int byteorder,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbuf;
struct dirty_leaf *dl;
dmu_object_type_t type;
if (etype == BP_EMBEDDED_TYPE_DATA) {
ASSERT(spa_feature_is_active(dmu_objset_spa(db->db_objset),
SPA_FEATURE_EMBEDDED_DATA));
}
DB_DNODE_ENTER(db);
type = DB_DNODE(db)->dn_type;
DB_DNODE_EXIT(db);
ASSERT0(db->db_level);
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
dmu_buf_will_not_fill(dbuf, tx);
ASSERT3U(db->db_last_dirty->dr_txg, ==, tx->tx_txg);
dl = &db->db_last_dirty->dt.dl;
encode_embedded_bp_compressed(&dl->dr_overridden_by,
data, comp, uncompressed_size, compressed_size);
BPE_SET_ETYPE(&dl->dr_overridden_by, etype);
BP_SET_TYPE(&dl->dr_overridden_by, type);
BP_SET_LEVEL(&dl->dr_overridden_by, 0);
BP_SET_BYTEORDER(&dl->dr_overridden_by, byteorder);
dl->dr_override_state = DR_OVERRIDDEN;
dl->dr_overridden_by.blk_birth = db->db_last_dirty->dr_txg;
}
/*
* Directly assign a provided arc buf to a given dbuf if it's not referenced
* by anybody except our caller. Otherwise copy arcbuf's contents to dbuf.
*/
void
dbuf_assign_arcbuf(dmu_buf_impl_t *db, arc_buf_t *buf, dmu_tx_t *tx)
{
ASSERT(!refcount_is_zero(&db->db_holds));
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(db->db_level == 0);
ASSERT3U(dbuf_is_metadata(db), ==, arc_is_metadata(buf));
ASSERT(buf != NULL);
ASSERT(arc_buf_lsize(buf) == db->db.db_size);
ASSERT(tx->tx_txg != 0);
arc_return_buf(buf, db);
ASSERT(arc_released(buf));
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ || db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_UNCACHED);
if (db->db_state == DB_CACHED &&
refcount_count(&db->db_holds) - 1 > db->db_dirtycnt) {
/*
* In practice, we will never have a case where we have an
* encrypted arc buffer while additional holds exist on the
* dbuf. We don't handle this here so we simply assert that
* fact instead.
*/
ASSERT(!arc_is_encrypted(buf));
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
bcopy(buf->b_data, db->db.db_data, db->db.db_size);
arc_buf_destroy(buf, db);
xuio_stat_wbuf_copied();
return;
}
xuio_stat_wbuf_nocopy();
if (db->db_state == DB_CACHED) {
dbuf_dirty_record_t *dr = db->db_last_dirty;
ASSERT(db->db_buf != NULL);
if (dr != NULL && dr->dr_txg == tx->tx_txg) {
ASSERT(dr->dt.dl.dr_data == db->db_buf);
IMPLY(arc_is_encrypted(buf), dr->dt.dl.dr_raw);
if (!arc_released(db->db_buf)) {
ASSERT(dr->dt.dl.dr_override_state ==
DR_OVERRIDDEN);
arc_release(db->db_buf, db);
}
dr->dt.dl.dr_data = buf;
arc_buf_destroy(db->db_buf, db);
} else if (dr == NULL || dr->dt.dl.dr_data != db->db_buf) {
arc_release(db->db_buf, db);
arc_buf_destroy(db->db_buf, db);
}
db->db_buf = NULL;
}
ASSERT(db->db_buf == NULL);
dbuf_set_data(db, buf);
db->db_state = DB_FILL;
mutex_exit(&db->db_mtx);
(void) dbuf_dirty(db, tx);
dmu_buf_fill_done(&db->db, tx);
}
void
dbuf_destroy(dmu_buf_impl_t *db)
{
dnode_t *dn;
dmu_buf_impl_t *parent = db->db_parent;
dmu_buf_impl_t *dndb;
ASSERT(MUTEX_HELD(&db->db_mtx));
ASSERT(refcount_is_zero(&db->db_holds));
if (db->db_buf != NULL) {
arc_buf_destroy(db->db_buf, db);
db->db_buf = NULL;
}
if (db->db_blkid == DMU_BONUS_BLKID) {
int slots = DB_DNODE(db)->dn_num_slots;
int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
if (db->db.db_data != NULL) {
kmem_free(db->db.db_data, bonuslen);
arc_space_return(bonuslen, ARC_SPACE_BONUS);
db->db_state = DB_UNCACHED;
}
}
dbuf_clear_data(db);
if (multilist_link_active(&db->db_cache_link)) {
multilist_remove(dbuf_cache, db);
(void) refcount_remove_many(&dbuf_cache_size,
db->db.db_size, db);
DBUF_STAT_BUMPDOWN(cache_levels[db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[db->db_level],
db->db.db_size);
}
ASSERT(db->db_state == DB_UNCACHED || db->db_state == DB_NOFILL);
ASSERT(db->db_data_pending == NULL);
db->db_state = DB_EVICTING;
db->db_blkptr = NULL;
/*
* Now that db_state is DB_EVICTING, nobody else can find this via
* the hash table. We can now drop db_mtx, which allows us to
* acquire the dn_dbufs_mtx.
*/
mutex_exit(&db->db_mtx);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dndb = dn->dn_dbuf;
if (db->db_blkid != DMU_BONUS_BLKID) {
boolean_t needlock = !MUTEX_HELD(&dn->dn_dbufs_mtx);
if (needlock)
mutex_enter(&dn->dn_dbufs_mtx);
avl_remove(&dn->dn_dbufs, db);
atomic_dec_32(&dn->dn_dbufs_count);
membar_producer();
DB_DNODE_EXIT(db);
if (needlock)
mutex_exit(&dn->dn_dbufs_mtx);
/*
* Decrementing the dbuf count means that the hold corresponding
* to the removed dbuf is no longer discounted in dnode_move(),
* so the dnode cannot be moved until after we release the hold.
* The membar_producer() ensures visibility of the decremented
* value in dnode_move(), since DB_DNODE_EXIT doesn't actually
* release any lock.
*/
dnode_rele(dn, db);
db->db_dnode_handle = NULL;
dbuf_hash_remove(db);
} else {
DB_DNODE_EXIT(db);
}
ASSERT(refcount_is_zero(&db->db_holds));
db->db_parent = NULL;
ASSERT(db->db_buf == NULL);
ASSERT(db->db.db_data == NULL);
ASSERT(db->db_hash_next == NULL);
ASSERT(db->db_blkptr == NULL);
ASSERT(db->db_data_pending == NULL);
ASSERT(!multilist_link_active(&db->db_cache_link));
kmem_cache_free(dbuf_kmem_cache, db);
arc_space_return(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
/*
* If this dbuf is referenced from an indirect dbuf,
* decrement the ref count on the indirect dbuf.
*/
if (parent && parent != dndb)
dbuf_rele(parent, db);
}
/*
* Note: While bpp will always be updated if the function returns success,
* parentp will not be updated if the dnode does not have dn_dbuf filled in;
* this happens when the dnode is the meta-dnode, or {user|group|project}used
* object.
*/
__attribute__((always_inline))
static inline int
dbuf_findbp(dnode_t *dn, int level, uint64_t blkid, int fail_sparse,
dmu_buf_impl_t **parentp, blkptr_t **bpp, struct dbuf_hold_impl_data *dh)
{
*parentp = NULL;
*bpp = NULL;
ASSERT(blkid != DMU_BONUS_BLKID);
if (blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_have_spill &&
(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
*bpp = DN_SPILL_BLKPTR(dn->dn_phys);
else
*bpp = NULL;
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
mutex_exit(&dn->dn_mtx);
return (0);
}
int nlevels =
(dn->dn_phys->dn_nlevels == 0) ? 1 : dn->dn_phys->dn_nlevels;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(level * epbs, <, 64);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
/*
* This assertion shouldn't trip as long as the max indirect block size
* is less than 1M. The reason for this is that up to that point,
* the number of levels required to address an entire object with blocks
* of size SPA_MINBLOCKSIZE satisfies nlevels * epbs + 1 <= 64. In
* other words, if N * epbs + 1 > 64, then if (N-1) * epbs + 1 > 55
* (i.e. we can address the entire object), objects will all use at most
* N-1 levels and the assertion won't overflow. However, once epbs is
* 13, 4 * 13 + 1 = 53, but 5 * 13 + 1 = 66. Then, 4 levels will not be
* enough to address an entire object, so objects will have 5 levels,
* but then this assertion will overflow.
*
* All this is to say that if we ever increase DN_MAX_INDBLKSHIFT, we
* need to redo this logic to handle overflows.
*/
ASSERT(level >= nlevels ||
((nlevels - level - 1) * epbs) +
highbit64(dn->dn_phys->dn_nblkptr) <= 64);
if (level >= nlevels ||
blkid >= ((uint64_t)dn->dn_phys->dn_nblkptr <<
((nlevels - level - 1) * epbs)) ||
(fail_sparse &&
blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))) {
/* the buffer has no parent yet */
return (SET_ERROR(ENOENT));
} else if (level < nlevels-1) {
/* this block is referenced from an indirect block */
int err;
if (dh == NULL) {
err = dbuf_hold_impl(dn, level+1,
blkid >> epbs, fail_sparse, FALSE, NULL, parentp);
} else {
__dbuf_hold_impl_init(dh + 1, dn, dh->dh_level + 1,
blkid >> epbs, fail_sparse, FALSE, NULL,
parentp, dh->dh_depth + 1);
err = __dbuf_hold_impl(dh + 1);
}
if (err)
return (err);
err = dbuf_read(*parentp, NULL,
(DB_RF_HAVESTRUCT | DB_RF_NOPREFETCH | DB_RF_CANFAIL));
if (err) {
dbuf_rele(*parentp, NULL);
*parentp = NULL;
return (err);
}
*bpp = ((blkptr_t *)(*parentp)->db.db_data) +
(blkid & ((1ULL << epbs) - 1));
if (blkid > (dn->dn_phys->dn_maxblkid >> (level * epbs)))
ASSERT(BP_IS_HOLE(*bpp));
return (0);
} else {
/* the block is referenced from the dnode */
ASSERT3U(level, ==, nlevels-1);
ASSERT(dn->dn_phys->dn_nblkptr == 0 ||
blkid < dn->dn_phys->dn_nblkptr);
if (dn->dn_dbuf) {
dbuf_add_ref(dn->dn_dbuf, NULL);
*parentp = dn->dn_dbuf;
}
*bpp = &dn->dn_phys->dn_blkptr[blkid];
return (0);
}
}
static dmu_buf_impl_t *
dbuf_create(dnode_t *dn, uint8_t level, uint64_t blkid,
dmu_buf_impl_t *parent, blkptr_t *blkptr)
{
objset_t *os = dn->dn_objset;
dmu_buf_impl_t *db, *odb;
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_type != DMU_OT_NONE);
db = kmem_cache_alloc(dbuf_kmem_cache, KM_SLEEP);
db->db_objset = os;
db->db.db_object = dn->dn_object;
db->db_level = level;
db->db_blkid = blkid;
db->db_last_dirty = NULL;
db->db_dirtycnt = 0;
db->db_dnode_handle = dn->dn_handle;
db->db_parent = parent;
db->db_blkptr = blkptr;
db->db_user = NULL;
db->db_user_immediate_evict = FALSE;
db->db_freed_in_flight = FALSE;
db->db_pending_evict = FALSE;
if (blkid == DMU_BONUS_BLKID) {
ASSERT3P(parent, ==, dn->dn_dbuf);
db->db.db_size = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT3U(db->db.db_size, >=, dn->dn_bonuslen);
db->db.db_offset = DMU_BONUS_BLKID;
db->db_state = DB_UNCACHED;
/* the bonus dbuf is not placed in the hash table */
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
return (db);
} else if (blkid == DMU_SPILL_BLKID) {
db->db.db_size = (blkptr != NULL) ?
BP_GET_LSIZE(blkptr) : SPA_MINBLOCKSIZE;
db->db.db_offset = 0;
} else {
int blocksize =
db->db_level ? 1 << dn->dn_indblkshift : dn->dn_datablksz;
db->db.db_size = blocksize;
db->db.db_offset = db->db_blkid * blocksize;
}
/*
* Hold the dn_dbufs_mtx while we get the new dbuf
* inserted into the hash table *and* added to the dbufs list.
* This prevents a possible deadlock with someone
* trying to look up this dbuf before it's added to the
* dn_dbufs list.
*/
mutex_enter(&dn->dn_dbufs_mtx);
db->db_state = DB_EVICTING;
if ((odb = dbuf_hash_insert(db)) != NULL) {
/* someone else inserted it first */
kmem_cache_free(dbuf_kmem_cache, db);
mutex_exit(&dn->dn_dbufs_mtx);
DBUF_STAT_BUMP(hash_insert_race);
return (odb);
}
avl_add(&dn->dn_dbufs, db);
db->db_state = DB_UNCACHED;
mutex_exit(&dn->dn_dbufs_mtx);
arc_space_consume(sizeof (dmu_buf_impl_t), ARC_SPACE_DBUF);
if (parent && parent != dn->dn_dbuf)
dbuf_add_ref(parent, db);
ASSERT(dn->dn_object == DMU_META_DNODE_OBJECT ||
refcount_count(&dn->dn_holds) > 0);
(void) refcount_add(&dn->dn_holds, db);
atomic_inc_32(&dn->dn_dbufs_count);
dprintf_dbuf(db, "db=%p\n", db);
return (db);
}
typedef struct dbuf_prefetch_arg {
spa_t *dpa_spa; /* The spa to issue the prefetch in. */
zbookmark_phys_t dpa_zb; /* The target block to prefetch. */
int dpa_epbs; /* Entries (blkptr_t's) Per Block Shift. */
int dpa_curlevel; /* The current level that we're reading */
dnode_t *dpa_dnode; /* The dnode associated with the prefetch */
zio_priority_t dpa_prio; /* The priority I/Os should be issued at. */
zio_t *dpa_zio; /* The parent zio_t for all prefetches. */
arc_flags_t dpa_aflags; /* Flags to pass to the final prefetch. */
} dbuf_prefetch_arg_t;
/*
* Actually issue the prefetch read for the block given.
*/
static void
dbuf_issue_final_prefetch(dbuf_prefetch_arg_t *dpa, blkptr_t *bp)
{
if (BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
return;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE;
arc_flags_t aflags =
dpa->dpa_aflags | ARC_FLAG_NOWAIT | ARC_FLAG_PREFETCH;
/* dnodes are always read as raw and then converted later */
if (BP_GET_TYPE(bp) == DMU_OT_DNODE && BP_IS_PROTECTED(bp) &&
dpa->dpa_curlevel == 0)
zio_flags |= ZIO_FLAG_RAW;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
ASSERT3U(dpa->dpa_curlevel, ==, dpa->dpa_zb.zb_level);
ASSERT(dpa->dpa_zio != NULL);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa, bp, NULL, NULL,
dpa->dpa_prio, zio_flags, &aflags, &dpa->dpa_zb);
}
/*
* Called when an indirect block above our prefetch target is read in. This
* will either read in the next indirect block down the tree or issue the actual
* prefetch if the next block down is our target.
*/
static void
dbuf_prefetch_indirect_done(zio_t *zio, const zbookmark_phys_t *zb,
const blkptr_t *iobp, arc_buf_t *abuf, void *private)
{
dbuf_prefetch_arg_t *dpa = private;
ASSERT3S(dpa->dpa_zb.zb_level, <, dpa->dpa_curlevel);
ASSERT3S(dpa->dpa_curlevel, >, 0);
/*
* The dpa_dnode is only valid if we are called with a NULL
* zio. This indicates that the arc_read() returned without
* first calling zio_read() to issue a physical read. Once
* a physical read is made the dpa_dnode must be invalidated
* as the locks guarding it may have been dropped. If the
* dpa_dnode is still valid, then we want to add it to the dbuf
* cache. To do so, we must hold the dbuf associated with the block
* we just prefetched, read its contents so that we associate it
* with an arc_buf_t, and then release it.
*/
if (zio != NULL) {
ASSERT3S(BP_GET_LEVEL(zio->io_bp), ==, dpa->dpa_curlevel);
if (zio->io_flags & ZIO_FLAG_RAW_COMPRESS) {
ASSERT3U(BP_GET_PSIZE(zio->io_bp), ==, zio->io_size);
} else {
ASSERT3U(BP_GET_LSIZE(zio->io_bp), ==, zio->io_size);
}
ASSERT3P(zio->io_spa, ==, dpa->dpa_spa);
dpa->dpa_dnode = NULL;
} else if (dpa->dpa_dnode != NULL) {
uint64_t curblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel -
dpa->dpa_zb.zb_level));
dmu_buf_impl_t *db = dbuf_hold_level(dpa->dpa_dnode,
dpa->dpa_curlevel, curblkid, FTAG);
(void) dbuf_read(db, NULL,
DB_RF_MUST_SUCCEED | DB_RF_NOPREFETCH | DB_RF_HAVESTRUCT);
dbuf_rele(db, FTAG);
}
if (abuf == NULL) {
kmem_free(dpa, sizeof (*dpa));
return;
}
dpa->dpa_curlevel--;
uint64_t nextblkid = dpa->dpa_zb.zb_blkid >>
(dpa->dpa_epbs * (dpa->dpa_curlevel - dpa->dpa_zb.zb_level));
blkptr_t *bp = ((blkptr_t *)abuf->b_data) +
P2PHASE(nextblkid, 1ULL << dpa->dpa_epbs);
if (BP_IS_HOLE(bp)) {
kmem_free(dpa, sizeof (*dpa));
} else if (dpa->dpa_curlevel == dpa->dpa_zb.zb_level) {
ASSERT3U(nextblkid, ==, dpa->dpa_zb.zb_blkid);
dbuf_issue_final_prefetch(dpa, bp);
kmem_free(dpa, sizeof (*dpa));
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (dpa->dpa_aflags & ARC_FLAG_L2CACHE)
iter_aflags |= ARC_FLAG_L2CACHE;
ASSERT3U(dpa->dpa_curlevel, ==, BP_GET_LEVEL(bp));
SET_BOOKMARK(&zb, dpa->dpa_zb.zb_objset,
dpa->dpa_zb.zb_object, dpa->dpa_curlevel, nextblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
bp, dbuf_prefetch_indirect_done, dpa, dpa->dpa_prio,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
arc_buf_destroy(abuf, private);
}
/*
* Issue prefetch reads for the given block on the given level. If the indirect
* blocks above that block are not in memory, we will read them in
* asynchronously. As a result, this call never blocks waiting for a read to
* complete. Note that the prefetch might fail if the dataset is encrypted and
* the encryption key is unmapped before the IO completes.
*/
void
dbuf_prefetch(dnode_t *dn, int64_t level, uint64_t blkid, zio_priority_t prio,
arc_flags_t aflags)
{
blkptr_t bp;
int epbs, nlevels, curlevel;
uint64_t curblkid;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dn->dn_struct_rwlock));
if (blkid > dn->dn_maxblkid)
return;
if (dnode_block_freed(dn, blkid))
return;
/*
* This dnode hasn't been written to disk yet, so there's nothing to
* prefetch.
*/
nlevels = dn->dn_phys->dn_nlevels;
if (level >= nlevels || dn->dn_phys->dn_nblkptr == 0)
return;
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
if (dn->dn_phys->dn_maxblkid < blkid << (epbs * level))
return;
dmu_buf_impl_t *db = dbuf_find(dn->dn_objset, dn->dn_object,
level, blkid);
if (db != NULL) {
mutex_exit(&db->db_mtx);
/*
* This dbuf already exists. It is either CACHED, or
* (we assume) about to be read or filled.
*/
return;
}
/*
* Find the closest ancestor (indirect block) of the target block
* that is present in the cache. In this indirect block, we will
* find the bp that is at curlevel, curblkid.
*/
curlevel = level;
curblkid = blkid;
while (curlevel < nlevels - 1) {
int parent_level = curlevel + 1;
uint64_t parent_blkid = curblkid >> epbs;
dmu_buf_impl_t *db;
if (dbuf_hold_impl(dn, parent_level, parent_blkid,
FALSE, TRUE, FTAG, &db) == 0) {
blkptr_t *bpp = db->db_buf->b_data;
bp = bpp[P2PHASE(curblkid, 1 << epbs)];
dbuf_rele(db, FTAG);
break;
}
curlevel = parent_level;
curblkid = parent_blkid;
}
if (curlevel == nlevels - 1) {
/* No cached indirect blocks found. */
ASSERT3U(curblkid, <, dn->dn_phys->dn_nblkptr);
bp = dn->dn_phys->dn_blkptr[curblkid];
}
if (BP_IS_HOLE(&bp))
return;
ASSERT3U(curlevel, ==, BP_GET_LEVEL(&bp));
zio_t *pio = zio_root(dmu_objset_spa(dn->dn_objset), NULL, NULL,
ZIO_FLAG_CANFAIL);
dbuf_prefetch_arg_t *dpa = kmem_zalloc(sizeof (*dpa), KM_SLEEP);
dsl_dataset_t *ds = dn->dn_objset->os_dsl_dataset;
SET_BOOKMARK(&dpa->dpa_zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, level, blkid);
dpa->dpa_curlevel = curlevel;
dpa->dpa_prio = prio;
dpa->dpa_aflags = aflags;
dpa->dpa_spa = dn->dn_objset->os_spa;
dpa->dpa_dnode = dn;
dpa->dpa_epbs = epbs;
dpa->dpa_zio = pio;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
dpa->dpa_aflags |= ARC_FLAG_L2CACHE;
/*
* If we have the indirect just above us, no need to do the asynchronous
* prefetch chain; we'll just run the last step ourselves. If we're at
* a higher level, though, we want to issue the prefetches for all the
* indirect blocks asynchronously, so we can go on with whatever we were
* doing.
*/
if (curlevel == level) {
ASSERT3U(curblkid, ==, blkid);
dbuf_issue_final_prefetch(dpa, &bp);
kmem_free(dpa, sizeof (*dpa));
} else {
arc_flags_t iter_aflags = ARC_FLAG_NOWAIT;
zbookmark_phys_t zb;
/* flag if L2ARC eligible, l2arc_noprefetch then decides */
if (DNODE_LEVEL_IS_L2CACHEABLE(dn, level))
iter_aflags |= ARC_FLAG_L2CACHE;
SET_BOOKMARK(&zb, ds != NULL ? ds->ds_object : DMU_META_OBJSET,
dn->dn_object, curlevel, curblkid);
(void) arc_read(dpa->dpa_zio, dpa->dpa_spa,
&bp, dbuf_prefetch_indirect_done, dpa, prio,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&iter_aflags, &zb);
}
/*
* We use pio here instead of dpa_zio since it's possible that
* dpa may have already been freed.
*/
zio_nowait(pio);
}
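/*
 * Illustrative user-space sketch (not part of this patch): how the walk
 * in dbuf_prefetch()/dbuf_findbp() maps a level-0 blkid to its ancestor
 * indirect blocks.  The parent blkid is curblkid >> epbs and the slot
 * within the parent is P2PHASE(curblkid, 1 << epbs), i.e. curblkid masked
 * to the low epbs bits.  The indblkshift (17) and blkid (3000) below are
 * hypothetical.
 */
#include <stdint.h>
#include <stdio.h>

int
main(void)
{
	const int epbs = 17 - 7;	/* indblkshift - SPA_BLKPTRSHIFT */
	uint64_t blkid = 3000;		/* level-0 block of interest */

	for (int level = 1; level <= 2; level++) {
		uint64_t parent_blkid = blkid >> (epbs * level);
		uint64_t slot = (blkid >> (epbs * (level - 1))) &
		    ((1ULL << epbs) - 1);
		printf("level %d indirect: blkid %llu, slot %llu\n", level,
		    (unsigned long long)parent_blkid,
		    (unsigned long long)slot);
	}
	return (0);
}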
#define DBUF_HOLD_IMPL_MAX_DEPTH 20
/*
* Helper function for __dbuf_hold_impl() to copy a buffer. Handles
* the case of encrypted, compressed and uncompressed buffers by
* allocating the new buffer, respectively, with arc_alloc_raw_buf(),
* arc_alloc_compressed_buf() or arc_alloc_buf().
*
* NOTE: Declared noinline to avoid stack bloat in __dbuf_hold_impl().
*/
noinline static void
dbuf_hold_copy(struct dbuf_hold_impl_data *dh)
{
dnode_t *dn = dh->dh_dn;
dmu_buf_impl_t *db = dh->dh_db;
dbuf_dirty_record_t *dr = dh->dh_dr;
arc_buf_t *data = dr->dt.dl.dr_data;
enum zio_compress compress_type = arc_get_compression(data);
if (arc_is_encrypted(data)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(data, &byteorder, salt, iv, mac);
dbuf_set_data(db, arc_alloc_raw_buf(dn->dn_objset->os_spa, db,
dmu_objset_id(dn->dn_objset), byteorder, salt, iv, mac,
dn->dn_type, arc_buf_size(data), arc_buf_lsize(data),
compress_type));
} else if (compress_type != ZIO_COMPRESS_OFF) {
dbuf_set_data(db, arc_alloc_compressed_buf(
dn->dn_objset->os_spa, db, arc_buf_size(data),
arc_buf_lsize(data), compress_type));
} else {
dbuf_set_data(db, arc_alloc_buf(dn->dn_objset->os_spa, db,
DBUF_GET_BUFC_TYPE(db), db->db.db_size));
}
bcopy(data->b_data, db->db.db_data, arc_buf_size(data));
}
/*
* Returns with db_holds incremented, and db_mtx not held.
* Note: dn_struct_rwlock must be held.
*/
static int
__dbuf_hold_impl(struct dbuf_hold_impl_data *dh)
{
ASSERT3S(dh->dh_depth, <, DBUF_HOLD_IMPL_MAX_DEPTH);
dh->dh_parent = NULL;
ASSERT(dh->dh_blkid != DMU_BONUS_BLKID);
ASSERT(RW_LOCK_HELD(&dh->dh_dn->dn_struct_rwlock));
ASSERT3U(dh->dh_dn->dn_nlevels, >, dh->dh_level);
*(dh->dh_dbp) = NULL;
/* dbuf_find() returns with db_mtx held */
dh->dh_db = dbuf_find(dh->dh_dn->dn_objset, dh->dh_dn->dn_object,
dh->dh_level, dh->dh_blkid);
if (dh->dh_db == NULL) {
dh->dh_bp = NULL;
if (dh->dh_fail_uncached)
return (SET_ERROR(ENOENT));
ASSERT3P(dh->dh_parent, ==, NULL);
dh->dh_err = dbuf_findbp(dh->dh_dn, dh->dh_level, dh->dh_blkid,
dh->dh_fail_sparse, &dh->dh_parent, &dh->dh_bp, dh);
if (dh->dh_fail_sparse) {
if (dh->dh_err == 0 &&
dh->dh_bp && BP_IS_HOLE(dh->dh_bp))
dh->dh_err = SET_ERROR(ENOENT);
if (dh->dh_err) {
if (dh->dh_parent)
dbuf_rele(dh->dh_parent, NULL);
return (dh->dh_err);
}
}
if (dh->dh_err && dh->dh_err != ENOENT)
return (dh->dh_err);
dh->dh_db = dbuf_create(dh->dh_dn, dh->dh_level, dh->dh_blkid,
dh->dh_parent, dh->dh_bp);
}
if (dh->dh_fail_uncached && dh->dh_db->db_state != DB_CACHED) {
mutex_exit(&dh->dh_db->db_mtx);
return (SET_ERROR(ENOENT));
}
if (dh->dh_db->db_buf != NULL) {
arc_buf_access(dh->dh_db->db_buf);
ASSERT3P(dh->dh_db->db.db_data, ==, dh->dh_db->db_buf->b_data);
}
ASSERT(dh->dh_db->db_buf == NULL || arc_referenced(dh->dh_db->db_buf));
/*
* If this buffer is currently syncing out, and we are
* still referencing it from db_data, we need to make a copy
* of it in case we decide we want to dirty it again in this txg.
*/
if (dh->dh_db->db_level == 0 &&
dh->dh_db->db_blkid != DMU_BONUS_BLKID &&
dh->dh_dn->dn_object != DMU_META_DNODE_OBJECT &&
dh->dh_db->db_state == DB_CACHED && dh->dh_db->db_data_pending) {
dh->dh_dr = dh->dh_db->db_data_pending;
if (dh->dh_dr->dt.dl.dr_data == dh->dh_db->db_buf)
dbuf_hold_copy(dh);
}
if (multilist_link_active(&dh->dh_db->db_cache_link)) {
ASSERT(refcount_is_zero(&dh->dh_db->db_holds));
multilist_remove(dbuf_cache, dh->dh_db);
(void) refcount_remove_many(&dbuf_cache_size,
dh->dh_db->db.db_size, dh->dh_db);
DBUF_STAT_BUMPDOWN(cache_levels[dh->dh_db->db_level]);
DBUF_STAT_BUMPDOWN(cache_count);
DBUF_STAT_DECR(cache_levels_bytes[dh->dh_db->db_level],
dh->dh_db->db.db_size);
}
(void) refcount_add(&dh->dh_db->db_holds, dh->dh_tag);
DBUF_VERIFY(dh->dh_db);
mutex_exit(&dh->dh_db->db_mtx);
/* NOTE: we can't rele the parent until after we drop the db_mtx */
if (dh->dh_parent)
dbuf_rele(dh->dh_parent, NULL);
ASSERT3P(DB_DNODE(dh->dh_db), ==, dh->dh_dn);
ASSERT3U(dh->dh_db->db_blkid, ==, dh->dh_blkid);
ASSERT3U(dh->dh_db->db_level, ==, dh->dh_level);
*(dh->dh_dbp) = dh->dh_db;
return (0);
}
/*
* The following code preserves the recursive function dbuf_hold_impl()
* but moves the local variables AND function arguments to the heap to
* minimize the stack frame size. Enough space is initially allocated
* on the stack for 20 levels of recursion.
*/
int
dbuf_hold_impl(dnode_t *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
void *tag, dmu_buf_impl_t **dbp)
{
struct dbuf_hold_impl_data *dh;
int error;
dh = kmem_alloc(sizeof (struct dbuf_hold_impl_data) *
DBUF_HOLD_IMPL_MAX_DEPTH, KM_SLEEP);
__dbuf_hold_impl_init(dh, dn, level, blkid, fail_sparse,
fail_uncached, tag, dbp, 0);
error = __dbuf_hold_impl(dh);
kmem_free(dh, sizeof (struct dbuf_hold_impl_data) *
DBUF_HOLD_IMPL_MAX_DEPTH);
return (error);
}
static void
__dbuf_hold_impl_init(struct dbuf_hold_impl_data *dh,
dnode_t *dn, uint8_t level, uint64_t blkid,
boolean_t fail_sparse, boolean_t fail_uncached,
void *tag, dmu_buf_impl_t **dbp, int depth)
{
dh->dh_dn = dn;
dh->dh_level = level;
dh->dh_blkid = blkid;
dh->dh_fail_sparse = fail_sparse;
dh->dh_fail_uncached = fail_uncached;
dh->dh_tag = tag;
dh->dh_dbp = dbp;
dh->dh_db = NULL;
dh->dh_parent = NULL;
dh->dh_bp = NULL;
dh->dh_err = 0;
dh->dh_dr = NULL;
dh->dh_depth = depth;
}
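/*
 * Illustrative sketch (not part of this patch) of the same technique in
 * miniature: per-recursion state lives in a preallocated array of frames
 * rather than on the stack, so each recursive call only costs a pointer,
 * an index and a return address.  All names below are hypothetical.
 */
#include <stdio.h>
#include <stdlib.h>

#define	EX_MAX_DEPTH	20

struct ex_frame {
	int arg;	/* would-be function argument */
	int local;	/* would-be local variable */
};

static int
ex_recurse(struct ex_frame *f, int depth)
{
	f[depth].local = f[depth].arg * 2;
	if (f[depth].arg == 0)
		return (f[depth].local);
	f[depth + 1].arg = f[depth].arg - 1;
	return (f[depth].local + ex_recurse(f, depth + 1));
}

int
main(void)
{
	struct ex_frame *f = calloc(EX_MAX_DEPTH, sizeof (*f));

	if (f == NULL)
		return (1);
	f[0].arg = 5;
	printf("%d\n", ex_recurse(f, 0));	/* (5+4+3+2+1+0) * 2 == 30 */
	free(f);
	return (0);
}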
dmu_buf_impl_t *
dbuf_hold(dnode_t *dn, uint64_t blkid, void *tag)
{
return (dbuf_hold_level(dn, 0, blkid, tag));
}
dmu_buf_impl_t *
dbuf_hold_level(dnode_t *dn, int level, uint64_t blkid, void *tag)
{
dmu_buf_impl_t *db;
int err = dbuf_hold_impl(dn, level, blkid, FALSE, FALSE, tag, &db);
return (err ? NULL : db);
}
void
dbuf_create_bonus(dnode_t *dn)
{
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
ASSERT(dn->dn_bonus == NULL);
dn->dn_bonus = dbuf_create(dn, 0, DMU_BONUS_BLKID, dn->dn_dbuf, NULL);
}
int
dbuf_spill_set_blksz(dmu_buf_t *db_fake, uint64_t blksz, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
if (db->db_blkid != DMU_SPILL_BLKID)
return (SET_ERROR(ENOTSUP));
if (blksz == 0)
blksz = SPA_MINBLOCKSIZE;
ASSERT3U(blksz, <=, spa_maxblocksize(dmu_objset_spa(db->db_objset)));
blksz = P2ROUNDUP(blksz, SPA_MINBLOCKSIZE);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dbuf_new_size(db, blksz, tx);
rw_exit(&dn->dn_struct_rwlock);
DB_DNODE_EXIT(db);
return (0);
}
void
dbuf_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
dbuf_free_range(dn, DMU_SPILL_BLKID, DMU_SPILL_BLKID, tx);
}
#pragma weak dmu_buf_add_ref = dbuf_add_ref
void
dbuf_add_ref(dmu_buf_impl_t *db, void *tag)
{
int64_t holds = refcount_add(&db->db_holds, tag);
VERIFY3S(holds, >, 1);
}
#pragma weak dmu_buf_try_add_ref = dbuf_try_add_ref
boolean_t
dbuf_try_add_ref(dmu_buf_t *db_fake, objset_t *os, uint64_t obj, uint64_t blkid,
void *tag)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dmu_buf_impl_t *found_db;
boolean_t result = B_FALSE;
if (blkid == DMU_BONUS_BLKID)
found_db = dbuf_find_bonus(os, obj);
else
found_db = dbuf_find(os, obj, 0, blkid);
if (found_db != NULL) {
if (db == found_db && dbuf_refcount(db) > db->db_dirtycnt) {
(void) refcount_add(&db->db_holds, tag);
result = B_TRUE;
}
mutex_exit(&found_db->db_mtx);
}
return (result);
}
/*
* If you call dbuf_rele() you had better not be referencing the dnode handle
* unless you have some other direct or indirect hold on the dnode. (An indirect
* hold is a hold on one of the dnode's dbufs, including the bonus buffer.)
* Without that, the dbuf_rele() could lead to a dnode_rele() followed by the
* dnode's parent dbuf evicting its dnode handles.
*/
void
dbuf_rele(dmu_buf_impl_t *db, void *tag)
{
mutex_enter(&db->db_mtx);
dbuf_rele_and_unlock(db, tag);
}
void
dmu_buf_rele(dmu_buf_t *db, void *tag)
{
dbuf_rele((dmu_buf_impl_t *)db, tag);
}
/*
* dbuf_rele() for an already-locked dbuf. This is necessary to allow
* db_dirtycnt and db_holds to be updated atomically.
*/
void
dbuf_rele_and_unlock(dmu_buf_impl_t *db, void *tag)
{
int64_t holds;
ASSERT(MUTEX_HELD(&db->db_mtx));
DBUF_VERIFY(db);
/*
* Remove the reference to the dbuf before removing its hold on the
* dnode so we can guarantee in dnode_move() that a referenced bonus
* buffer has a corresponding dnode hold.
*/
holds = refcount_remove(&db->db_holds, tag);
ASSERT(holds >= 0);
/*
* We can't freeze indirects if there is a possibility that they
* may be modified in the current syncing context.
*/
if (db->db_buf != NULL &&
holds == (db->db_level == 0 ? db->db_dirtycnt : 0)) {
arc_buf_freeze(db->db_buf);
}
if (holds == db->db_dirtycnt &&
db->db_level == 0 && db->db_user_immediate_evict)
dbuf_evict_user(db);
if (holds == 0) {
if (db->db_blkid == DMU_BONUS_BLKID) {
dnode_t *dn;
boolean_t evict_dbuf = db->db_pending_evict;
/*
* If the dnode moves here, we cannot cross this
* barrier until the move completes.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
atomic_dec_32(&dn->dn_dbufs_count);
/*
* Decrementing the dbuf count means that the bonus
* buffer's dnode hold is no longer discounted in
* dnode_move(). The dnode cannot move until after
* the dnode_rele() below.
*/
DB_DNODE_EXIT(db);
/*
* Do not reference db after its lock is dropped.
* Another thread may evict it.
*/
mutex_exit(&db->db_mtx);
if (evict_dbuf)
dnode_evict_bonus(dn);
dnode_rele(dn, db);
} else if (db->db_buf == NULL) {
/*
* This is a special case: we never associated this
* dbuf with any data allocated from the ARC.
*/
ASSERT(db->db_state == DB_UNCACHED ||
db->db_state == DB_NOFILL);
dbuf_destroy(db);
} else if (arc_released(db->db_buf)) {
/*
* This dbuf has anonymous data associated with it.
*/
dbuf_destroy(db);
} else {
boolean_t do_arc_evict = B_FALSE;
blkptr_t bp;
spa_t *spa = dmu_objset_spa(db->db_objset);
if (!DBUF_IS_CACHEABLE(db) &&
db->db_blkptr != NULL &&
!BP_IS_HOLE(db->db_blkptr) &&
!BP_IS_EMBEDDED(db->db_blkptr)) {
do_arc_evict = B_TRUE;
bp = *db->db_blkptr;
}
if (!DBUF_IS_CACHEABLE(db) ||
db->db_pending_evict) {
dbuf_destroy(db);
} else if (!multilist_link_active(&db->db_cache_link)) {
multilist_insert(dbuf_cache, db);
(void) refcount_add_many(&dbuf_cache_size,
db->db.db_size, db);
DBUF_STAT_BUMP(cache_levels[db->db_level]);
DBUF_STAT_BUMP(cache_count);
DBUF_STAT_INCR(cache_levels_bytes[db->db_level],
db->db.db_size);
DBUF_STAT_MAX(cache_size_bytes_max,
refcount_count(&dbuf_cache_size));
mutex_exit(&db->db_mtx);
dbuf_evict_notify();
}
if (do_arc_evict)
arc_freed(spa, &bp);
}
} else {
mutex_exit(&db->db_mtx);
}
}
#pragma weak dmu_buf_refcount = dbuf_refcount
uint64_t
dbuf_refcount(dmu_buf_impl_t *db)
{
return (refcount_count(&db->db_holds));
}
void *
dmu_buf_replace_user(dmu_buf_t *db_fake, dmu_buf_user_t *old_user,
dmu_buf_user_t *new_user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
mutex_enter(&db->db_mtx);
dbuf_verify_user(db, DBVU_NOT_EVICTING);
if (db->db_user == old_user)
db->db_user = new_user;
else
old_user = db->db_user;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
mutex_exit(&db->db_mtx);
return (old_user);
}
void *
dmu_buf_set_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, NULL, user));
}
void *
dmu_buf_set_user_ie(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
db->db_user_immediate_evict = TRUE;
return (dmu_buf_set_user(db_fake, user));
}
void *
dmu_buf_remove_user(dmu_buf_t *db_fake, dmu_buf_user_t *user)
{
return (dmu_buf_replace_user(db_fake, user, NULL));
}
void *
dmu_buf_get_user(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dbuf_verify_user(db, DBVU_NOT_EVICTING);
return (db->db_user);
}
void
dmu_buf_user_evict_wait()
{
taskq_wait(dbu_evict_taskq);
}
blkptr_t *
dmu_buf_get_blkptr(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_blkptr);
}
objset_t *
dmu_buf_get_objset(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
return (dbi->db_objset);
}
dnode_t *
dmu_buf_dnode_enter(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_ENTER(dbi);
return (DB_DNODE(dbi));
}
void
dmu_buf_dnode_exit(dmu_buf_t *db)
{
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
DB_DNODE_EXIT(dbi);
}
static void
dbuf_check_blkptr(dnode_t *dn, dmu_buf_impl_t *db)
{
/* ASSERT(dmu_tx_is_syncing(tx)); */
ASSERT(MUTEX_HELD(&db->db_mtx));
if (db->db_blkptr != NULL)
return;
if (db->db_blkid == DMU_SPILL_BLKID) {
db->db_blkptr = DN_SPILL_BLKPTR(dn->dn_phys);
BP_ZERO(db->db_blkptr);
return;
}
if (db->db_level == dn->dn_phys->dn_nlevels-1) {
/*
* This buffer was allocated at a time when there were
* no blkptrs available from the dnode, or it was
* inappropriate to hook it in (i.e., an nlevels mismatch).
*/
ASSERT(db->db_blkid < dn->dn_phys->dn_nblkptr);
ASSERT(db->db_parent == NULL);
db->db_parent = dn->dn_dbuf;
db->db_blkptr = &dn->dn_phys->dn_blkptr[db->db_blkid];
DBUF_VERIFY(db);
} else {
dmu_buf_impl_t *parent = db->db_parent;
int epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT(dn->dn_phys->dn_nlevels > 1);
if (parent == NULL) {
mutex_exit(&db->db_mtx);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
parent = dbuf_hold_level(dn, db->db_level + 1,
db->db_blkid >> epbs, db);
rw_exit(&dn->dn_struct_rwlock);
mutex_enter(&db->db_mtx);
db->db_parent = parent;
}
db->db_blkptr = (blkptr_t *)parent->db.db_data +
(db->db_blkid & ((1ULL << epbs) - 1));
DBUF_VERIFY(db);
}
}
/*
* Ensure the dbuf's data is untransformed if the associated dirty
* record requires it. This is used by dbuf_sync_leaf() to ensure
* that a dnode block is decrypted before we write new data to it.
* For raw writes we assert that the buffer is already encrypted.
*/
static void
dbuf_check_crypt(dbuf_dirty_record_t *dr)
{
int err;
dmu_buf_impl_t *db = dr->dr_dbuf;
ASSERT(MUTEX_HELD(&db->db_mtx));
if (!dr->dt.dl.dr_raw && arc_is_encrypted(db->db_buf)) {
zbookmark_phys_t zb;
/*
* Unfortunately, there is currently no mechanism for
* syncing context to handle decryption errors. An error
* here is only possible if an attacker maliciously
* changed a dnode block and updated the associated
* checksums going up the block tree.
*/
SET_BOOKMARK(&zb, dmu_objset_id(db->db_objset),
db->db.db_object, db->db_level, db->db_blkid);
err = arc_untransform(db->db_buf, db->db_objset->os_spa,
&zb, B_TRUE);
if (err)
panic("Invalid dnode block MAC");
} else if (dr->dt.dl.dr_raw) {
/*
* Writing raw encrypted data requires the db's arc buffer
* to be converted to raw by the caller.
*/
ASSERT(arc_is_encrypted(db->db_buf));
}
}
/*
* dbuf_sync_indirect() is called recursively from dbuf_sync_list() so it
* is critical that we not allow the compiler to inline this function into
* dbuf_sync_list() thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_indirect(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn;
zio_t *zio;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
ASSERT(db->db_level > 0);
DBUF_VERIFY(db);
/* Read the block if it hasn't been read yet. */
if (db->db_buf == NULL) {
mutex_exit(&db->db_mtx);
(void) dbuf_read(db, NULL, DB_RF_MUST_SUCCEED);
mutex_enter(&db->db_mtx);
}
ASSERT3U(db->db_state, ==, DB_CACHED);
ASSERT(db->db_buf != NULL);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
/* Indirect block size must match what the dnode thinks it is. */
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
dbuf_check_blkptr(dn, db);
DB_DNODE_EXIT(db);
/* Provide the pending dirty record to child dbufs */
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
+
dbuf_write(dr, db->db_buf, tx);
zio = dr->dr_zio;
mutex_enter(&dr->dt.di.dr_mtx);
dbuf_sync_list(&dr->dt.di.dr_children, db->db_level - 1, tx);
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
mutex_exit(&dr->dt.di.dr_mtx);
zio_nowait(zio);
}
/*
* dbuf_sync_leaf() is called recursively from dbuf_sync_list() so it is
* critical that we not allow the compiler to inline this function into
* dbuf_sync_list() thereby drastically bloating the stack usage.
*/
noinline static void
dbuf_sync_leaf(dbuf_dirty_record_t *dr, dmu_tx_t *tx)
{
arc_buf_t **datap = &dr->dt.dl.dr_data;
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn;
objset_t *os;
uint64_t txg = tx->tx_txg;
ASSERT(dmu_tx_is_syncing(tx));
dprintf_dbuf_bp(db, db->db_blkptr, "blkptr=%p", db->db_blkptr);
mutex_enter(&db->db_mtx);
/*
* To be synced, we must be dirtied. But we
* might have been freed after the dirty.
*/
if (db->db_state == DB_UNCACHED) {
/* This buffer has been freed since it was dirtied */
ASSERT(db->db.db_data == NULL);
} else if (db->db_state == DB_FILL) {
/* This buffer was freed and is now being re-filled */
ASSERT(db->db.db_data != dr->dt.dl.dr_data);
} else {
ASSERT(db->db_state == DB_CACHED || db->db_state == DB_NOFILL);
}
DBUF_VERIFY(db);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (db->db_blkid == DMU_SPILL_BLKID) {
mutex_enter(&dn->dn_mtx);
if (!(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR)) {
/*
* In the previous transaction group, the bonus buffer
* was entirely used to store the attributes for the
* dnode which overrode the dn_spill field. However,
* when adding more attributes to the file a spill
* block was required to hold the extra attributes.
*
* Make sure to clear the garbage left in the dn_spill
* field from the previous attributes in the bonus
* buffer. Otherwise, after writing out the spill
* block to the newly allocated dva, it will free
* the old block pointed to by the invalid dn_spill.
*/
db->db_blkptr = NULL;
}
dn->dn_phys->dn_flags |= DNODE_FLAG_SPILL_BLKPTR;
mutex_exit(&dn->dn_mtx);
}
/*
* If this is a bonus buffer, simply copy the bonus data into the
* dnode. It will be written out when the dnode is synced (and it
* will be synced, since it must have been dirty for dbuf_sync to
* be called).
*/
if (db->db_blkid == DMU_BONUS_BLKID) {
dbuf_dirty_record_t **drp;
ASSERT(*datap != NULL);
ASSERT0(db->db_level);
ASSERT3U(DN_MAX_BONUS_LEN(dn->dn_phys), <=,
DN_SLOTS_TO_BONUSLEN(dn->dn_phys->dn_extra_slots + 1));
bcopy(*datap, DN_BONUS(dn->dn_phys),
DN_MAX_BONUS_LEN(dn->dn_phys));
DB_DNODE_EXIT(db);
if (*datap != db->db.db_data) {
int slots = DB_DNODE(db)->dn_num_slots;
int bonuslen = DN_SLOTS_TO_BONUSLEN(slots);
kmem_free(*datap, bonuslen);
arc_space_return(bonuslen, ARC_SPACE_BONUS);
}
db->db_data_pending = NULL;
drp = &db->db_last_dirty;
while (*drp != dr)
drp = &(*drp)->dr_next;
ASSERT(dr->dr_next == NULL);
ASSERT(dr->dr_dbuf == db);
*drp = dr->dr_next;
if (dr->dr_dbuf->db_level != 0) {
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
dbuf_rele_and_unlock(db, (void *)(uintptr_t)txg);
return;
}
os = dn->dn_objset;
/*
* This function may have dropped the db_mtx lock allowing a dmu_sync
* operation to sneak in. As a result, we need to ensure that we
* don't check the dr_override_state until we have returned from
* dbuf_check_blkptr.
*/
dbuf_check_blkptr(dn, db);
/*
* If this buffer is in the middle of an immediate write,
* wait for the synchronous IO to complete.
*/
while (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
cv_wait(&db->db_changed, &db->db_mtx);
ASSERT(dr->dt.dl.dr_override_state != DR_NOT_OVERRIDDEN);
}
/*
* If this is a dnode block, ensure it is appropriately encrypted
* or decrypted, depending on what we are writing to it this txg.
*/
if (os->os_encrypted && dn->dn_object == DMU_META_DNODE_OBJECT)
dbuf_check_crypt(dr);
if (db->db_state != DB_NOFILL &&
dn->dn_object != DMU_META_DNODE_OBJECT &&
refcount_count(&db->db_holds) > 1 &&
dr->dt.dl.dr_override_state != DR_OVERRIDDEN &&
*datap == db->db_buf) {
/*
* If this buffer is currently "in use" (i.e., there
* are active holds and db_data still references it),
* then make a copy before we start the write so that
* any modifications from the open txg will not leak
* into this write.
*
* NOTE: this copy does not need to be made for
* objects only modified in the syncing context (e.g.
* DMU_OT_DNODE blocks).
*/
int psize = arc_buf_size(*datap);
int lsize = arc_buf_lsize(*datap);
arc_buf_contents_t type = DBUF_GET_BUFC_TYPE(db);
enum zio_compress compress_type = arc_get_compression(*datap);
if (arc_is_encrypted(*datap)) {
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
arc_get_raw_params(*datap, &byteorder, salt, iv, mac);
*datap = arc_alloc_raw_buf(os->os_spa, db,
dmu_objset_id(os), byteorder, salt, iv, mac,
dn->dn_type, psize, lsize, compress_type);
} else if (compress_type != ZIO_COMPRESS_OFF) {
ASSERT3U(type, ==, ARC_BUFC_DATA);
*datap = arc_alloc_compressed_buf(os->os_spa, db,
psize, lsize, compress_type);
} else {
*datap = arc_alloc_buf(os->os_spa, db, type, psize);
}
bcopy(db->db.db_data, (*datap)->b_data, psize);
}
db->db_data_pending = dr;
mutex_exit(&db->db_mtx);
dbuf_write(dr, *datap, tx);
ASSERT(!list_link_active(&dr->dr_dirty_node));
if (dn->dn_object == DMU_META_DNODE_OBJECT) {
list_insert_tail(&dn->dn_dirty_records[txg&TXG_MASK], dr);
DB_DNODE_EXIT(db);
} else {
/*
* Although zio_nowait() does not "wait for an IO", it does
* initiate the IO. If this is an empty write it seems plausible
* that the IO could actually be completed before the nowait
* returns. We need to DB_DNODE_EXIT() first in case
* zio_nowait() invalidates the dbuf.
*/
DB_DNODE_EXIT(db);
zio_nowait(dr->dr_zio);
}
}
void
dbuf_sync_list(list_t *list, int level, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr;
while ((dr = list_head(list))) {
if (dr->dr_zio != NULL) {
/*
* If we find an already initialized zio then we
* are processing the meta-dnode, and we have finished.
* The dbufs for all dnodes are put back on the list
* during processing, so that we can zio_wait()
* these IOs after initiating all child IOs.
*/
ASSERT3U(dr->dr_dbuf->db.db_object, ==,
DMU_META_DNODE_OBJECT);
break;
}
if (dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
VERIFY3U(dr->dr_dbuf->db_level, ==, level);
}
list_remove(list, dr);
if (dr->dr_dbuf->db_level > 0)
dbuf_sync_indirect(dr, tx);
else
dbuf_sync_leaf(dr, tx);
}
}
/* ARGSUSED */
static void
dbuf_write_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
spa_t *spa = zio->io_spa;
int64_t delta;
uint64_t fill = 0;
int i;
ASSERT3P(db->db_blkptr, !=, NULL);
ASSERT3P(&db->db_data_pending->dr_bp_copy, ==, bp);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
delta = bp_get_dsize_sync(spa, bp) - bp_get_dsize_sync(spa, bp_orig);
dnode_diduse_space(dn, delta - zio->io_prev_space_delta);
zio->io_prev_space_delta = delta;
if (bp->blk_birth != 0) {
ASSERT((db->db_blkid != DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_type) ||
(db->db_blkid == DMU_SPILL_BLKID &&
BP_GET_TYPE(bp) == dn->dn_bonustype) ||
BP_IS_EMBEDDED(bp));
ASSERT(BP_GET_LEVEL(bp) == db->db_level);
}
mutex_enter(&db->db_mtx);
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(bp)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
}
#endif
if (db->db_level == 0) {
mutex_enter(&dn->dn_mtx);
if (db->db_blkid > dn->dn_phys->dn_maxblkid &&
db->db_blkid != DMU_SPILL_BLKID)
dn->dn_phys->dn_maxblkid = db->db_blkid;
mutex_exit(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_DNODE) {
i = 0;
while (i < db->db.db_size) {
dnode_phys_t *dnp =
(void *)(((char *)db->db.db_data) + i);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE) {
fill++;
i += dnp->dn_extra_slots *
DNODE_MIN_SIZE;
}
}
} else {
if (BP_IS_HOLE(bp)) {
fill = 0;
} else {
fill = 1;
}
}
} else {
blkptr_t *ibp = db->db.db_data;
ASSERT3U(db->db.db_size, ==, 1<<dn->dn_phys->dn_indblkshift);
for (i = db->db.db_size >> SPA_BLKPTRSHIFT; i > 0; i--, ibp++) {
if (BP_IS_HOLE(ibp))
continue;
fill += BP_GET_FILL(ibp);
}
}
DB_DNODE_EXIT(db);
if (!BP_IS_EMBEDDED(bp))
BP_SET_FILL(bp, fill);
mutex_exit(&db->db_mtx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
*db->db_blkptr = *bp;
rw_exit(&dn->dn_struct_rwlock);
}
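/*
 * Illustrative note (not part of this patch; numbers are hypothetical):
 * the fill count computed above is 0 or 1 for a level-0 data block,
 * the number of allocated dnodes for a dnode block (large dnodes count
 * once and their extra slots are skipped), and the sum of the children's
 * BP_GET_FILL() values for an indirect block.  For example, a 128K
 * indirect block with dn_indblkshift == 17 holds 1024 block pointers;
 * if 1000 of them point at fully populated level-0 data blocks and 24
 * are holes, the indirect's fill count is 1000.
 */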
/* ARGSUSED */
/*
* This function gets called just prior to running through the compression
* stage of the zio pipeline. If we're an indirect block composed of only
* holes, then we want this indirect to be compressed away to a hole. In
* order to do that we must zero out any information about the holes that
* this indirect points to before we try to compress it.
*/
static void
dbuf_write_children_ready(zio_t *zio, arc_buf_t *buf, void *vdb)
{
dmu_buf_impl_t *db = vdb;
dnode_t *dn;
blkptr_t *bp;
unsigned int epbs, i;
ASSERT3U(db->db_level, >, 0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
ASSERT3U(epbs, <, 31);
/* Determine if all our children are holes */
for (i = 0, bp = db->db.db_data; i < 1ULL << epbs; i++, bp++) {
if (!BP_IS_HOLE(bp))
break;
}
/*
* If all the children are holes, then zero them all out so that
* we may get compressed away.
*/
if (i == 1ULL << epbs) {
/*
* We only found holes. Grab the rwlock to prevent
* anybody from reading the blocks we're about to
* zero out.
*/
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
bzero(db->db.db_data, db->db.db_size);
rw_exit(&dn->dn_struct_rwlock);
}
DB_DNODE_EXIT(db);
}
/*
* The SPA will call this callback several times for each zio - once
* for every physical child i/o (zio->io_phys_children times). This
* allows the DMU to monitor the progress of each logical i/o. For example,
* there may be 2 copies of an indirect block, or many fragments of a RAID-Z
* block. There may be a long delay before all copies/fragments are completed,
* so this callback allows us to retire dirty space gradually, as the physical
* i/os complete.
*/
/* ARGSUSED */
static void
dbuf_write_physdone(zio_t *zio, arc_buf_t *buf, void *arg)
{
dmu_buf_impl_t *db = arg;
objset_t *os = db->db_objset;
dsl_pool_t *dp = dmu_objset_pool(os);
dbuf_dirty_record_t *dr;
int delta = 0;
dr = db->db_data_pending;
ASSERT3U(dr->dr_txg, ==, zio->io_txg);
/*
* The callback will be called io_phys_children times. Retire one
* portion of our dirty space each time we are called. Any rounding
* error will be cleaned up by dsl_pool_sync()'s call to
* dsl_pool_undirty_space().
*/
delta = dr->dr_accounted / zio->io_phys_children;
dsl_pool_undirty_space(dp, delta, zio->io_txg);
}
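/*
 * Illustrative worked example (not part of this patch; numbers are
 * hypothetical): if dr_accounted == 131072 bytes and the zio has
 * io_phys_children == 3 (say, three copies of a metadata block), each
 * of the three callbacks retires 131072 / 3 == 43690 bytes.  The
 * 2 bytes of rounding error are cleaned up by dsl_pool_sync()'s call
 * to dsl_pool_undirty_space().
 */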
/* ARGSUSED */
static void
dbuf_write_done(zio_t *zio, arc_buf_t *buf, void *vdb)
{
dmu_buf_impl_t *db = vdb;
blkptr_t *bp_orig = &zio->io_bp_orig;
blkptr_t *bp = db->db_blkptr;
objset_t *os = db->db_objset;
dmu_tx_t *tx = os->os_synctx;
dbuf_dirty_record_t **drp, *dr;
ASSERT0(zio->io_error);
ASSERT(db->db_blkptr == bp);
/*
* For nopwrites and rewrites we ensure that the bp matches our
* original and bypass all the accounting.
*/
if (zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
mutex_enter(&db->db_mtx);
DBUF_VERIFY(db);
drp = &db->db_last_dirty;
while ((dr = *drp) != db->db_data_pending)
drp = &dr->dr_next;
ASSERT(!list_link_active(&dr->dr_dirty_node));
ASSERT(dr->dr_dbuf == db);
ASSERT(dr->dr_next == NULL);
*drp = dr->dr_next;
#ifdef ZFS_DEBUG
if (db->db_blkid == DMU_SPILL_BLKID) {
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
ASSERT(dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR);
ASSERT(!(BP_IS_HOLE(db->db_blkptr)) &&
db->db_blkptr == DN_SPILL_BLKPTR(dn->dn_phys));
DB_DNODE_EXIT(db);
}
#endif
if (db->db_level == 0) {
ASSERT(db->db_blkid != DMU_BONUS_BLKID);
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
if (db->db_state != DB_NOFILL) {
if (dr->dt.dl.dr_data != db->db_buf)
arc_buf_destroy(dr->dt.dl.dr_data, db);
}
} else {
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
ASSERT(list_head(&dr->dt.di.dr_children) == NULL);
ASSERT3U(db->db.db_size, ==, 1 << dn->dn_phys->dn_indblkshift);
if (!BP_IS_HOLE(db->db_blkptr)) {
ASSERTV(int epbs = dn->dn_phys->dn_indblkshift -
SPA_BLKPTRSHIFT);
ASSERT3U(db->db_blkid, <=,
dn->dn_phys->dn_maxblkid >> (db->db_level * epbs));
ASSERT3U(BP_GET_LSIZE(db->db_blkptr), ==,
db->db.db_size);
}
DB_DNODE_EXIT(db);
mutex_destroy(&dr->dt.di.dr_mtx);
list_destroy(&dr->dt.di.dr_children);
}
kmem_free(dr, sizeof (dbuf_dirty_record_t));
cv_broadcast(&db->db_changed);
ASSERT(db->db_dirtycnt > 0);
db->db_dirtycnt -= 1;
db->db_data_pending = NULL;
dbuf_rele_and_unlock(db, (void *)(uintptr_t)tx->tx_txg);
}
static void
dbuf_write_nofill_ready(zio_t *zio)
{
dbuf_write_ready(zio, NULL, zio->io_private);
}
static void
dbuf_write_nofill_done(zio_t *zio)
{
dbuf_write_done(zio, NULL, zio->io_private);
}
static void
dbuf_write_override_ready(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
dbuf_write_ready(zio, NULL, db);
}
static void
dbuf_write_override_done(zio_t *zio)
{
dbuf_dirty_record_t *dr = zio->io_private;
dmu_buf_impl_t *db = dr->dr_dbuf;
blkptr_t *obp = &dr->dt.dl.dr_overridden_by;
mutex_enter(&db->db_mtx);
if (!BP_EQUAL(zio->io_bp, obp)) {
if (!BP_IS_HOLE(obp))
dsl_free(spa_get_dsl(zio->io_spa), zio->io_txg, obp);
arc_release(dr->dt.dl.dr_data, db);
}
mutex_exit(&db->db_mtx);
dbuf_write_done(zio, NULL, db);
if (zio->io_abd != NULL)
abd_put(zio->io_abd);
}
+typedef struct dbuf_remap_impl_callback_arg {
+ objset_t *drica_os;
+ uint64_t drica_blk_birth;
+ dmu_tx_t *drica_tx;
+} dbuf_remap_impl_callback_arg_t;
+
+static void
+dbuf_remap_impl_callback(uint64_t vdev, uint64_t offset, uint64_t size,
+ void *arg)
+{
+ dbuf_remap_impl_callback_arg_t *drica = arg;
+ objset_t *os = drica->drica_os;
+ spa_t *spa = dmu_objset_spa(os);
+ dmu_tx_t *tx = drica->drica_tx;
+
+ ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
+
+ if (os == spa_meta_objset(spa)) {
+ spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
+ } else {
+ dsl_dataset_block_remapped(dmu_objset_ds(os), vdev, offset,
+ size, drica->drica_blk_birth, tx);
+ }
+}
+
+static void
+dbuf_remap_impl(dnode_t *dn, blkptr_t *bp, dmu_tx_t *tx)
+{
+ blkptr_t bp_copy = *bp;
+ spa_t *spa = dmu_objset_spa(dn->dn_objset);
+ dbuf_remap_impl_callback_arg_t drica;
+
+ ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
+
+ drica.drica_os = dn->dn_objset;
+ drica.drica_blk_birth = bp->blk_birth;
+ drica.drica_tx = tx;
+ if (spa_remap_blkptr(spa, &bp_copy, dbuf_remap_impl_callback,
+ &drica)) {
+ /*
+ * The struct_rwlock prevents dbuf_read_impl() from
+ * dereferencing the BP while we are changing it. To
+ * avoid lock contention, only grab it when we are actually
+ * changing the BP.
+ */
+ rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
+ *bp = bp_copy;
+ rw_exit(&dn->dn_struct_rwlock);
+ }
+}
+
+/*
+ * Returns true if a dbuf_remap would modify the dbuf. We do this by attempting
+ * to remap a copy of every bp in the dbuf.
+ */
+boolean_t
+dbuf_can_remap(const dmu_buf_impl_t *db)
+{
+ spa_t *spa = dmu_objset_spa(db->db_objset);
+ blkptr_t *bp = db->db.db_data;
+ boolean_t ret = B_FALSE;
+
+ ASSERT3U(db->db_level, >, 0);
+ ASSERT3S(db->db_state, ==, DB_CACHED);
+
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
+
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
+ blkptr_t bp_copy = bp[i];
+ if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
+ ret = B_TRUE;
+ break;
+ }
+ }
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ return (ret);
+}
+
+boolean_t
+dnode_needs_remap(const dnode_t *dn)
+{
+ spa_t *spa = dmu_objset_spa(dn->dn_objset);
+ boolean_t ret = B_FALSE;
+
+ if (dn->dn_phys->dn_nlevels == 0) {
+ return (B_FALSE);
+ }
+
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
+
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ for (int j = 0; j < dn->dn_phys->dn_nblkptr; j++) {
+ blkptr_t bp_copy = dn->dn_phys->dn_blkptr[j];
+ if (spa_remap_blkptr(spa, &bp_copy, NULL, NULL)) {
+ ret = B_TRUE;
+ break;
+ }
+ }
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ return (ret);
+}
+
+/*
+ * Remap any existing BP's to concrete vdevs, if possible.
+ */
+static void
+dbuf_remap(dnode_t *dn, dmu_buf_impl_t *db, dmu_tx_t *tx)
+{
+ spa_t *spa = dmu_objset_spa(db->db_objset);
+ ASSERT(dsl_pool_sync_context(spa_get_dsl(spa)));
+
+ if (!spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL))
+ return;
+
+ if (db->db_level > 0) {
+ blkptr_t *bp = db->db.db_data;
+ for (int i = 0; i < db->db.db_size >> SPA_BLKPTRSHIFT; i++) {
+ dbuf_remap_impl(dn, &bp[i], tx);
+ }
+ } else if (db->db.db_object == DMU_META_DNODE_OBJECT) {
+ dnode_phys_t *dnp = db->db.db_data;
+ ASSERT3U(db->db_dnode_handle->dnh_dnode->dn_type, ==,
+ DMU_OT_DNODE);
+ for (int i = 0; i < db->db.db_size >> DNODE_SHIFT;
+ i += dnp[i].dn_extra_slots + 1) {
+ for (int j = 0; j < dnp[i].dn_nblkptr; j++) {
+ dbuf_remap_impl(dn, &dnp[i].dn_blkptr[j], tx);
+ }
+ }
+ }
+}
+
+
/* Issue I/O to commit a dirty buffer to disk. */
static void
dbuf_write(dbuf_dirty_record_t *dr, arc_buf_t *data, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dr->dr_dbuf;
dnode_t *dn;
objset_t *os;
dmu_buf_impl_t *parent = db->db_parent;
uint64_t txg = tx->tx_txg;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *zio;
int wp_flag = 0;
ASSERT(dmu_tx_is_syncing(tx));
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
os = dn->dn_objset;
if (db->db_state != DB_NOFILL) {
if (db->db_level > 0 || dn->dn_type == DMU_OT_DNODE) {
/*
* Private object buffers are released here rather
* than in dbuf_dirty() since they are only modified
* in the syncing context and we don't want the
* overhead of making multiple copies of the data.
*/
if (BP_IS_HOLE(db->db_blkptr)) {
arc_buf_thaw(data);
} else {
dbuf_release_bp(db);
}
+ dbuf_remap(dn, db, tx);
}
}
if (parent != dn->dn_dbuf) {
/* Our parent is an indirect block. */
/* We have a dirty parent that has been scheduled for write. */
ASSERT(parent && parent->db_data_pending);
/* Our parent's buffer is one level closer to the dnode. */
ASSERT(db->db_level == parent->db_level-1);
/*
* We're about to modify our parent's db_data by modifying
* our block pointer, so the parent must be released.
*/
ASSERT(arc_released(parent->db_buf));
zio = parent->db_data_pending->dr_zio;
} else {
/* Our parent is the dnode itself. */
ASSERT((db->db_level == dn->dn_phys->dn_nlevels-1 &&
db->db_blkid != DMU_SPILL_BLKID) ||
(db->db_blkid == DMU_SPILL_BLKID && db->db_level == 0));
if (db->db_blkid != DMU_SPILL_BLKID)
ASSERT3P(db->db_blkptr, ==,
&dn->dn_phys->dn_blkptr[db->db_blkid]);
zio = dn->dn_zio;
}
ASSERT(db->db_level == 0 || data == db->db_buf);
ASSERT3U(db->db_blkptr->blk_birth, <=, txg);
ASSERT(zio);
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
db->db.db_object, db->db_level, db->db_blkid);
if (db->db_blkid == DMU_SPILL_BLKID)
wp_flag = WP_SPILL;
wp_flag |= (db->db_state == DB_NOFILL) ? WP_NOFILL : 0;
dmu_write_policy(os, dn, db->db_level, wp_flag, &zp);
DB_DNODE_EXIT(db);
/*
* We copy the blkptr now (rather than when we instantiate the dirty
* record), because its value can change between open context and
* syncing context. We do not need to hold dn_struct_rwlock to read
* db_blkptr because we are in syncing context.
*/
dr->dr_bp_copy = *db->db_blkptr;
if (db->db_level == 0 &&
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* The BP for this block has been provided by open context
* (by dmu_sync() or dmu_buf_write_embedded()).
*/
abd_t *contents = (data != NULL) ?
abd_get_from_buf(data->b_data, arc_buf_size(data)) : NULL;
dr->dr_zio = zio_write(zio, os->os_spa, txg,
&dr->dr_bp_copy, contents, db->db.db_size, db->db.db_size,
&zp, dbuf_write_override_ready, NULL, NULL,
dbuf_write_override_done,
dr, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
mutex_enter(&db->db_mtx);
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
zio_write_override(dr->dr_zio, &dr->dt.dl.dr_overridden_by,
dr->dt.dl.dr_copies, dr->dt.dl.dr_nopwrite);
mutex_exit(&db->db_mtx);
} else if (db->db_state == DB_NOFILL) {
ASSERT(zp.zp_checksum == ZIO_CHECKSUM_OFF ||
zp.zp_checksum == ZIO_CHECKSUM_NOPARITY);
dr->dr_zio = zio_write(zio, os->os_spa, txg,
&dr->dr_bp_copy, NULL, db->db.db_size, db->db.db_size, &zp,
dbuf_write_nofill_ready, NULL, NULL,
dbuf_write_nofill_done, db,
ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED | ZIO_FLAG_NODATA, &zb);
} else {
ASSERT(arc_released(data));
/*
* For indirect blocks, we want to setup the children
* ready callback so that we can properly handle an indirect
* block that only contains holes.
*/
arc_write_done_func_t *children_ready_cb = NULL;
if (db->db_level != 0)
children_ready_cb = dbuf_write_children_ready;
dr->dr_zio = arc_write(zio, os->os_spa, txg,
&dr->dr_bp_copy, data, DBUF_IS_L2CACHEABLE(db),
&zp, dbuf_write_ready,
children_ready_cb, dbuf_write_physdone,
dbuf_write_done, db, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_MUSTSUCCEED, &zb);
}
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dbuf_find);
EXPORT_SYMBOL(dbuf_is_metadata);
EXPORT_SYMBOL(dbuf_destroy);
EXPORT_SYMBOL(dbuf_loan_arcbuf);
EXPORT_SYMBOL(dbuf_whichblock);
EXPORT_SYMBOL(dbuf_read);
EXPORT_SYMBOL(dbuf_unoverride);
EXPORT_SYMBOL(dbuf_free_range);
EXPORT_SYMBOL(dbuf_new_size);
EXPORT_SYMBOL(dbuf_release_bp);
EXPORT_SYMBOL(dbuf_dirty);
EXPORT_SYMBOL(dmu_buf_will_change_crypt_params);
EXPORT_SYMBOL(dmu_buf_will_dirty);
EXPORT_SYMBOL(dmu_buf_will_not_fill);
EXPORT_SYMBOL(dmu_buf_will_fill);
EXPORT_SYMBOL(dmu_buf_fill_done);
EXPORT_SYMBOL(dmu_buf_rele);
EXPORT_SYMBOL(dbuf_assign_arcbuf);
EXPORT_SYMBOL(dbuf_prefetch);
EXPORT_SYMBOL(dbuf_hold_impl);
EXPORT_SYMBOL(dbuf_hold);
EXPORT_SYMBOL(dbuf_hold_level);
EXPORT_SYMBOL(dbuf_create_bonus);
EXPORT_SYMBOL(dbuf_spill_set_blksz);
EXPORT_SYMBOL(dbuf_rm_spill);
EXPORT_SYMBOL(dbuf_add_ref);
EXPORT_SYMBOL(dbuf_rele);
EXPORT_SYMBOL(dbuf_rele_and_unlock);
EXPORT_SYMBOL(dbuf_refcount);
EXPORT_SYMBOL(dbuf_sync_list);
EXPORT_SYMBOL(dmu_buf_set_user);
EXPORT_SYMBOL(dmu_buf_set_user_ie);
EXPORT_SYMBOL(dmu_buf_get_user);
EXPORT_SYMBOL(dmu_buf_get_blkptr);
/* BEGIN CSTYLED */
module_param(dbuf_cache_max_bytes, ulong, 0644);
MODULE_PARM_DESC(dbuf_cache_max_bytes,
"Maximum size in bytes of the dbuf cache.");
module_param(dbuf_cache_hiwater_pct, uint, 0644);
MODULE_PARM_DESC(dbuf_cache_hiwater_pct,
"Percentage over dbuf_cache_max_bytes when dbufs must be evicted "
"directly.");
module_param(dbuf_cache_lowater_pct, uint, 0644);
MODULE_PARM_DESC(dbuf_cache_lowater_pct,
"Percentage below dbuf_cache_max_bytes when the evict thread stops "
"evicting dbufs.");
module_param(dbuf_cache_shift, int, 0644);
MODULE_PARM_DESC(dbuf_cache_shift,
"Set the size of the dbuf cache to a log2 fraction of arc size.");
/* END CSTYLED */
#endif
diff --git a/module/zfs/ddt.c b/module/zfs/ddt.c
index 24516834f0f3..681033d71788 100644
--- a/module/zfs/ddt.c
+++ b/module/zfs/ddt.c
@@ -1,1242 +1,1242 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2009, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/ddt.h>
#include <sys/zap.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/dsl_pool.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
static kmem_cache_t *ddt_cache;
static kmem_cache_t *ddt_entry_cache;
/*
* Enable/disable prefetching of dedup-ed blocks which are going to be freed.
*/
int zfs_dedup_prefetch = 0;
static const ddt_ops_t *ddt_ops[DDT_TYPES] = {
&ddt_zap_ops,
};
static const char *ddt_class_name[DDT_CLASSES] = {
"ditto",
"duplicate",
"unique",
};
static void
ddt_object_create(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_tx_t *tx)
{
spa_t *spa = ddt->ddt_spa;
objset_t *os = ddt->ddt_os;
uint64_t *objectp = &ddt->ddt_object[type][class];
boolean_t prehash = zio_checksum_table[ddt->ddt_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP;
char name[DDT_NAMELEN];
ddt_object_name(ddt, type, class, name);
ASSERT(*objectp == 0);
VERIFY(ddt_ops[type]->ddt_op_create(os, objectp, tx, prehash) == 0);
ASSERT(*objectp != 0);
VERIFY(zap_add(os, DMU_POOL_DIRECTORY_OBJECT, name,
sizeof (uint64_t), 1, objectp, tx) == 0);
VERIFY(zap_add(os, spa->spa_ddt_stat_object, name,
sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
&ddt->ddt_histogram[type][class], tx) == 0);
}
static void
ddt_object_destroy(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_tx_t *tx)
{
spa_t *spa = ddt->ddt_spa;
objset_t *os = ddt->ddt_os;
uint64_t *objectp = &ddt->ddt_object[type][class];
uint64_t count;
char name[DDT_NAMELEN];
ddt_object_name(ddt, type, class, name);
ASSERT(*objectp != 0);
ASSERT(ddt_histogram_empty(&ddt->ddt_histogram[type][class]));
VERIFY(ddt_object_count(ddt, type, class, &count) == 0 && count == 0);
VERIFY(zap_remove(os, DMU_POOL_DIRECTORY_OBJECT, name, tx) == 0);
VERIFY(zap_remove(os, spa->spa_ddt_stat_object, name, tx) == 0);
VERIFY(ddt_ops[type]->ddt_op_destroy(os, *objectp, tx) == 0);
bzero(&ddt->ddt_object_stats[type][class], sizeof (ddt_object_t));
*objectp = 0;
}
static int
ddt_object_load(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
ddt_object_t *ddo = &ddt->ddt_object_stats[type][class];
dmu_object_info_t doi;
uint64_t count;
char name[DDT_NAMELEN];
int error;
ddt_object_name(ddt, type, class, name);
error = zap_lookup(ddt->ddt_os, DMU_POOL_DIRECTORY_OBJECT, name,
sizeof (uint64_t), 1, &ddt->ddt_object[type][class]);
if (error != 0)
return (error);
error = zap_lookup(ddt->ddt_os, ddt->ddt_spa->spa_ddt_stat_object, name,
sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
&ddt->ddt_histogram[type][class]);
if (error != 0)
return (error);
/*
* Seed the cached statistics.
*/
error = ddt_object_info(ddt, type, class, &doi);
if (error)
return (error);
error = ddt_object_count(ddt, type, class, &count);
if (error)
return (error);
ddo->ddo_count = count;
ddo->ddo_dspace = doi.doi_physical_blocks_512 << 9;
ddo->ddo_mspace = doi.doi_fill_count * doi.doi_data_block_size;
return (0);
}
static void
ddt_object_sync(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_tx_t *tx)
{
ddt_object_t *ddo = &ddt->ddt_object_stats[type][class];
dmu_object_info_t doi;
uint64_t count;
char name[DDT_NAMELEN];
ddt_object_name(ddt, type, class, name);
VERIFY(zap_update(ddt->ddt_os, ddt->ddt_spa->spa_ddt_stat_object, name,
sizeof (uint64_t), sizeof (ddt_histogram_t) / sizeof (uint64_t),
&ddt->ddt_histogram[type][class], tx) == 0);
/*
* Cache DDT statistics; this is the only time they'll change.
*/
VERIFY(ddt_object_info(ddt, type, class, &doi) == 0);
VERIFY(ddt_object_count(ddt, type, class, &count) == 0);
ddo->ddo_count = count;
ddo->ddo_dspace = doi.doi_physical_blocks_512 << 9;
ddo->ddo_mspace = doi.doi_fill_count * doi.doi_data_block_size;
}
static int
ddt_object_lookup(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde)
{
if (!ddt_object_exists(ddt, type, class))
return (SET_ERROR(ENOENT));
return (ddt_ops[type]->ddt_op_lookup(ddt->ddt_os,
ddt->ddt_object[type][class], dde));
}
static void
ddt_object_prefetch(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde)
{
if (!ddt_object_exists(ddt, type, class))
return;
ddt_ops[type]->ddt_op_prefetch(ddt->ddt_os,
ddt->ddt_object[type][class], dde);
}
int
ddt_object_update(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde, dmu_tx_t *tx)
{
ASSERT(ddt_object_exists(ddt, type, class));
return (ddt_ops[type]->ddt_op_update(ddt->ddt_os,
ddt->ddt_object[type][class], dde, tx));
}
static int
ddt_object_remove(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
ddt_entry_t *dde, dmu_tx_t *tx)
{
ASSERT(ddt_object_exists(ddt, type, class));
return (ddt_ops[type]->ddt_op_remove(ddt->ddt_os,
ddt->ddt_object[type][class], dde, tx));
}
int
ddt_object_walk(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
uint64_t *walk, ddt_entry_t *dde)
{
ASSERT(ddt_object_exists(ddt, type, class));
return (ddt_ops[type]->ddt_op_walk(ddt->ddt_os,
ddt->ddt_object[type][class], dde, walk));
}
int
ddt_object_count(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
uint64_t *count)
{
ASSERT(ddt_object_exists(ddt, type, class));
return (ddt_ops[type]->ddt_op_count(ddt->ddt_os,
ddt->ddt_object[type][class], count));
}
int
ddt_object_info(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
dmu_object_info_t *doi)
{
if (!ddt_object_exists(ddt, type, class))
return (SET_ERROR(ENOENT));
return (dmu_object_info(ddt->ddt_os, ddt->ddt_object[type][class],
doi));
}
boolean_t
ddt_object_exists(ddt_t *ddt, enum ddt_type type, enum ddt_class class)
{
return (!!ddt->ddt_object[type][class]);
}
void
ddt_object_name(ddt_t *ddt, enum ddt_type type, enum ddt_class class,
char *name)
{
(void) sprintf(name, DMU_POOL_DDT,
zio_checksum_table[ddt->ddt_checksum].ci_name,
ddt_ops[type]->ddt_op_name, ddt_class_name[class]);
}
void
ddt_bp_fill(const ddt_phys_t *ddp, blkptr_t *bp, uint64_t txg)
{
ASSERT(txg != 0);
for (int d = 0; d < SPA_DVAS_PER_BP; d++)
bp->blk_dva[d] = ddp->ddp_dva[d];
BP_SET_BIRTH(bp, txg, ddp->ddp_phys_birth);
}
/*
* The bp created via this function may be used for repairs and scrub, but it
* will be missing the salt / IV required to do a full decrypting read.
*/
void
ddt_bp_create(enum zio_checksum checksum,
const ddt_key_t *ddk, const ddt_phys_t *ddp, blkptr_t *bp)
{
BP_ZERO(bp);
if (ddp != NULL)
ddt_bp_fill(ddp, bp, ddp->ddp_phys_birth);
bp->blk_cksum = ddk->ddk_cksum;
BP_SET_LSIZE(bp, DDK_GET_LSIZE(ddk));
BP_SET_PSIZE(bp, DDK_GET_PSIZE(ddk));
BP_SET_COMPRESS(bp, DDK_GET_COMPRESS(ddk));
BP_SET_CRYPT(bp, DDK_GET_CRYPT(ddk));
BP_SET_FILL(bp, 1);
BP_SET_CHECKSUM(bp, checksum);
BP_SET_TYPE(bp, DMU_OT_DEDUP);
BP_SET_LEVEL(bp, 0);
BP_SET_DEDUP(bp, 0);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
}
void
ddt_key_fill(ddt_key_t *ddk, const blkptr_t *bp)
{
ddk->ddk_cksum = bp->blk_cksum;
ddk->ddk_prop = 0;
ASSERT(BP_IS_ENCRYPTED(bp) || !BP_USES_CRYPT(bp));
DDK_SET_LSIZE(ddk, BP_GET_LSIZE(bp));
DDK_SET_PSIZE(ddk, BP_GET_PSIZE(bp));
DDK_SET_COMPRESS(ddk, BP_GET_COMPRESS(bp));
DDK_SET_CRYPT(ddk, BP_USES_CRYPT(bp));
}
void
ddt_phys_fill(ddt_phys_t *ddp, const blkptr_t *bp)
{
ASSERT(ddp->ddp_phys_birth == 0);
for (int d = 0; d < SPA_DVAS_PER_BP; d++)
ddp->ddp_dva[d] = bp->blk_dva[d];
ddp->ddp_phys_birth = BP_PHYSICAL_BIRTH(bp);
}
void
ddt_phys_clear(ddt_phys_t *ddp)
{
bzero(ddp, sizeof (*ddp));
}
void
ddt_phys_addref(ddt_phys_t *ddp)
{
ddp->ddp_refcnt++;
}
void
ddt_phys_decref(ddt_phys_t *ddp)
{
if (ddp) {
ASSERT(ddp->ddp_refcnt > 0);
ddp->ddp_refcnt--;
}
}
void
ddt_phys_free(ddt_t *ddt, ddt_key_t *ddk, ddt_phys_t *ddp, uint64_t txg)
{
blkptr_t blk;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
ddt_phys_clear(ddp);
zio_free(ddt->ddt_spa, txg, &blk);
}
ddt_phys_t *
ddt_phys_select(const ddt_entry_t *dde, const blkptr_t *bp)
{
ddt_phys_t *ddp = (ddt_phys_t *)dde->dde_phys;
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (DVA_EQUAL(BP_IDENTITY(bp), &ddp->ddp_dva[0]) &&
BP_PHYSICAL_BIRTH(bp) == ddp->ddp_phys_birth)
return (ddp);
}
return (NULL);
}
uint64_t
ddt_phys_total_refcnt(const ddt_entry_t *dde)
{
uint64_t refcnt = 0;
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++)
refcnt += dde->dde_phys[p].ddp_refcnt;
return (refcnt);
}
static void
ddt_stat_generate(ddt_t *ddt, ddt_entry_t *dde, ddt_stat_t *dds)
{
spa_t *spa = ddt->ddt_spa;
ddt_phys_t *ddp = dde->dde_phys;
ddt_key_t *ddk = &dde->dde_key;
uint64_t lsize = DDK_GET_LSIZE(ddk);
uint64_t psize = DDK_GET_PSIZE(ddk);
bzero(dds, sizeof (*dds));
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
uint64_t dsize = 0;
uint64_t refcnt = ddp->ddp_refcnt;
if (ddp->ddp_phys_birth == 0)
continue;
for (int d = 0; d < DDE_GET_NDVAS(dde); d++)
dsize += dva_get_dsize_sync(spa, &ddp->ddp_dva[d]);
dds->dds_blocks += 1;
dds->dds_lsize += lsize;
dds->dds_psize += psize;
dds->dds_dsize += dsize;
dds->dds_ref_blocks += refcnt;
dds->dds_ref_lsize += lsize * refcnt;
dds->dds_ref_psize += psize * refcnt;
dds->dds_ref_dsize += dsize * refcnt;
}
}
void
ddt_stat_add(ddt_stat_t *dst, const ddt_stat_t *src, uint64_t neg)
{
const uint64_t *s = (const uint64_t *)src;
uint64_t *d = (uint64_t *)dst;
uint64_t *d_end = (uint64_t *)(dst + 1);
ASSERT(neg == 0 || neg == -1ULL); /* add or subtract */
while (d < d_end)
*d++ += (*s++ ^ neg) - neg;
}
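/*
 * Illustrative sketch (not part of this patch): why the expression
 * "*d++ += (*s++ ^ neg) - neg" above adds when neg == 0 and subtracts
 * when neg == -1ULL.  With neg == -1ULL, x ^ neg == ~x, and
 * ~x - (-1ULL) == ~x + 1 == -x in two's complement arithmetic.
 */
#include <assert.h>
#include <stdint.h>

int
main(void)
{
	uint64_t d = 100, s = 30;
	uint64_t neg;

	neg = 0;		/* add */
	d += (s ^ neg) - neg;
	assert(d == 130);

	neg = -1ULL;		/* subtract */
	d += (s ^ neg) - neg;
	assert(d == 100);

	return (0);
}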
static void
ddt_stat_update(ddt_t *ddt, ddt_entry_t *dde, uint64_t neg)
{
ddt_stat_t dds;
ddt_histogram_t *ddh;
int bucket;
ddt_stat_generate(ddt, dde, &dds);
bucket = highbit64(dds.dds_ref_blocks) - 1;
ASSERT(bucket >= 0);
ddh = &ddt->ddt_histogram[dde->dde_type][dde->dde_class];
ddt_stat_add(&ddh->ddh_stat[bucket], &dds, neg);
}
void
ddt_histogram_add(ddt_histogram_t *dst, const ddt_histogram_t *src)
{
for (int h = 0; h < 64; h++)
ddt_stat_add(&dst->ddh_stat[h], &src->ddh_stat[h], 0);
}
void
ddt_histogram_stat(ddt_stat_t *dds, const ddt_histogram_t *ddh)
{
bzero(dds, sizeof (*dds));
for (int h = 0; h < 64; h++)
ddt_stat_add(dds, &ddh->ddh_stat[h], 0);
}
boolean_t
ddt_histogram_empty(const ddt_histogram_t *ddh)
{
const uint64_t *s = (const uint64_t *)ddh;
const uint64_t *s_end = (const uint64_t *)(ddh + 1);
while (s < s_end)
if (*s++ != 0)
return (B_FALSE);
return (B_TRUE);
}
void
ddt_get_dedup_object_stats(spa_t *spa, ddt_object_t *ddo_total)
{
/* Sum the statistics we cached in ddt_object_sync(). */
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
ddt_object_t *ddo =
&ddt->ddt_object_stats[type][class];
ddo_total->ddo_count += ddo->ddo_count;
ddo_total->ddo_dspace += ddo->ddo_dspace;
ddo_total->ddo_mspace += ddo->ddo_mspace;
}
}
}
/* ... and compute the averages. */
if (ddo_total->ddo_count != 0) {
ddo_total->ddo_dspace /= ddo_total->ddo_count;
ddo_total->ddo_mspace /= ddo_total->ddo_count;
}
}
void
ddt_get_dedup_histogram(spa_t *spa, ddt_histogram_t *ddh)
{
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
ddt_histogram_add(ddh,
&ddt->ddt_histogram_cache[type][class]);
}
}
}
}
void
ddt_get_dedup_stats(spa_t *spa, ddt_stat_t *dds_total)
{
ddt_histogram_t *ddh_total;
ddh_total = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
ddt_get_dedup_histogram(spa, ddh_total);
ddt_histogram_stat(dds_total, ddh_total);
kmem_free(ddh_total, sizeof (ddt_histogram_t));
}
uint64_t
ddt_get_dedup_dspace(spa_t *spa)
{
ddt_stat_t dds_total;
if (spa->spa_dedup_dspace != ~0ULL)
return (spa->spa_dedup_dspace);
bzero(&dds_total, sizeof (ddt_stat_t));
/* Calculate and cache the stats */
ddt_get_dedup_stats(spa, &dds_total);
spa->spa_dedup_dspace = dds_total.dds_ref_dsize - dds_total.dds_dsize;
return (spa->spa_dedup_dspace);
}
uint64_t
ddt_get_pool_dedup_ratio(spa_t *spa)
{
ddt_stat_t dds_total = { 0 };
ddt_get_dedup_stats(spa, &dds_total);
if (dds_total.dds_dsize == 0)
return (100);
return (dds_total.dds_ref_dsize * 100 / dds_total.dds_dsize);
}
int
ddt_ditto_copies_needed(ddt_t *ddt, ddt_entry_t *dde, ddt_phys_t *ddp_willref)
{
spa_t *spa = ddt->ddt_spa;
uint64_t total_refcnt = 0;
uint64_t ditto = spa->spa_dedup_ditto;
int total_copies = 0;
int desired_copies = 0;
int copies_needed = 0;
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
ddt_phys_t *ddp = &dde->dde_phys[p];
zio_t *zio = dde->dde_lead_zio[p];
uint64_t refcnt = ddp->ddp_refcnt; /* committed refs */
if (zio != NULL)
refcnt += zio->io_parent_count; /* pending refs */
if (ddp == ddp_willref)
refcnt++; /* caller's ref */
if (refcnt != 0) {
total_refcnt += refcnt;
total_copies += p;
}
}
if (ditto == 0 || ditto > UINT32_MAX)
ditto = UINT32_MAX;
if (total_refcnt >= 1)
desired_copies++;
if (total_refcnt >= ditto)
desired_copies++;
if (total_refcnt >= ditto * ditto)
desired_copies++;
copies_needed = MAX(desired_copies, total_copies) - total_copies;
/* encrypted blocks store their IV in DVA[2] */
if (DDK_GET_CRYPT(&dde->dde_key))
copies_needed = MIN(copies_needed, SPA_DVAS_PER_BP - 1);
return (copies_needed);
}
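/*
 * [Editor's illustrative sketch, not part of this diff.] The desired copy
 * count above steps up at 1, spa_dedup_ditto, and spa_dedup_ditto squared
 * total references.  A standalone version of just that threshold logic
 * (clamping of the ditto value is omitted, and the ditto value of 100 used
 * in the demo is an arbitrary example, not a ZFS default):
 */
#include <assert.h>
#include <stdint.h>

static int
ditto_desired_copies(uint64_t total_refcnt, uint64_t ditto)
{
	int desired = 0;

	if (total_refcnt >= 1)
		desired++;
	if (total_refcnt >= ditto)
		desired++;
	if (total_refcnt >= ditto * ditto)
		desired++;
	return (desired);
}

static void
ditto_demo(void)
{
	assert(ditto_desired_copies(1, 100) == 1);
	assert(ditto_desired_copies(150, 100) == 2);
	assert(ditto_desired_copies(20000, 100) == 3);
}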
int
ddt_ditto_copies_present(ddt_entry_t *dde)
{
ddt_phys_t *ddp = &dde->dde_phys[DDT_PHYS_DITTO];
dva_t *dva = ddp->ddp_dva;
int copies = 0 - DVA_GET_GANG(dva);
for (int d = 0; d < DDE_GET_NDVAS(dde); d++, dva++)
if (DVA_IS_VALID(dva))
copies++;
ASSERT(copies >= 0 && copies < SPA_DVAS_PER_BP);
return (copies);
}
size_t
ddt_compress(void *src, uchar_t *dst, size_t s_len, size_t d_len)
{
uchar_t *version = dst++;
int cpfunc = ZIO_COMPRESS_ZLE;
zio_compress_info_t *ci = &zio_compress_table[cpfunc];
size_t c_len;
ASSERT(d_len >= s_len + 1); /* no compression plus version byte */
c_len = ci->ci_compress(src, dst, s_len, d_len - 1, ci->ci_level);
if (c_len == s_len) {
cpfunc = ZIO_COMPRESS_OFF;
bcopy(src, dst, s_len);
}
*version = cpfunc;
/* CONSTCOND */
if (ZFS_HOST_BYTEORDER)
*version |= DDT_COMPRESS_BYTEORDER_MASK;
return (c_len + 1);
}
void
ddt_decompress(uchar_t *src, void *dst, size_t s_len, size_t d_len)
{
uchar_t version = *src++;
int cpfunc = version & DDT_COMPRESS_FUNCTION_MASK;
zio_compress_info_t *ci = &zio_compress_table[cpfunc];
if (ci->ci_decompress != NULL)
(void) ci->ci_decompress(src, dst, s_len, d_len, ci->ci_level);
else
bcopy(src, dst, d_len);
if (((version & DDT_COMPRESS_BYTEORDER_MASK) != 0) !=
(ZFS_HOST_BYTEORDER != 0))
byteswap_uint64_array(dst, d_len);
}
ddt_t *
ddt_select_by_checksum(spa_t *spa, enum zio_checksum c)
{
return (spa->spa_ddt[c]);
}
ddt_t *
ddt_select(spa_t *spa, const blkptr_t *bp)
{
return (spa->spa_ddt[BP_GET_CHECKSUM(bp)]);
}
void
ddt_enter(ddt_t *ddt)
{
mutex_enter(&ddt->ddt_lock);
}
void
ddt_exit(ddt_t *ddt)
{
mutex_exit(&ddt->ddt_lock);
}
void
ddt_init(void)
{
ddt_cache = kmem_cache_create("ddt_cache",
sizeof (ddt_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
ddt_entry_cache = kmem_cache_create("ddt_entry_cache",
sizeof (ddt_entry_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
ddt_fini(void)
{
kmem_cache_destroy(ddt_entry_cache);
kmem_cache_destroy(ddt_cache);
}
static ddt_entry_t *
ddt_alloc(const ddt_key_t *ddk)
{
ddt_entry_t *dde;
dde = kmem_cache_alloc(ddt_entry_cache, KM_SLEEP);
bzero(dde, sizeof (ddt_entry_t));
cv_init(&dde->dde_cv, NULL, CV_DEFAULT, NULL);
dde->dde_key = *ddk;
return (dde);
}
static void
ddt_free(ddt_entry_t *dde)
{
ASSERT(!dde->dde_loading);
for (int p = 0; p < DDT_PHYS_TYPES; p++)
ASSERT(dde->dde_lead_zio[p] == NULL);
if (dde->dde_repair_abd != NULL)
abd_free(dde->dde_repair_abd);
cv_destroy(&dde->dde_cv);
kmem_cache_free(ddt_entry_cache, dde);
}
void
ddt_remove(ddt_t *ddt, ddt_entry_t *dde)
{
ASSERT(MUTEX_HELD(&ddt->ddt_lock));
avl_remove(&ddt->ddt_tree, dde);
ddt_free(dde);
}
ddt_entry_t *
ddt_lookup(ddt_t *ddt, const blkptr_t *bp, boolean_t add)
{
ddt_entry_t *dde, dde_search;
enum ddt_type type;
enum ddt_class class;
avl_index_t where;
int error;
ASSERT(MUTEX_HELD(&ddt->ddt_lock));
ddt_key_fill(&dde_search.dde_key, bp);
dde = avl_find(&ddt->ddt_tree, &dde_search, &where);
if (dde == NULL) {
if (!add)
return (NULL);
dde = ddt_alloc(&dde_search.dde_key);
avl_insert(&ddt->ddt_tree, dde, where);
}
while (dde->dde_loading)
cv_wait(&dde->dde_cv, &ddt->ddt_lock);
if (dde->dde_loaded)
return (dde);
dde->dde_loading = B_TRUE;
ddt_exit(ddt);
error = ENOENT;
for (type = 0; type < DDT_TYPES; type++) {
for (class = 0; class < DDT_CLASSES; class++) {
error = ddt_object_lookup(ddt, type, class, dde);
- if (error != ENOENT)
+ if (error != ENOENT) {
+ ASSERT0(error);
break;
+ }
}
if (error != ENOENT)
break;
}
- ASSERT(error == 0 || error == ENOENT);
-
ddt_enter(ddt);
ASSERT(dde->dde_loaded == B_FALSE);
ASSERT(dde->dde_loading == B_TRUE);
dde->dde_type = type; /* will be DDT_TYPES if no entry found */
dde->dde_class = class; /* will be DDT_CLASSES if no entry found */
dde->dde_loaded = B_TRUE;
dde->dde_loading = B_FALSE;
if (error == 0)
ddt_stat_update(ddt, dde, -1ULL);
cv_broadcast(&dde->dde_cv);
return (dde);
}
void
ddt_prefetch(spa_t *spa, const blkptr_t *bp)
{
ddt_t *ddt;
ddt_entry_t dde;
if (!zfs_dedup_prefetch || bp == NULL || !BP_GET_DEDUP(bp))
return;
/*
* We only remove the DDT once all tables are empty and only
* prefetch dedup blocks when there are entries in the DDT.
* Thus no locking is required as the DDT can't disappear on us.
*/
ddt = ddt_select(spa, bp);
ddt_key_fill(&dde.dde_key, bp);
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
ddt_object_prefetch(ddt, type, class, &dde);
}
}
}
/*
* Opaque struct used for ddt_key comparison
*/
#define DDT_KEY_CMP_LEN (sizeof (ddt_key_t) / sizeof (uint16_t))
typedef struct ddt_key_cmp {
uint16_t u16[DDT_KEY_CMP_LEN];
} ddt_key_cmp_t;
int
ddt_entry_compare(const void *x1, const void *x2)
{
const ddt_entry_t *dde1 = x1;
const ddt_entry_t *dde2 = x2;
const ddt_key_cmp_t *k1 = (const ddt_key_cmp_t *)&dde1->dde_key;
const ddt_key_cmp_t *k2 = (const ddt_key_cmp_t *)&dde2->dde_key;
int32_t cmp = 0;
for (int i = 0; i < DDT_KEY_CMP_LEN; i++) {
cmp = (int32_t)k1->u16[i] - (int32_t)k2->u16[i];
if (likely(cmp))
break;
}
return (AVL_ISIGN(cmp));
}
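/*
 * [Editor's illustrative sketch, not part of this diff.] ddt_entry_compare()
 * above views the ddt_key_t as an array of uint16_t values and orders
 * entries by the first differing element, normalizing the comparison to a
 * sign value for the AVL tree.  A standalone version of that idea over a
 * plain array (names here are hypothetical):
 */
#include <assert.h>
#include <stddef.h>
#include <stdint.h>

static int
u16_array_compare(const uint16_t *a, const uint16_t *b, size_t n)
{
	for (size_t i = 0; i < n; i++) {
		int32_t cmp = (int32_t)a[i] - (int32_t)b[i];
		if (cmp != 0)
			return (cmp > 0 ? 1 : -1);
	}
	return (0);
}

static void
u16_array_compare_demo(void)
{
	uint16_t x[3] = { 1, 2, 3 };
	uint16_t y[3] = { 1, 5, 0 };

	assert(u16_array_compare(x, y, 3) == -1);
	assert(u16_array_compare(x, x, 3) == 0);
}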
static ddt_t *
ddt_table_alloc(spa_t *spa, enum zio_checksum c)
{
ddt_t *ddt;
ddt = kmem_cache_alloc(ddt_cache, KM_SLEEP);
bzero(ddt, sizeof (ddt_t));
mutex_init(&ddt->ddt_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&ddt->ddt_tree, ddt_entry_compare,
sizeof (ddt_entry_t), offsetof(ddt_entry_t, dde_node));
avl_create(&ddt->ddt_repair_tree, ddt_entry_compare,
sizeof (ddt_entry_t), offsetof(ddt_entry_t, dde_node));
ddt->ddt_checksum = c;
ddt->ddt_spa = spa;
ddt->ddt_os = spa->spa_meta_objset;
return (ddt);
}
static void
ddt_table_free(ddt_t *ddt)
{
ASSERT(avl_numnodes(&ddt->ddt_tree) == 0);
ASSERT(avl_numnodes(&ddt->ddt_repair_tree) == 0);
avl_destroy(&ddt->ddt_tree);
avl_destroy(&ddt->ddt_repair_tree);
mutex_destroy(&ddt->ddt_lock);
kmem_cache_free(ddt_cache, ddt);
}
void
ddt_create(spa_t *spa)
{
spa->spa_dedup_checksum = ZIO_DEDUPCHECKSUM;
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++)
spa->spa_ddt[c] = ddt_table_alloc(spa, c);
}
int
ddt_load(spa_t *spa)
{
int error;
ddt_create(spa);
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DDT_STATS, sizeof (uint64_t), 1,
&spa->spa_ddt_stat_object);
if (error)
return (error == ENOENT ? 0 : error);
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES;
class++) {
error = ddt_object_load(ddt, type, class);
if (error != 0 && error != ENOENT)
return (error);
}
}
/*
* Seed the cached histograms.
*/
bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
sizeof (ddt->ddt_histogram));
spa->spa_dedup_dspace = ~0ULL;
}
return (0);
}
void
ddt_unload(spa_t *spa)
{
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
if (spa->spa_ddt[c]) {
ddt_table_free(spa->spa_ddt[c]);
spa->spa_ddt[c] = NULL;
}
}
}
boolean_t
ddt_class_contains(spa_t *spa, enum ddt_class max_class, const blkptr_t *bp)
{
ddt_t *ddt;
ddt_entry_t *dde;
if (!BP_GET_DEDUP(bp))
return (B_FALSE);
if (max_class == DDT_CLASS_UNIQUE)
return (B_TRUE);
ddt = spa->spa_ddt[BP_GET_CHECKSUM(bp)];
dde = kmem_cache_alloc(ddt_entry_cache, KM_SLEEP);
ddt_key_fill(&(dde->dde_key), bp);
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class <= max_class; class++) {
if (ddt_object_lookup(ddt, type, class, dde) == 0) {
kmem_cache_free(ddt_entry_cache, dde);
return (B_TRUE);
}
}
}
kmem_cache_free(ddt_entry_cache, dde);
return (B_FALSE);
}
ddt_entry_t *
ddt_repair_start(ddt_t *ddt, const blkptr_t *bp)
{
ddt_key_t ddk;
ddt_entry_t *dde;
ddt_key_fill(&ddk, bp);
dde = ddt_alloc(&ddk);
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
/*
* We can only do repair if there are multiple copies
* of the block. For anything in the UNIQUE class,
* there's definitely only one copy, so don't even try.
*/
if (class != DDT_CLASS_UNIQUE &&
ddt_object_lookup(ddt, type, class, dde) == 0)
return (dde);
}
}
bzero(dde->dde_phys, sizeof (dde->dde_phys));
return (dde);
}
void
ddt_repair_done(ddt_t *ddt, ddt_entry_t *dde)
{
avl_index_t where;
ddt_enter(ddt);
if (dde->dde_repair_abd != NULL && spa_writeable(ddt->ddt_spa) &&
avl_find(&ddt->ddt_repair_tree, dde, &where) == NULL)
avl_insert(&ddt->ddt_repair_tree, dde, where);
else
ddt_free(dde);
ddt_exit(ddt);
}
static void
ddt_repair_entry_done(zio_t *zio)
{
ddt_entry_t *rdde = zio->io_private;
ddt_free(rdde);
}
static void
ddt_repair_entry(ddt_t *ddt, ddt_entry_t *dde, ddt_entry_t *rdde, zio_t *rio)
{
ddt_phys_t *ddp = dde->dde_phys;
ddt_phys_t *rddp = rdde->dde_phys;
ddt_key_t *ddk = &dde->dde_key;
ddt_key_t *rddk = &rdde->dde_key;
zio_t *zio;
blkptr_t blk;
zio = zio_null(rio, rio->io_spa, NULL,
ddt_repair_entry_done, rdde, rio->io_flags);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++, rddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth != rddp->ddp_phys_birth ||
bcmp(ddp->ddp_dva, rddp->ddp_dva, sizeof (ddp->ddp_dva)))
continue;
ddt_bp_create(ddt->ddt_checksum, ddk, ddp, &blk);
zio_nowait(zio_rewrite(zio, zio->io_spa, 0, &blk,
rdde->dde_repair_abd, DDK_GET_PSIZE(rddk), NULL, NULL,
ZIO_PRIORITY_SYNC_WRITE, ZIO_DDT_CHILD_FLAGS(zio), NULL));
}
zio_nowait(zio);
}
static void
ddt_repair_table(ddt_t *ddt, zio_t *rio)
{
spa_t *spa = ddt->ddt_spa;
ddt_entry_t *dde, *rdde_next, *rdde;
avl_tree_t *t = &ddt->ddt_repair_tree;
blkptr_t blk;
if (spa_sync_pass(spa) > 1)
return;
ddt_enter(ddt);
for (rdde = avl_first(t); rdde != NULL; rdde = rdde_next) {
rdde_next = AVL_NEXT(t, rdde);
avl_remove(&ddt->ddt_repair_tree, rdde);
ddt_exit(ddt);
ddt_bp_create(ddt->ddt_checksum, &rdde->dde_key, NULL, &blk);
dde = ddt_repair_start(ddt, &blk);
ddt_repair_entry(ddt, dde, rdde, rio);
ddt_repair_done(ddt, dde);
ddt_enter(ddt);
}
ddt_exit(ddt);
}
static void
ddt_sync_entry(ddt_t *ddt, ddt_entry_t *dde, dmu_tx_t *tx, uint64_t txg)
{
dsl_pool_t *dp = ddt->ddt_spa->spa_dsl_pool;
ddt_phys_t *ddp = dde->dde_phys;
ddt_key_t *ddk = &dde->dde_key;
enum ddt_type otype = dde->dde_type;
enum ddt_type ntype = DDT_TYPE_CURRENT;
enum ddt_class oclass = dde->dde_class;
enum ddt_class nclass;
uint64_t total_refcnt = 0;
ASSERT(dde->dde_loaded);
ASSERT(!dde->dde_loading);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
ASSERT(dde->dde_lead_zio[p] == NULL);
if (ddp->ddp_phys_birth == 0) {
ASSERT(ddp->ddp_refcnt == 0);
continue;
}
if (p == DDT_PHYS_DITTO) {
if (ddt_ditto_copies_needed(ddt, dde, NULL) == 0)
ddt_phys_free(ddt, ddk, ddp, txg);
continue;
}
if (ddp->ddp_refcnt == 0)
ddt_phys_free(ddt, ddk, ddp, txg);
total_refcnt += ddp->ddp_refcnt;
}
if (dde->dde_phys[DDT_PHYS_DITTO].ddp_phys_birth != 0)
nclass = DDT_CLASS_DITTO;
else if (total_refcnt > 1)
nclass = DDT_CLASS_DUPLICATE;
else
nclass = DDT_CLASS_UNIQUE;
if (otype != DDT_TYPES &&
(otype != ntype || oclass != nclass || total_refcnt == 0)) {
VERIFY(ddt_object_remove(ddt, otype, oclass, dde, tx) == 0);
ASSERT(ddt_object_lookup(ddt, otype, oclass, dde) == ENOENT);
}
if (total_refcnt != 0) {
dde->dde_type = ntype;
dde->dde_class = nclass;
ddt_stat_update(ddt, dde, 0);
if (!ddt_object_exists(ddt, ntype, nclass))
ddt_object_create(ddt, ntype, nclass, tx);
VERIFY(ddt_object_update(ddt, ntype, nclass, dde, tx) == 0);
/*
* If the class changes, the order that we scan this bp
* changes. If it decreases, we could miss it, so
* scan it right now. (This covers both class changing
* while we are doing ddt_walk(), and when we are
* traversing.)
*/
if (nclass < oclass) {
dsl_scan_ddt_entry(dp->dp_scan,
ddt->ddt_checksum, dde, tx);
}
}
}
static void
ddt_sync_table(ddt_t *ddt, dmu_tx_t *tx, uint64_t txg)
{
spa_t *spa = ddt->ddt_spa;
ddt_entry_t *dde;
void *cookie = NULL;
if (avl_numnodes(&ddt->ddt_tree) == 0)
return;
ASSERT(spa->spa_uberblock.ub_version >= SPA_VERSION_DEDUP);
if (spa->spa_ddt_stat_object == 0) {
spa->spa_ddt_stat_object = zap_create_link(ddt->ddt_os,
DMU_OT_DDT_STATS, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_DDT_STATS, tx);
}
while ((dde = avl_destroy_nodes(&ddt->ddt_tree, &cookie)) != NULL) {
ddt_sync_entry(ddt, dde, tx, txg);
ddt_free(dde);
}
for (enum ddt_type type = 0; type < DDT_TYPES; type++) {
uint64_t add, count = 0;
for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
if (ddt_object_exists(ddt, type, class)) {
ddt_object_sync(ddt, type, class, tx);
VERIFY(ddt_object_count(ddt, type, class,
&add) == 0);
count += add;
}
}
for (enum ddt_class class = 0; class < DDT_CLASSES; class++) {
if (count == 0 && ddt_object_exists(ddt, type, class))
ddt_object_destroy(ddt, type, class, tx);
}
}
bcopy(ddt->ddt_histogram, &ddt->ddt_histogram_cache,
sizeof (ddt->ddt_histogram));
spa->spa_dedup_dspace = ~0ULL;
}
void
ddt_sync(spa_t *spa, uint64_t txg)
{
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
dmu_tx_t *tx;
zio_t *rio;
ASSERT(spa_syncing_txg(spa) == txg);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
rio = zio_root(spa, NULL, NULL,
- ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
+ ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SELF_HEAL);
/*
* This function may cause an immediate scan of ddt blocks (see
* the comment above dsl_scan_ddt() for details). We set the
* scan's root zio here so that we can wait for any scan IOs in
* addition to the regular ddt IOs.
*/
ASSERT3P(scn->scn_zio_root, ==, NULL);
scn->scn_zio_root = rio;
for (enum zio_checksum c = 0; c < ZIO_CHECKSUM_FUNCTIONS; c++) {
ddt_t *ddt = spa->spa_ddt[c];
if (ddt == NULL)
continue;
ddt_sync_table(ddt, tx, txg);
ddt_repair_table(ddt, rio);
}
(void) zio_wait(rio);
scn->scn_zio_root = NULL;
dmu_tx_commit(tx);
}
int
ddt_walk(spa_t *spa, ddt_bookmark_t *ddb, ddt_entry_t *dde)
{
do {
do {
do {
ddt_t *ddt = spa->spa_ddt[ddb->ddb_checksum];
int error = ENOENT;
if (ddt_object_exists(ddt, ddb->ddb_type,
ddb->ddb_class)) {
error = ddt_object_walk(ddt,
ddb->ddb_type, ddb->ddb_class,
&ddb->ddb_cursor, dde);
}
dde->dde_type = ddb->ddb_type;
dde->dde_class = ddb->ddb_class;
if (error == 0)
return (0);
if (error != ENOENT)
return (error);
ddb->ddb_cursor = 0;
} while (++ddb->ddb_checksum < ZIO_CHECKSUM_FUNCTIONS);
ddb->ddb_checksum = 0;
} while (++ddb->ddb_type < DDT_TYPES);
ddb->ddb_type = 0;
} while (++ddb->ddb_class < DDT_CLASSES);
return (SET_ERROR(ENOENT));
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_dedup_prefetch, int, 0644);
MODULE_PARM_DESC(zfs_dedup_prefetch, "Enable prefetching dedup-ed blks");
#endif
diff --git a/module/zfs/dmu.c b/module/zfs/dmu.c
index 64c898198dea..0352393dc276 100644
--- a/module/zfs/dmu.c
+++ b/module/zfs/dmu.c
@@ -1,2543 +1,2667 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2016, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_prop.h>
#include <sys/dmu_zfetch.h>
#include <sys/zfs_ioctl.h>
#include <sys/zap.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/sa.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/trace_dmu.h>
#include <sys/zfs_rlock.h>
#ifdef _KERNEL
#include <sys/vmsystm.h>
#include <sys/zfs_znode.h>
#endif
/*
* Enable/disable nopwrite feature.
*/
int zfs_nopwrite_enabled = 1;
/*
* Tunable to control percentage of dirtied blocks from frees in one TXG.
* After this threshold is crossed, additional dirty blocks from frees
* wait until the next TXG.
* A value of zero will disable this throttle.
*/
unsigned long zfs_per_txg_dirty_frees_percent = 30;
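/*
 * [Editor's illustrative note, not part of this diff.] This tunable is
 * applied in dmu_free_long_range_impl() below.  A small sketch of that
 * computation; the 4 GiB zfs_dirty_data_max used in the comment is only an
 * example, not a default: with 4 GiB and the default of 30, roughly 1.2 GiB
 * of frees may be dirtied per TXG before further frees wait for the next
 * TXG.
 */
static unsigned long
dirty_frees_threshold_example(unsigned long pct, unsigned long dirty_data_max)
{
	/* mirrors the threshold selection in dmu_free_long_range_impl() */
	if (pct <= 100)
		return (pct * dirty_data_max / 100);
	return (dirty_data_max / 4);
}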
/*
* Enable/disable forcing txg sync when dirty in dmu_offset_next.
*/
int zfs_dmu_offset_next_sync = 0;
+/*
+ * This can be used for testing, to ensure that certain actions happen
+ * while in the middle of a remap (which might otherwise complete too
+ * quickly).
+ */
+int zfs_object_remap_one_indirect_delay_ticks = 0;
+
const dmu_object_type_info_t dmu_ot[DMU_OT_NUMTYPES] = {
{ DMU_BSWAP_UINT8, TRUE, FALSE, "unallocated" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "object directory" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "object array" },
{ DMU_BSWAP_UINT8, TRUE, FALSE, "packed nvlist" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "packed nvlist size" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "bpobj" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "bpobj header" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "SPA space map header" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "SPA space map" },
{ DMU_BSWAP_UINT64, TRUE, TRUE, "ZIL intent log" },
{ DMU_BSWAP_DNODE, TRUE, TRUE, "DMU dnode" },
{ DMU_BSWAP_OBJSET, TRUE, FALSE, "DMU objset" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "DSL directory" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DSL directory child map"},
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DSL dataset snap map" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DSL props" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "DSL dataset" },
{ DMU_BSWAP_ZNODE, TRUE, FALSE, "ZFS znode" },
{ DMU_BSWAP_OLDACL, TRUE, TRUE, "ZFS V0 ACL" },
{ DMU_BSWAP_UINT8, FALSE, TRUE, "ZFS plain file" },
{ DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS directory" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "ZFS master node" },
{ DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS delete queue" },
{ DMU_BSWAP_UINT8, FALSE, TRUE, "zvol object" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "zvol prop" },
{ DMU_BSWAP_UINT8, FALSE, TRUE, "other uint8[]" },
{ DMU_BSWAP_UINT64, FALSE, TRUE, "other uint64[]" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "other ZAP" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "persistent error log" },
{ DMU_BSWAP_UINT8, TRUE, FALSE, "SPA history" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "SPA history offsets" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "Pool properties" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DSL permissions" },
{ DMU_BSWAP_ACL, TRUE, TRUE, "ZFS ACL" },
{ DMU_BSWAP_UINT8, TRUE, TRUE, "ZFS SYSACL" },
{ DMU_BSWAP_UINT8, TRUE, TRUE, "FUID table" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "FUID table size" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DSL dataset next clones"},
{ DMU_BSWAP_ZAP, TRUE, FALSE, "scan work queue" },
{ DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS user/group/project used" },
{ DMU_BSWAP_ZAP, TRUE, TRUE, "ZFS user/group/project quota"},
{ DMU_BSWAP_ZAP, TRUE, FALSE, "snapshot refcount tags"},
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DDT ZAP algorithm" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DDT statistics" },
{ DMU_BSWAP_UINT8, TRUE, TRUE, "System attributes" },
{ DMU_BSWAP_ZAP, TRUE, TRUE, "SA master node" },
{ DMU_BSWAP_ZAP, TRUE, TRUE, "SA attr registration" },
{ DMU_BSWAP_ZAP, TRUE, TRUE, "SA attr layouts" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "scan translations" },
{ DMU_BSWAP_UINT8, FALSE, TRUE, "deduplicated block" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DSL deadlist map" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "DSL deadlist map hdr" },
{ DMU_BSWAP_ZAP, TRUE, FALSE, "DSL dir clones" },
{ DMU_BSWAP_UINT64, TRUE, FALSE, "bpobj subobj" }
};
const dmu_object_byteswap_info_t dmu_ot_byteswap[DMU_BSWAP_NUMFUNCS] = {
{ byteswap_uint8_array, "uint8" },
{ byteswap_uint16_array, "uint16" },
{ byteswap_uint32_array, "uint32" },
{ byteswap_uint64_array, "uint64" },
{ zap_byteswap, "zap" },
{ dnode_buf_byteswap, "dnode" },
{ dmu_objset_byteswap, "objset" },
{ zfs_znode_byteswap, "znode" },
{ zfs_oldacl_byteswap, "oldacl" },
{ zfs_acl_byteswap, "acl" }
};
int
dmu_buf_hold_noread_by_dnode(dnode_t *dn, uint64_t offset,
void *tag, dmu_buf_t **dbp)
{
uint64_t blkid;
dmu_buf_impl_t *db;
blkid = dbuf_whichblock(dn, 0, offset);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
*dbp = &db->db;
return (0);
}
int
dmu_buf_hold_noread(objset_t *os, uint64_t object, uint64_t offset,
void *tag, dmu_buf_t **dbp)
{
dnode_t *dn;
uint64_t blkid;
dmu_buf_impl_t *db;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
blkid = dbuf_whichblock(dn, 0, offset);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
db = dbuf_hold(dn, blkid, tag);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
*dbp = &db->db;
return (err);
}
int
dmu_buf_hold_by_dnode(dnode_t *dn, uint64_t offset,
void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
err = dmu_buf_hold_noread_by_dnode(dn, offset, tag, dbp);
if (err == 0) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
err = dbuf_read(db, NULL, db_flags);
if (err != 0) {
dbuf_rele(db, tag);
*dbp = NULL;
}
}
return (err);
}
int
dmu_buf_hold(objset_t *os, uint64_t object, uint64_t offset,
void *tag, dmu_buf_t **dbp, int flags)
{
int err;
int db_flags = DB_RF_CANFAIL;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
err = dmu_buf_hold_noread(os, object, offset, tag, dbp);
if (err == 0) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)(*dbp);
err = dbuf_read(db, NULL, db_flags);
if (err != 0) {
dbuf_rele(db, tag);
*dbp = NULL;
}
}
return (err);
}
int
dmu_bonus_max(void)
{
return (DN_OLD_MAX_BONUSLEN);
}
int
dmu_set_bonus(dmu_buf_t *db_fake, int newsize, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int error;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn->dn_bonus != db) {
error = SET_ERROR(EINVAL);
} else if (newsize < 0 || newsize > db_fake->db_size) {
error = SET_ERROR(EINVAL);
} else {
dnode_setbonuslen(dn, newsize, tx);
error = 0;
}
DB_DNODE_EXIT(db);
return (error);
}
int
dmu_set_bonustype(dmu_buf_t *db_fake, dmu_object_type_t type, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int error;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (!DMU_OT_IS_VALID(type)) {
error = SET_ERROR(EINVAL);
} else if (dn->dn_bonus != db) {
error = SET_ERROR(EINVAL);
} else {
dnode_setbonus_type(dn, type, tx);
error = 0;
}
DB_DNODE_EXIT(db);
return (error);
}
dmu_object_type_t
dmu_get_bonustype(dmu_buf_t *db_fake)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
dmu_object_type_t type;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
type = dn->dn_bonustype;
DB_DNODE_EXIT(db);
return (type);
}
int
dmu_rm_spill(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
dnode_t *dn;
int error;
error = dnode_hold(os, object, FTAG, &dn);
dbuf_rm_spill(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_rm_spill(dn, tx);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (error);
}
/*
* returns ENOENT, EIO, or 0.
*/
int
dmu_bonus_hold_impl(objset_t *os, uint64_t object, void *tag, uint32_t flags,
dmu_buf_t **dbp)
{
dnode_t *dn;
dmu_buf_impl_t *db;
int error;
uint32_t db_flags = DB_RF_MUST_SUCCEED;
if (flags & DMU_READ_NO_PREFETCH)
db_flags |= DB_RF_NOPREFETCH;
if (flags & DMU_READ_NO_DECRYPT)
db_flags |= DB_RF_NO_DECRYPT;
error = dnode_hold(os, object, FTAG, &dn);
if (error)
return (error);
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_bonus == NULL) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_bonus == NULL)
dbuf_create_bonus(dn);
}
db = dn->dn_bonus;
/* as long as the bonus buf is held, the dnode will be held */
if (refcount_add(&db->db_holds, tag) == 1) {
VERIFY(dnode_add_ref(dn, db));
atomic_inc_32(&dn->dn_dbufs_count);
}
/*
* Wait to drop dn_struct_rwlock until after adding the bonus dbuf's
* hold and incrementing the dbuf count to ensure that dnode_move() sees
* a dnode hold for every dbuf.
*/
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
error = dbuf_read(db, NULL, db_flags);
if (error) {
dnode_evict_bonus(dn);
dbuf_rele(db, tag);
*dbp = NULL;
return (error);
}
*dbp = &db->db;
return (0);
}
int
dmu_bonus_hold(objset_t *os, uint64_t obj, void *tag, dmu_buf_t **dbp)
{
return (dmu_bonus_hold_impl(os, obj, tag, DMU_READ_NO_PREFETCH, dbp));
}
/*
* returns ENOENT, EIO, or 0.
*
* This interface will allocate a blank spill dbuf when a spill blk
* doesn't already exist on the dnode.
*
* If you only want to find an already existing spill db, then

* dmu_spill_hold_existing() should be used.
*/
int
dmu_spill_hold_by_dnode(dnode_t *dn, uint32_t flags, void *tag, dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = NULL;
int err;
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_enter(&dn->dn_struct_rwlock, RW_READER);
db = dbuf_hold(dn, DMU_SPILL_BLKID, tag);
if ((flags & DB_RF_HAVESTRUCT) == 0)
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL) {
*dbp = NULL;
return (SET_ERROR(EIO));
}
err = dbuf_read(db, NULL, flags);
if (err == 0)
*dbp = &db->db;
else {
dbuf_rele(db, tag);
*dbp = NULL;
}
return (err);
}
int
dmu_spill_hold_existing(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
int err;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_SA) {
err = SET_ERROR(EINVAL);
} else {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (!dn->dn_have_spill) {
err = SET_ERROR(ENOENT);
} else {
err = dmu_spill_hold_by_dnode(dn,
DB_RF_HAVESTRUCT | DB_RF_CANFAIL, tag, dbp);
}
rw_exit(&dn->dn_struct_rwlock);
}
DB_DNODE_EXIT(db);
return (err);
}
int
dmu_spill_hold_by_bonus(dmu_buf_t *bonus, void *tag, dmu_buf_t **dbp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)bonus;
dnode_t *dn;
int err;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_spill_hold_by_dnode(dn, DB_RF_CANFAIL, tag, dbp);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Note: longer-term, we should modify all of the dmu_buf_*() interfaces
* to take a held dnode rather than <os, object> -- the lookup is wasteful,
* and can induce severe lock contention when writing to several files
* whose dnodes are in the same block.
*/
static int
dmu_buf_hold_array_by_dnode(dnode_t *dn, uint64_t offset, uint64_t length,
boolean_t read, void *tag, int *numbufsp, dmu_buf_t ***dbpp, uint32_t flags)
{
dmu_buf_t **dbp;
uint64_t blkid, nblks, i;
uint32_t dbuf_flags;
int err;
zio_t *zio;
ASSERT(length <= DMU_MAX_ACCESS);
/*
* Note: We directly notify the prefetch code of this read, so that
* we can tell it about the multi-block read. dbuf_read() only knows
* about the one block it is accessing.
*/
dbuf_flags = DB_RF_CANFAIL | DB_RF_NEVERWAIT | DB_RF_HAVESTRUCT |
DB_RF_NOPREFETCH;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_datablkshift) {
int blkshift = dn->dn_datablkshift;
nblks = (P2ROUNDUP(offset + length, 1ULL << blkshift) -
P2ALIGN(offset, 1ULL << blkshift)) >> blkshift;
} else {
if (offset + length > dn->dn_datablksz) {
zfs_panic_recover("zfs: accessing past end of object "
"%llx/%llx (size=%u access=%llu+%llu)",
(longlong_t)dn->dn_objset->
os_dsl_dataset->ds_object,
(longlong_t)dn->dn_object, dn->dn_datablksz,
(longlong_t)offset, (longlong_t)length);
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(EIO));
}
nblks = 1;
}
dbp = kmem_zalloc(sizeof (dmu_buf_t *) * nblks, KM_SLEEP);
zio = zio_root(dn->dn_objset->os_spa, NULL, NULL, ZIO_FLAG_CANFAIL);
blkid = dbuf_whichblock(dn, 0, offset);
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = dbuf_hold(dn, blkid + i, tag);
if (db == NULL) {
rw_exit(&dn->dn_struct_rwlock);
dmu_buf_rele_array(dbp, nblks, tag);
zio_nowait(zio);
return (SET_ERROR(EIO));
}
/* initiate async i/o */
if (read)
(void) dbuf_read(db, zio, dbuf_flags);
dbp[i] = &db->db;
}
if ((flags & DMU_READ_NO_PREFETCH) == 0 &&
DNODE_META_IS_CACHEABLE(dn) && length <= zfetch_array_rd_sz) {
dmu_zfetch(&dn->dn_zfetch, blkid, nblks,
read && DNODE_IS_CACHEABLE(dn));
}
rw_exit(&dn->dn_struct_rwlock);
/* wait for async i/o */
err = zio_wait(zio);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
return (err);
}
/* wait for other io to complete */
if (read) {
for (i = 0; i < nblks; i++) {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)dbp[i];
mutex_enter(&db->db_mtx);
while (db->db_state == DB_READ ||
db->db_state == DB_FILL)
cv_wait(&db->db_changed, &db->db_mtx);
if (db->db_state == DB_UNCACHED)
err = SET_ERROR(EIO);
mutex_exit(&db->db_mtx);
if (err) {
dmu_buf_rele_array(dbp, nblks, tag);
return (err);
}
}
}
*numbufsp = nblks;
*dbpp = dbp;
return (0);
}
static int
dmu_buf_hold_array(objset_t *os, uint64_t object, uint64_t offset,
uint64_t length, int read, void *tag, int *numbufsp, dmu_buf_t ***dbpp)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
numbufsp, dbpp, DMU_READ_PREFETCH);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_buf_hold_array_by_bonus(dmu_buf_t *db_fake, uint64_t offset,
uint64_t length, boolean_t read, void *tag, int *numbufsp,
dmu_buf_t ***dbpp)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
int err;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_buf_hold_array_by_dnode(dn, offset, length, read, tag,
numbufsp, dbpp, DMU_READ_PREFETCH);
DB_DNODE_EXIT(db);
return (err);
}
void
dmu_buf_rele_array(dmu_buf_t **dbp_fake, int numbufs, void *tag)
{
int i;
dmu_buf_impl_t **dbp = (dmu_buf_impl_t **)dbp_fake;
if (numbufs == 0)
return;
for (i = 0; i < numbufs; i++) {
if (dbp[i])
dbuf_rele(dbp[i], tag);
}
kmem_free(dbp, sizeof (dmu_buf_t *) * numbufs);
}
/*
* Issue prefetch i/os for the given blocks. If level is greater than 0, the
* indirect blocks prefetched will be those that point to the blocks containing
* the data starting at offset, and continuing to offset + len.
*
* Note that if the indirect blocks above the blocks being prefetched are not
* in cache, they will be asynchronously read in.
*/
void
dmu_prefetch(objset_t *os, uint64_t object, int64_t level, uint64_t offset,
uint64_t len, zio_priority_t pri)
{
dnode_t *dn;
uint64_t blkid;
int nblks, err;
if (len == 0) { /* they're interested in the bonus buffer */
dn = DMU_META_DNODE(os);
if (object == 0 || object >= DN_MAX_OBJECT)
return;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, level,
object * sizeof (dnode_phys_t));
dbuf_prefetch(dn, level, blkid, pri, 0);
rw_exit(&dn->dn_struct_rwlock);
return;
}
/*
* XXX - Note, if the dnode for the requested object is not
* already cached, we will do a *synchronous* read in the
* dnode_hold() call. The same is true for any indirects.
*/
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
/*
* offset + len - 1 is the last byte we want to prefetch for, and offset
* is the first. Then dbuf_whichblock(dn, level, offset + len - 1) is the
* last block we want to prefetch, and dbuf_whichblock(dn, level,
* offset) is the first. Then the number we need to prefetch is the
* last - first + 1.
*/
if (level > 0 || dn->dn_datablkshift != 0) {
nblks = dbuf_whichblock(dn, level, offset + len - 1) -
dbuf_whichblock(dn, level, offset) + 1;
} else {
nblks = (offset < dn->dn_datablksz);
}
if (nblks != 0) {
blkid = dbuf_whichblock(dn, level, offset);
for (int i = 0; i < nblks; i++)
dbuf_prefetch(dn, level, blkid + i, pri, 0);
}
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
}
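/*
 * [Editor's illustrative sketch, not part of this diff.] For level 0,
 * dbuf_whichblock() is effectively offset >> dn_datablkshift, so the
 * "last - first + 1" computation above turns a byte range into a block
 * count.  A standalone example with an assumed 128K (1 << 17) block size
 * (names here are hypothetical):
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
prefetch_nblks(uint64_t offset, uint64_t len, int datablkshift)
{
	uint64_t first = offset >> datablkshift;
	uint64_t last = (offset + len - 1) >> datablkshift;

	return (last - first + 1);
}

static void
prefetch_nblks_demo(void)
{
	/* bytes 100000..399999 touch blocks 0..3 of a 128K-block object */
	assert(prefetch_nblks(100000, 300000, 17) == 4);
}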
/*
* Get the next "chunk" of file data to free. We traverse the file from
* the end so that the file gets shorter over time (if we crash in the
* middle, this will leave us in a better state). We find allocated file
* data by simply searching the allocated level 1 indirects.
*
* On input, *start should be the first offset that does not need to be
* freed (e.g. "offset + length"). On return, *start will be the first
* offset that should be freed.
*/
static int
get_next_chunk(dnode_t *dn, uint64_t *start, uint64_t minimum)
{
uint64_t maxblks = DMU_MAX_ACCESS >> (dn->dn_indblkshift + 1);
/* bytes of data covered by a level-1 indirect block */
uint64_t iblkrange =
dn->dn_datablksz * EPB(dn->dn_indblkshift, SPA_BLKPTRSHIFT);
ASSERT3U(minimum, <=, *start);
if (*start - minimum <= iblkrange * maxblks) {
*start = minimum;
return (0);
}
ASSERT(ISP2(iblkrange));
for (uint64_t blks = 0; *start > minimum && blks < maxblks; blks++) {
int err;
/*
* dnode_next_offset(BACKWARDS) will find an allocated L1
* indirect block at or before the input offset. We must
* decrement *start so that it is at the end of the region
* to search.
*/
(*start)--;
err = dnode_next_offset(dn,
DNODE_FIND_BACKWARDS, start, 2, 1, 0);
/* if there are no indirect blocks before start, we are done */
if (err == ESRCH) {
*start = minimum;
break;
} else if (err != 0) {
return (err);
}
/* set start to the beginning of this L1 indirect */
*start = P2ALIGN(*start, iblkrange);
}
if (*start < minimum)
*start = minimum;
return (0);
}
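/*
 * [Editor's illustrative sketch, not part of this diff.] On input, *start is
 * the first offset that does NOT need freeing; on return it is the first
 * offset that does, so the chunk to free is [new *start, old *start).
 * Chunks are built from whole L1-indirect spans of file data.  A standalone
 * calculation of that span, assuming 128K data and indirect blocks and
 * 128-byte block pointers (SPA_BLKPTRSHIFT == 7); names are hypothetical:
 */
#include <assert.h>
#include <stdint.h>

static uint64_t
l1_span(uint64_t datablksz, int indblkshift, int blkptrshift)
{
	/* block pointers per indirect block times bytes per data block */
	return (datablksz * (1ULL << (indblkshift - blkptrshift)));
}

static void
l1_span_demo(void)
{
	/* 1024 block pointers * 128K data blocks = 128 MiB per L1 */
	assert(l1_span(128 * 1024, 17, 7) == 128ULL * 1024 * 1024);
}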
/*
* If this objset is of type DMU_OST_ZFS, return true if the vfs's unmounted
* flag is set; otherwise return false.
* Used below in dmu_free_long_range_impl() to enable aborting when unmounting.
*/
/*ARGSUSED*/
static boolean_t
dmu_objset_zfs_unmounting(objset_t *os)
{
#ifdef _KERNEL
if (dmu_objset_type(os) == DMU_OST_ZFS)
return (zfs_get_vfs_flag_unmounted(os));
#endif
return (B_FALSE);
}
static int
dmu_free_long_range_impl(objset_t *os, dnode_t *dn, uint64_t offset,
uint64_t length, boolean_t raw)
{
uint64_t object_size;
int err;
uint64_t dirty_frees_threshold;
dsl_pool_t *dp = dmu_objset_pool(os);
if (dn == NULL)
return (SET_ERROR(EINVAL));
object_size = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
if (offset >= object_size)
return (0);
if (zfs_per_txg_dirty_frees_percent <= 100)
dirty_frees_threshold =
zfs_per_txg_dirty_frees_percent * zfs_dirty_data_max / 100;
else
dirty_frees_threshold = zfs_dirty_data_max / 4;
if (length == DMU_OBJECT_END || offset + length > object_size)
length = object_size - offset;
while (length != 0) {
uint64_t chunk_end, chunk_begin, chunk_len;
uint64_t long_free_dirty_all_txgs = 0;
dmu_tx_t *tx;
if (dmu_objset_zfs_unmounting(dn->dn_objset))
return (SET_ERROR(EINTR));
chunk_end = chunk_begin = offset + length;
/* move chunk_begin backwards to the beginning of this chunk */
err = get_next_chunk(dn, &chunk_begin, offset);
if (err)
return (err);
ASSERT3U(chunk_begin, >=, offset);
ASSERT3U(chunk_begin, <=, chunk_end);
chunk_len = chunk_end - chunk_begin;
mutex_enter(&dp->dp_lock);
for (int t = 0; t < TXG_SIZE; t++) {
long_free_dirty_all_txgs +=
dp->dp_long_free_dirty_pertxg[t];
}
mutex_exit(&dp->dp_lock);
/*
* To avoid filling up a TXG with just frees, wait for
* the next TXG to open before freeing more chunks if
* we have reached the threshold of frees.
*/
if (dirty_frees_threshold != 0 &&
long_free_dirty_all_txgs >= dirty_frees_threshold) {
txg_wait_open(dp, 0);
continue;
}
tx = dmu_tx_create(os);
dmu_tx_hold_free(tx, dn->dn_object, chunk_begin, chunk_len);
/*
* Mark this transaction as typically resulting in a net
* reduction in space used.
*/
dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err) {
dmu_tx_abort(tx);
return (err);
}
mutex_enter(&dp->dp_lock);
dp->dp_long_free_dirty_pertxg[dmu_tx_get_txg(tx) & TXG_MASK] +=
chunk_len;
mutex_exit(&dp->dp_lock);
DTRACE_PROBE3(free__long__range,
uint64_t, long_free_dirty_all_txgs, uint64_t, chunk_len,
uint64_t, dmu_tx_get_txg(tx));
dnode_free_range(dn, chunk_begin, chunk_len, tx);
/* if this is a raw free, mark the dirty record as such */
if (raw) {
dbuf_dirty_record_t *dr = dn->dn_dbuf->db_last_dirty;
while (dr != NULL && dr->dr_txg > tx->tx_txg)
dr = dr->dr_next;
if (dr != NULL && dr->dr_txg == tx->tx_txg) {
dr->dt.dl.dr_raw = B_TRUE;
dn->dn_objset->os_next_write_raw
[tx->tx_txg & TXG_MASK] = B_TRUE;
}
}
dmu_tx_commit(tx);
length -= chunk_len;
}
return (0);
}
int
dmu_free_long_range(objset_t *os, uint64_t object,
uint64_t offset, uint64_t length)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
err = dmu_free_long_range_impl(os, dn, offset, length, B_FALSE);
/*
* It is important to zero out the maxblkid when freeing the entire
* file, so that (a) subsequent calls to dmu_free_long_range_impl()
* will take the fast path, and (b) dnode_reallocate() can verify
* that the entire file has been freed.
*/
if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
dn->dn_maxblkid = 0;
dnode_rele(dn, FTAG);
return (err);
}
/*
* This function is equivalent to dmu_free_long_range(), but also
* marks the new dirty record as a raw write.
*/
int
dmu_free_long_range_raw(objset_t *os, uint64_t object,
uint64_t offset, uint64_t length)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
err = dmu_free_long_range_impl(os, dn, offset, length, B_TRUE);
/*
* It is important to zero out the maxblkid when freeing the entire
* file, so that (a) subsequent calls to dmu_free_long_range_impl()
* will take the fast path, and (b) dnode_reallocate() can verify
* that the entire file has been freed.
*/
if (err == 0 && offset == 0 && length == DMU_OBJECT_END)
dn->dn_maxblkid = 0;
dnode_rele(dn, FTAG);
return (err);
}
static int
dmu_free_long_object_impl(objset_t *os, uint64_t object, boolean_t raw)
{
dmu_tx_t *tx;
int err;
err = dmu_free_long_range(os, object, 0, DMU_OBJECT_END);
if (err != 0)
return (err);
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, object);
dmu_tx_hold_free(tx, object, 0, DMU_OBJECT_END);
dmu_tx_mark_netfree(tx);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err == 0) {
if (raw)
err = dmu_object_dirty_raw(os, object, tx);
if (err == 0)
err = dmu_object_free(os, object, tx);
dmu_tx_commit(tx);
} else {
dmu_tx_abort(tx);
}
return (err);
}
int
dmu_free_long_object(objset_t *os, uint64_t object)
{
return (dmu_free_long_object_impl(os, object, B_FALSE));
}
int
dmu_free_long_object_raw(objset_t *os, uint64_t object)
{
return (dmu_free_long_object_impl(os, object, B_TRUE));
}
int
dmu_free_range(objset_t *os, uint64_t object, uint64_t offset,
uint64_t size, dmu_tx_t *tx)
{
dnode_t *dn;
int err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
ASSERT(offset < UINT64_MAX);
ASSERT(size == DMU_OBJECT_END || size <= UINT64_MAX - offset);
dnode_free_range(dn, offset, size, tx);
dnode_rele(dn, FTAG);
return (0);
}
static int
dmu_read_impl(dnode_t *dn, uint64_t offset, uint64_t size,
void *buf, uint32_t flags)
{
dmu_buf_t **dbp;
int numbufs, err = 0;
/*
* Deal with odd block sizes, where there can't be data past the first
* block. If we ever do the tail block optimization, we will need to
* handle that here as well.
*/
if (dn->dn_maxblkid == 0) {
uint64_t newsz = offset > dn->dn_datablksz ? 0 :
MIN(size, dn->dn_datablksz - offset);
bzero((char *)buf + newsz, size - newsz);
size = newsz;
}
while (size > 0) {
uint64_t mylen = MIN(size, DMU_MAX_ACCESS / 2);
int i;
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
err = dmu_buf_hold_array_by_dnode(dn, offset, mylen,
TRUE, FTAG, &numbufs, &dbp, flags);
if (err)
break;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = offset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
(void) memcpy(buf, (char *)db->db_data + bufoff, tocpy);
offset += tocpy;
size -= tocpy;
buf = (char *)buf + tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
return (err);
}
int
dmu_read(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
void *buf, uint32_t flags)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0)
return (err);
err = dmu_read_impl(dn, offset, size, buf, flags);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_read_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size, void *buf,
uint32_t flags)
{
return (dmu_read_impl(dn, offset, size, buf, flags));
}
static void
dmu_write_impl(dmu_buf_t **dbp, int numbufs, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
int i;
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = offset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
dmu_buf_will_fill(db, tx);
else
dmu_buf_will_dirty(db, tx);
(void) memcpy((char *)db->db_data + bufoff, buf, tocpy);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
offset += tocpy;
size -= tocpy;
buf = (char *)buf + tocpy;
}
}
void
dmu_write(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
if (size == 0)
return;
VERIFY0(dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_write_by_dnode(dnode_t *dn, uint64_t offset, uint64_t size,
const void *buf, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
if (size == 0)
return;
VERIFY0(dmu_buf_hold_array_by_dnode(dn, offset, size,
FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH));
dmu_write_impl(dbp, numbufs, offset, size, buf, tx);
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
+static int
+dmu_object_remap_one_indirect(objset_t *os, dnode_t *dn,
+ uint64_t last_removal_txg, uint64_t offset)
+{
+ uint64_t l1blkid = dbuf_whichblock(dn, 1, offset);
+ int err = 0;
+
+ rw_enter(&dn->dn_struct_rwlock, RW_READER);
+ dmu_buf_impl_t *dbuf = dbuf_hold_level(dn, 1, l1blkid, FTAG);
+ ASSERT3P(dbuf, !=, NULL);
+
+ /*
+ * If the block hasn't been written yet, this default will ensure
+ * we don't try to remap it.
+ */
+ uint64_t birth = UINT64_MAX;
+ ASSERT3U(last_removal_txg, !=, UINT64_MAX);
+ if (dbuf->db_blkptr != NULL)
+ birth = dbuf->db_blkptr->blk_birth;
+ rw_exit(&dn->dn_struct_rwlock);
+
+ /*
+ * If this L1 was already written after the last removal, then we've
+ * already tried to remap it.
+ */
+ if (birth <= last_removal_txg &&
+ dbuf_read(dbuf, NULL, DB_RF_MUST_SUCCEED) == 0 &&
+ dbuf_can_remap(dbuf)) {
+ dmu_tx_t *tx = dmu_tx_create(os);
+ dmu_tx_hold_remap_l1indirect(tx, dn->dn_object);
+ err = dmu_tx_assign(tx, TXG_WAIT);
+ if (err == 0) {
+ (void) dbuf_dirty(dbuf, tx);
+ dmu_tx_commit(tx);
+ } else {
+ dmu_tx_abort(tx);
+ }
+ }
+
+ dbuf_rele(dbuf, FTAG);
+
+ delay(zfs_object_remap_one_indirect_delay_ticks);
+
+ return (err);
+}
+
+/*
+ * Remap all blockpointers in the object, if possible, so that they reference
+ * only concrete vdevs.
+ *
+ * To do this, iterate over the L0 blockpointers and remap any that reference
+ * an indirect vdev. Note that we only examine L0 blockpointers; since we
+ * cannot guarantee that we can remap all blockpointers anyway (due to split
+ * blocks), we do not want to make the code unnecessarily complicated to
+ * catch the unlikely case that there is an L1 block on an indirect vdev that
+ * contains no indirect blockpointers.
+ */
+int
+dmu_object_remap_indirects(objset_t *os, uint64_t object,
+ uint64_t last_removal_txg)
+{
+ uint64_t offset, l1span;
+ int err;
+ dnode_t *dn;
+
+ err = dnode_hold(os, object, FTAG, &dn);
+ if (err != 0) {
+ return (err);
+ }
+
+ if (dn->dn_nlevels <= 1) {
+ if (issig(JUSTLOOKING) && issig(FORREAL)) {
+ err = SET_ERROR(EINTR);
+ }
+
+ /*
+ * If the dnode has no indirect blocks, we cannot dirty them.
+ * We still want to remap the blkptr(s) in the dnode if
+ * appropriate, so mark it as dirty.
+ */
+ if (err == 0 && dnode_needs_remap(dn)) {
+ dmu_tx_t *tx = dmu_tx_create(os);
+ dmu_tx_hold_bonus(tx, dn->dn_object);
+ if ((err = dmu_tx_assign(tx, TXG_WAIT)) == 0) {
+ dnode_setdirty(dn, tx);
+ dmu_tx_commit(tx);
+ } else {
+ dmu_tx_abort(tx);
+ }
+ }
+
+ dnode_rele(dn, FTAG);
+ return (err);
+ }
+
+ offset = 0;
+ l1span = 1ULL << (dn->dn_indblkshift - SPA_BLKPTRSHIFT +
+ dn->dn_datablkshift);
+ /*
+ * Find the next L1 indirect that is not a hole.
+ */
+ while (dnode_next_offset(dn, 0, &offset, 2, 1, 0) == 0) {
+ if (issig(JUSTLOOKING) && issig(FORREAL)) {
+ err = SET_ERROR(EINTR);
+ break;
+ }
+ if ((err = dmu_object_remap_one_indirect(os, dn,
+ last_removal_txg, offset)) != 0) {
+ break;
+ }
+ offset += l1span;
+ }
+
+ dnode_rele(dn, FTAG);
+ return (err);
+}
+
void
dmu_prealloc(objset_t *os, uint64_t object, uint64_t offset, uint64_t size,
dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs, i;
if (size == 0)
return;
VERIFY(0 == dmu_buf_hold_array(os, object, offset, size,
FALSE, FTAG, &numbufs, &dbp));
for (i = 0; i < numbufs; i++) {
dmu_buf_t *db = dbp[i];
dmu_buf_will_not_fill(db, tx);
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
}
void
dmu_write_embedded(objset_t *os, uint64_t object, uint64_t offset,
void *data, uint8_t etype, uint8_t comp, int uncompressed_size,
int compressed_size, int byteorder, dmu_tx_t *tx)
{
dmu_buf_t *db;
ASSERT3U(etype, <, NUM_BP_EMBEDDED_TYPES);
ASSERT3U(comp, <, ZIO_COMPRESS_FUNCTIONS);
VERIFY0(dmu_buf_hold_noread(os, object, offset,
FTAG, &db));
dmu_buf_write_embedded(db,
data, (bp_embedded_type_t)etype, (enum zio_compress)comp,
uncompressed_size, compressed_size, byteorder, tx);
dmu_buf_rele(db, FTAG);
}
/*
* DMU support for xuio
*/
kstat_t *xuio_ksp = NULL;
typedef struct xuio_stats {
/* loaned yet not returned arc_buf */
kstat_named_t xuiostat_onloan_rbuf;
kstat_named_t xuiostat_onloan_wbuf;
/* whether a copy is made when loaning out a read buffer */
kstat_named_t xuiostat_rbuf_copied;
kstat_named_t xuiostat_rbuf_nocopy;
/* whether a copy is made when assigning a write buffer */
kstat_named_t xuiostat_wbuf_copied;
kstat_named_t xuiostat_wbuf_nocopy;
} xuio_stats_t;
static xuio_stats_t xuio_stats = {
{ "onloan_read_buf", KSTAT_DATA_UINT64 },
{ "onloan_write_buf", KSTAT_DATA_UINT64 },
{ "read_buf_copied", KSTAT_DATA_UINT64 },
{ "read_buf_nocopy", KSTAT_DATA_UINT64 },
{ "write_buf_copied", KSTAT_DATA_UINT64 },
{ "write_buf_nocopy", KSTAT_DATA_UINT64 }
};
#define XUIOSTAT_INCR(stat, val) \
atomic_add_64(&xuio_stats.stat.value.ui64, (val))
#define XUIOSTAT_BUMP(stat) XUIOSTAT_INCR(stat, 1)
#ifdef HAVE_UIO_ZEROCOPY
int
dmu_xuio_init(xuio_t *xuio, int nblk)
{
dmu_xuio_t *priv;
uio_t *uio = &xuio->xu_uio;
uio->uio_iovcnt = nblk;
uio->uio_iov = kmem_zalloc(nblk * sizeof (iovec_t), KM_SLEEP);
priv = kmem_zalloc(sizeof (dmu_xuio_t), KM_SLEEP);
priv->cnt = nblk;
priv->bufs = kmem_zalloc(nblk * sizeof (arc_buf_t *), KM_SLEEP);
priv->iovp = (iovec_t *)uio->uio_iov;
XUIO_XUZC_PRIV(xuio) = priv;
if (XUIO_XUZC_RW(xuio) == UIO_READ)
XUIOSTAT_INCR(xuiostat_onloan_rbuf, nblk);
else
XUIOSTAT_INCR(xuiostat_onloan_wbuf, nblk);
return (0);
}
void
dmu_xuio_fini(xuio_t *xuio)
{
dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
int nblk = priv->cnt;
kmem_free(priv->iovp, nblk * sizeof (iovec_t));
kmem_free(priv->bufs, nblk * sizeof (arc_buf_t *));
kmem_free(priv, sizeof (dmu_xuio_t));
if (XUIO_XUZC_RW(xuio) == UIO_READ)
XUIOSTAT_INCR(xuiostat_onloan_rbuf, -nblk);
else
XUIOSTAT_INCR(xuiostat_onloan_wbuf, -nblk);
}
/*
* Initialize iov[priv->next] and priv->bufs[priv->next] with { off, n, abuf }
* and increase priv->next by 1.
*/
int
dmu_xuio_add(xuio_t *xuio, arc_buf_t *abuf, offset_t off, size_t n)
{
struct iovec *iov;
uio_t *uio = &xuio->xu_uio;
dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
int i = priv->next++;
ASSERT(i < priv->cnt);
ASSERT(off + n <= arc_buf_lsize(abuf));
iov = (iovec_t *)uio->uio_iov + i;
iov->iov_base = (char *)abuf->b_data + off;
iov->iov_len = n;
priv->bufs[i] = abuf;
return (0);
}
int
dmu_xuio_cnt(xuio_t *xuio)
{
dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
return (priv->cnt);
}
arc_buf_t *
dmu_xuio_arcbuf(xuio_t *xuio, int i)
{
dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
ASSERT(i < priv->cnt);
return (priv->bufs[i]);
}
void
dmu_xuio_clear(xuio_t *xuio, int i)
{
dmu_xuio_t *priv = XUIO_XUZC_PRIV(xuio);
ASSERT(i < priv->cnt);
priv->bufs[i] = NULL;
}
#endif /* HAVE_UIO_ZEROCOPY */
static void
xuio_stat_init(void)
{
xuio_ksp = kstat_create("zfs", 0, "xuio_stats", "misc",
KSTAT_TYPE_NAMED, sizeof (xuio_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (xuio_ksp != NULL) {
xuio_ksp->ks_data = &xuio_stats;
kstat_install(xuio_ksp);
}
}
static void
xuio_stat_fini(void)
{
if (xuio_ksp != NULL) {
kstat_delete(xuio_ksp);
xuio_ksp = NULL;
}
}
void
xuio_stat_wbuf_copied(void)
{
XUIOSTAT_BUMP(xuiostat_wbuf_copied);
}
void
xuio_stat_wbuf_nocopy(void)
{
XUIOSTAT_BUMP(xuiostat_wbuf_nocopy);
}
#ifdef _KERNEL
int
dmu_read_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size)
{
dmu_buf_t **dbp;
int numbufs, i, err;
#ifdef HAVE_UIO_ZEROCOPY
xuio_t *xuio = NULL;
#endif
/*
* NB: we could do this block-at-a-time, but it's nice
* to be reading in parallel.
*/
err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
TRUE, FTAG, &numbufs, &dbp, 0);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = uio->uio_loffset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
#ifdef HAVE_UIO_ZEROCOPY
if (xuio) {
dmu_buf_impl_t *dbi = (dmu_buf_impl_t *)db;
arc_buf_t *dbuf_abuf = dbi->db_buf;
arc_buf_t *abuf = dbuf_loan_arcbuf(dbi);
err = dmu_xuio_add(xuio, abuf, bufoff, tocpy);
if (!err) {
uio->uio_resid -= tocpy;
uio->uio_loffset += tocpy;
}
if (abuf == dbuf_abuf)
XUIOSTAT_BUMP(xuiostat_rbuf_nocopy);
else
XUIOSTAT_BUMP(xuiostat_rbuf_copied);
} else
#endif
err = uiomove((char *)db->db_data + bufoff, tocpy,
UIO_READ, uio);
if (err)
break;
size -= tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
/*
* Read 'size' bytes into the uio buffer.
* From object zdb->db_object.
* Starting at offset uio->uio_loffset.
*
* If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_read_uio(),
* because we don't have to find the dnode_t for the object.
*/
int
dmu_read_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
dnode_t *dn;
int err;
if (size == 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_read_uio_dnode(dn, uio, size);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Read 'size' bytes into the uio buffer.
* From the specified object
* Starting at offset uio->uio_loffset.
*/
int
dmu_read_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size)
{
dnode_t *dn;
int err;
if (size == 0)
return (0);
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_read_uio_dnode(dn, uio, size);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_write_uio_dnode(dnode_t *dn, uio_t *uio, uint64_t size, dmu_tx_t *tx)
{
dmu_buf_t **dbp;
int numbufs;
int err = 0;
int i;
err = dmu_buf_hold_array_by_dnode(dn, uio->uio_loffset, size,
FALSE, FTAG, &numbufs, &dbp, DMU_READ_PREFETCH);
if (err)
return (err);
for (i = 0; i < numbufs; i++) {
uint64_t tocpy;
int64_t bufoff;
dmu_buf_t *db = dbp[i];
ASSERT(size > 0);
bufoff = uio->uio_loffset - db->db_offset;
tocpy = MIN(db->db_size - bufoff, size);
ASSERT(i == 0 || i == numbufs-1 || tocpy == db->db_size);
if (tocpy == db->db_size)
dmu_buf_will_fill(db, tx);
else
dmu_buf_will_dirty(db, tx);
/*
* XXX uiomove could block forever (e.g. nfs-backed
* pages). There needs to be a uiolockdown() function
* to lock the pages in memory, so that uiomove won't
* block.
*/
err = uiomove((char *)db->db_data + bufoff, tocpy,
UIO_WRITE, uio);
if (tocpy == db->db_size)
dmu_buf_fill_done(db, tx);
if (err)
break;
size -= tocpy;
}
dmu_buf_rele_array(dbp, numbufs, FTAG);
return (err);
}
/*
* Write 'size' bytes from the uio buffer.
* To object zdb->db_object.
* Starting at offset uio->uio_loffset.
*
* If the caller already has a dbuf in the target object
* (e.g. its bonus buffer), this routine is faster than dmu_write_uio(),
* because we don't have to find the dnode_t for the object.
*/
int
dmu_write_uio_dbuf(dmu_buf_t *zdb, uio_t *uio, uint64_t size,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zdb;
dnode_t *dn;
int err;
if (size == 0)
return (0);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
err = dmu_write_uio_dnode(dn, uio, size, tx);
DB_DNODE_EXIT(db);
return (err);
}
/*
* Write 'size' bytes from the uio buffer.
* To the specified object.
* Starting at offset uio->uio_loffset.
*/
int
dmu_write_uio(objset_t *os, uint64_t object, uio_t *uio, uint64_t size,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
if (size == 0)
return (0);
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dmu_write_uio_dnode(dn, uio, size, tx);
dnode_rele(dn, FTAG);
return (err);
}
#endif /* _KERNEL */
/*
* Allocate a loaned anonymous arc buffer.
*/
arc_buf_t *
dmu_request_arcbuf(dmu_buf_t *handle, int size)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)handle;
return (arc_loan_buf(db->db_objset->os_spa, B_FALSE, size));
}
/*
* Free a loaned arc buffer.
*/
void
dmu_return_arcbuf(arc_buf_t *buf)
{
arc_return_buf(buf, FTAG);
arc_buf_destroy(buf, FTAG);
}
int
dmu_convert_mdn_block_to_raw(objset_t *os, uint64_t firstobj,
boolean_t byteorder, const uint8_t *salt, const uint8_t *iv,
const uint8_t *mac, dmu_tx_t *tx)
{
int ret;
dmu_buf_t *handle = NULL;
dmu_buf_impl_t *db = NULL;
uint64_t offset = firstobj * DNODE_MIN_SIZE;
uint64_t dsobj = dmu_objset_id(os);
ret = dmu_buf_hold_by_dnode(DMU_META_DNODE(os), offset, FTAG, &handle,
DMU_READ_PREFETCH | DMU_READ_NO_DECRYPT);
if (ret != 0)
return (ret);
dmu_buf_will_change_crypt_params(handle, tx);
db = (dmu_buf_impl_t *)handle;
ASSERT3P(db->db_buf, !=, NULL);
ASSERT3U(dsobj, !=, 0);
/*
* This technically violates the assumption the dmu code makes
* that dnode blocks are only released in syncing context.
*/
(void) arc_release(db->db_buf, db);
arc_convert_to_raw(db->db_buf, dsobj, byteorder, DMU_OT_DNODE,
salt, iv, mac);
dmu_buf_rele(handle, FTAG);
return (0);
}
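/*
 * Copy the contents of the source dbuf 'handle' into the dbuf at
 * (object, offset): loan an arc buffer of the matching type
 * (raw/encrypted or plain), bcopy the data into it, and assign it to
 * the destination dbuf.
 */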
void
dmu_copy_from_buf(objset_t *os, uint64_t object, uint64_t offset,
dmu_buf_t *handle, dmu_tx_t *tx)
{
dmu_buf_t *dst_handle;
dmu_buf_impl_t *dstdb;
dmu_buf_impl_t *srcdb = (dmu_buf_impl_t *)handle;
arc_buf_t *abuf;
uint64_t datalen;
boolean_t byteorder;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
ASSERT3P(srcdb->db_buf, !=, NULL);
/* hold the db that we want to write to */
VERIFY0(dmu_buf_hold(os, object, offset, FTAG, &dst_handle,
DMU_READ_NO_DECRYPT));
dstdb = (dmu_buf_impl_t *)dst_handle;
datalen = arc_buf_size(srcdb->db_buf);
/* allocate an arc buffer that matches the type of srcdb->db_buf */
if (arc_is_encrypted(srcdb->db_buf)) {
arc_get_raw_params(srcdb->db_buf, &byteorder, salt, iv, mac);
abuf = arc_loan_raw_buf(os->os_spa, dmu_objset_id(os),
byteorder, salt, iv, mac, DB_DNODE(dstdb)->dn_type,
datalen, arc_buf_lsize(srcdb->db_buf),
arc_get_compression(srcdb->db_buf));
} else {
/* we won't get a compressed db back from dmu_buf_hold() */
ASSERT3U(arc_get_compression(srcdb->db_buf),
==, ZIO_COMPRESS_OFF);
abuf = arc_loan_buf(os->os_spa,
DMU_OT_IS_METADATA(DB_DNODE(dstdb)->dn_type), datalen);
}
ASSERT3U(datalen, ==, arc_buf_size(abuf));
/* copy the data to the new buffer and assign it to the dstdb */
bcopy(srcdb->db_buf->b_data, abuf->b_data, datalen);
dbuf_assign_arcbuf(dstdb, abuf, tx);
dmu_buf_rele(dst_handle, FTAG);
}
/*
* When possible directly assign passed loaned arc buffer to a dbuf.
* If this is not possible copy the contents of passed arc buf via
* dmu_write().
*/
void
dmu_assign_arcbuf_by_dnode(dnode_t *dn, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
objset_t *os = dn->dn_objset;
uint64_t object = dn->dn_object;
uint32_t blksz = (uint32_t)arc_buf_lsize(buf);
uint64_t blkid;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
blkid = dbuf_whichblock(dn, 0, offset);
VERIFY((db = dbuf_hold(dn, blkid, FTAG)) != NULL);
rw_exit(&dn->dn_struct_rwlock);
/*
* We can only assign if the offset is aligned, the arc buf is the
* same size as the dbuf, and the dbuf is not metadata.
*/
if (offset == db->db.db_offset && blksz == db->db.db_size) {
dbuf_assign_arcbuf(db, buf, tx);
dbuf_rele(db, FTAG);
} else {
/* compressed bufs must always be assignable to their dbuf */
ASSERT3U(arc_get_compression(buf), ==, ZIO_COMPRESS_OFF);
ASSERT(!(buf->b_flags & ARC_BUF_FLAG_COMPRESSED));
dbuf_rele(db, FTAG);
dmu_write(os, object, offset, blksz, buf->b_data, tx);
dmu_return_arcbuf(buf);
XUIOSTAT_BUMP(xuiostat_wbuf_copied);
}
}
void
dmu_assign_arcbuf_by_dbuf(dmu_buf_t *handle, uint64_t offset, arc_buf_t *buf,
dmu_tx_t *tx)
{
dmu_buf_impl_t *dbuf = (dmu_buf_impl_t *)handle;
DB_DNODE_ENTER(dbuf);
dmu_assign_arcbuf_by_dnode(DB_DNODE(dbuf), offset, buf, tx);
DB_DNODE_EXIT(dbuf);
}
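/*
 * State passed from dmu_sync() to its ready/done write callbacks below.
 */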
typedef struct {
dbuf_dirty_record_t *dsa_dr;
dmu_sync_cb_t *dsa_done;
zgd_t *dsa_zgd;
dmu_tx_t *dsa_tx;
} dmu_sync_arg_t;
/* ARGSUSED */
static void
dmu_sync_ready(zio_t *zio, arc_buf_t *buf, void *varg)
{
dmu_sync_arg_t *dsa = varg;
dmu_buf_t *db = dsa->dsa_zgd->zgd_db;
blkptr_t *bp = zio->io_bp;
if (zio->io_error == 0) {
if (BP_IS_HOLE(bp)) {
/*
* A block of zeros may compress to a hole, but the
* block size still needs to be known for replay.
*/
BP_SET_LSIZE(bp, db->db_size);
} else if (!BP_IS_EMBEDDED(bp)) {
ASSERT(BP_GET_LEVEL(bp) == 0);
BP_SET_FILL(bp, 1);
}
}
}
static void
dmu_sync_late_arrival_ready(zio_t *zio)
{
dmu_sync_ready(zio, NULL, zio->io_private);
}
/* ARGSUSED */
static void
dmu_sync_done(zio_t *zio, arc_buf_t *buf, void *varg)
{
dmu_sync_arg_t *dsa = varg;
dbuf_dirty_record_t *dr = dsa->dsa_dr;
dmu_buf_impl_t *db = dr->dr_dbuf;
mutex_enter(&db->db_mtx);
ASSERT(dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC);
if (zio->io_error == 0) {
dr->dt.dl.dr_nopwrite = !!(zio->io_flags & ZIO_FLAG_NOPWRITE);
if (dr->dt.dl.dr_nopwrite) {
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
uint8_t chksum = BP_GET_CHECKSUM(bp_orig);
ASSERT(BP_EQUAL(bp, bp_orig));
VERIFY(BP_EQUAL(bp, db->db_blkptr));
ASSERT(zio->io_prop.zp_compress != ZIO_COMPRESS_OFF);
VERIFY(zio_checksum_table[chksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
}
dr->dt.dl.dr_overridden_by = *zio->io_bp;
dr->dt.dl.dr_override_state = DR_OVERRIDDEN;
dr->dt.dl.dr_copies = zio->io_prop.zp_copies;
/*
* Old style holes are filled with all zeros, whereas
* new-style holes maintain their lsize, type, level,
* and birth time (see zio_write_compress). While we
* need to reset the BP_SET_LSIZE() call that happened
* in dmu_sync_ready for old style holes, we do *not*
* want to wipe out the information contained in new
* style holes. Thus, only zero out the block pointer if
* it's an old style hole.
*/
if (BP_IS_HOLE(&dr->dt.dl.dr_overridden_by) &&
dr->dt.dl.dr_overridden_by.blk_birth == 0)
BP_ZERO(&dr->dt.dl.dr_overridden_by);
} else {
dr->dt.dl.dr_override_state = DR_NOT_OVERRIDDEN;
}
cv_broadcast(&db->db_changed);
mutex_exit(&db->db_mtx);
dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
kmem_free(dsa, sizeof (*dsa));
}
static void
dmu_sync_late_arrival_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
dmu_sync_arg_t *dsa = zio->io_private;
ASSERTV(blkptr_t *bp_orig = &zio->io_bp_orig);
if (zio->io_error == 0 && !BP_IS_HOLE(bp)) {
ASSERT(!(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(BP_IS_HOLE(bp_orig) || !BP_EQUAL(bp, bp_orig));
ASSERT(zio->io_bp->blk_birth == zio->io_txg);
ASSERT(zio->io_txg > spa_syncing_txg(zio->io_spa));
zio_free(zio->io_spa, zio->io_txg, zio->io_bp);
}
dmu_tx_commit(dsa->dsa_tx);
dsa->dsa_done(dsa->dsa_zgd, zio->io_error);
abd_put(zio->io_abd);
kmem_free(dsa, sizeof (*dsa));
}
static int
dmu_sync_late_arrival(zio_t *pio, objset_t *os, dmu_sync_cb_t *done, zgd_t *zgd,
zio_prop_t *zp, zbookmark_phys_t *zb)
{
dmu_sync_arg_t *dsa;
dmu_tx_t *tx;
tx = dmu_tx_create(os);
dmu_tx_hold_space(tx, zgd->zgd_db->db_size);
if (dmu_tx_assign(tx, TXG_WAIT) != 0) {
dmu_tx_abort(tx);
/* Make zl_get_data do txg_wait_synced() */
return (SET_ERROR(EIO));
}
/*
* In order to prevent the zgd's lwb from being free'd prior to
* dmu_sync_late_arrival_done() being called, we have to ensure
* the lwb's "max txg" takes this tx's txg into account.
*/
zil_lwb_add_txg(zgd->zgd_lwb, dmu_tx_get_txg(tx));
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa->dsa_dr = NULL;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
dsa->dsa_tx = tx;
/*
* Since we are currently syncing this txg, it's nontrivial to
* determine what BP to nopwrite against, so we disable nopwrite.
*
* When syncing, the db_blkptr is initially the BP of the previous
* txg. We can not nopwrite against it because it will be changed
* (this is similar to the non-late-arrival case where the dbuf is
* dirty in a future txg).
*
 * Then dbuf_write_ready() sets db_blkptr to the location we will write.
* We can not nopwrite against it because although the BP will not
* (typically) be changed, the data has not yet been persisted to this
* location.
*
* Finally, when dbuf_write_done() is called, it is theoretically
* possible to always nopwrite, because the data that was written in
* this txg is the same data that we are trying to write. However we
* would need to check that this dbuf is not dirty in any future
* txg's (as we do in the normal dmu_sync() path). For simplicity, we
* don't nopwrite in this case.
*/
zp->zp_nopwrite = B_FALSE;
zio_nowait(zio_write(pio, os->os_spa, dmu_tx_get_txg(tx), zgd->zgd_bp,
abd_get_from_buf(zgd->zgd_db->db_data, zgd->zgd_db->db_size),
zgd->zgd_db->db_size, zgd->zgd_db->db_size, zp,
dmu_sync_late_arrival_ready, NULL, NULL, dmu_sync_late_arrival_done,
dsa, ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, zb));
return (0);
}
/*
* Intent log support: sync the block associated with db to disk.
* N.B. and XXX: the caller is responsible for making sure that the
* data isn't changing while dmu_sync() is writing it.
*
* Return values:
*
* EEXIST: this txg has already been synced, so there's nothing to do.
* The caller should not log the write.
*
* ENOENT: the block was dbuf_free_range()'d, so there's nothing to do.
* The caller should not log the write.
*
* EALREADY: this block is already in the process of being synced.
* The caller should track its progress (somehow).
*
* EIO: could not do the I/O.
* The caller should do a txg_wait_synced().
*
* 0: the I/O has been initiated.
* The caller should log this blkptr in the done callback.
* It is possible that the I/O will fail, in which case
* the error will be reported to the done callback and
* propagated to pio from zio_done().
*/
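/*
 * Hypothetical caller sketch (illustrative only, not part of this file):
 * a ZIL get_data callback might act on the return values documented above
 * roughly as follows. 'lr_txg', 'my_done_cb', and 'zgd' are placeholder
 * names.
 *
 *	error = dmu_sync(zio, lr_txg, my_done_cb, zgd);
 *	if (error == EEXIST || error == ENOENT)
 *		error = 0;			-- nothing to log
 *	else if (error == EALREADY)
 *		-- track the in-flight sync and record the result later
 *	else if (error == EIO)
 *		txg_wait_synced(dmu_objset_pool(os), lr_txg);
 *	-- on 0, the block pointer is logged from my_done_cb
 */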
int
dmu_sync(zio_t *pio, uint64_t txg, dmu_sync_cb_t *done, zgd_t *zgd)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)zgd->zgd_db;
objset_t *os = db->db_objset;
dsl_dataset_t *ds = os->os_dsl_dataset;
dbuf_dirty_record_t *dr;
dmu_sync_arg_t *dsa;
zbookmark_phys_t zb;
zio_prop_t zp;
dnode_t *dn;
ASSERT(pio != NULL);
ASSERT(txg != 0);
/* dbuf is within the locked range */
ASSERT3U(db->db.db_offset, >=, zgd->zgd_rl->r_off);
ASSERT3U(db->db.db_offset + db->db.db_size, <=,
zgd->zgd_rl->r_off + zgd->zgd_rl->r_len);
SET_BOOKMARK(&zb, ds->ds_object,
db->db.db_object, db->db_level, db->db_blkid);
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
dmu_write_policy(os, dn, db->db_level, WP_DMU_SYNC, &zp);
DB_DNODE_EXIT(db);
/*
* If we're frozen (running ziltest), we always need to generate a bp.
*/
if (txg > spa_freeze_txg(os->os_spa))
return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
/*
* Grabbing db_mtx now provides a barrier between dbuf_sync_leaf()
* and us. If we determine that this txg is not yet syncing,
* but it begins to sync a moment later, that's OK because the
* sync thread will block in dbuf_sync_leaf() until we drop db_mtx.
*/
mutex_enter(&db->db_mtx);
if (txg <= spa_last_synced_txg(os->os_spa)) {
/*
* This txg has already synced. There's nothing to do.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(EEXIST));
}
if (txg <= spa_syncing_txg(os->os_spa)) {
/*
* This txg is currently syncing, so we can't mess with
* the dirty record anymore; just write a new log block.
*/
mutex_exit(&db->db_mtx);
return (dmu_sync_late_arrival(pio, os, done, zgd, &zp, &zb));
}
dr = db->db_last_dirty;
while (dr && dr->dr_txg != txg)
dr = dr->dr_next;
if (dr == NULL) {
/*
* There's no dr for this dbuf, so it must have been freed.
* There's no need to log writes to freed blocks, so we're done.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(ENOENT));
}
ASSERT(dr->dr_next == NULL || dr->dr_next->dr_txg < txg);
if (db->db_blkptr != NULL) {
/*
* We need to fill in zgd_bp with the current blkptr so that
* the nopwrite code can check if we're writing the same
* data that's already on disk. We can only nopwrite if we
* are sure that after making the copy, db_blkptr will not
* change until our i/o completes. We ensure this by
* holding the db_mtx, and only allowing nopwrite if the
* block is not already dirty (see below). This is verified
* by dmu_sync_done(), which VERIFYs that the db_blkptr has
* not changed.
*/
*zgd->zgd_bp = *db->db_blkptr;
}
/*
* Assume the on-disk data is X, the current syncing data (in
* txg - 1) is Y, and the current in-memory data is Z (currently
* in dmu_sync).
*
* We usually want to perform a nopwrite if X and Z are the
* same. However, if Y is different (i.e. the BP is going to
* change before this write takes effect), then a nopwrite will
* be incorrect - we would override with X, which could have
* been freed when Y was written.
*
* (Note that this is not a concern when we are nop-writing from
* syncing context, because X and Y must be identical, because
* all previous txgs have been synced.)
*
* Therefore, we disable nopwrite if the current BP could change
* before this TXG. There are two ways it could change: by
* being dirty (dr_next is non-NULL), or by being freed
* (dnode_block_freed()). This behavior is verified by
* zio_done(), which VERIFYs that the override BP is identical
* to the on-disk BP.
*/
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dr->dr_next != NULL || dnode_block_freed(dn, db->db_blkid))
zp.zp_nopwrite = B_FALSE;
DB_DNODE_EXIT(db);
ASSERT(dr->dr_txg == txg);
if (dr->dt.dl.dr_override_state == DR_IN_DMU_SYNC ||
dr->dt.dl.dr_override_state == DR_OVERRIDDEN) {
/*
* We have already issued a sync write for this buffer,
* or this buffer has already been synced. It could not
* have been dirtied since, or we would have cleared the state.
*/
mutex_exit(&db->db_mtx);
return (SET_ERROR(EALREADY));
}
ASSERT(dr->dt.dl.dr_override_state == DR_NOT_OVERRIDDEN);
dr->dt.dl.dr_override_state = DR_IN_DMU_SYNC;
mutex_exit(&db->db_mtx);
dsa = kmem_alloc(sizeof (dmu_sync_arg_t), KM_SLEEP);
dsa->dsa_dr = dr;
dsa->dsa_done = done;
dsa->dsa_zgd = zgd;
dsa->dsa_tx = NULL;
zio_nowait(arc_write(pio, os->os_spa, txg,
zgd->zgd_bp, dr->dt.dl.dr_data, DBUF_IS_L2CACHEABLE(db),
&zp, dmu_sync_ready, NULL, NULL, dmu_sync_done, dsa,
ZIO_PRIORITY_SYNC_WRITE, ZIO_FLAG_CANFAIL, &zb));
return (0);
}
int
dmu_object_set_nlevels(objset_t *os, uint64_t object, int nlevels, dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dnode_set_nlevels(dn, nlevels, tx);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_object_set_blocksize(objset_t *os, uint64_t object, uint64_t size, int ibs,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
err = dnode_set_blksz(dn, size, ibs, tx);
dnode_rele(dn, FTAG);
return (err);
}
int
dmu_object_set_maxblkid(objset_t *os, uint64_t object, uint64_t maxblkid,
dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_new_blkid(dn, maxblkid, tx, B_FALSE);
rw_exit(&dn->dn_struct_rwlock);
dnode_rele(dn, FTAG);
return (0);
}
void
dmu_object_set_checksum(objset_t *os, uint64_t object, uint8_t checksum,
dmu_tx_t *tx)
{
dnode_t *dn;
/*
* Send streams include each object's checksum function. This
* check ensures that the receiving system can understand the
* checksum function transmitted.
*/
ASSERT3U(checksum, <, ZIO_CHECKSUM_LEGACY_FUNCTIONS);
VERIFY0(dnode_hold(os, object, FTAG, &dn));
ASSERT3U(checksum, <, ZIO_CHECKSUM_FUNCTIONS);
dn->dn_checksum = checksum;
dnode_setdirty(dn, tx);
dnode_rele(dn, FTAG);
}
void
dmu_object_set_compress(objset_t *os, uint64_t object, uint8_t compress,
dmu_tx_t *tx)
{
dnode_t *dn;
/*
* Send streams include each object's compression function. This
* check ensures that the receiving system can understand the
* compression function transmitted.
*/
ASSERT3U(compress, <, ZIO_COMPRESS_LEGACY_FUNCTIONS);
VERIFY0(dnode_hold(os, object, FTAG, &dn));
dn->dn_compress = compress;
dnode_setdirty(dn, tx);
dnode_rele(dn, FTAG);
}
/*
* Dirty an object and set the dirty record's raw flag. This is used
 * when writing raw data to an object that will not affect the
* encryption parameters, specifically during raw receives.
*/
int
dmu_object_dirty_raw(objset_t *os, uint64_t object, dmu_tx_t *tx)
{
dnode_t *dn;
int err;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
dmu_buf_will_change_crypt_params((dmu_buf_t *)dn->dn_dbuf, tx);
dnode_rele(dn, FTAG);
return (err);
}
/*
* When the "redundant_metadata" property is set to "most", only indirect
* blocks of this level and higher will have an additional ditto block.
*/
int zfs_redundant_metadata_most_ditto_level = 2;
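/*
 * Fill in *zp with the write policy (checksum, compression, number of
 * copies, dedup, nopwrite, and encryption settings) to use for a block
 * at the given level of the given dnode (or of the objset itself when
 * dn is NULL). 'wp' is a bitmask of WP_NOFILL, WP_DMU_SYNC, and
 * WP_SPILL flags.
 */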
void
dmu_write_policy(objset_t *os, dnode_t *dn, int level, int wp, zio_prop_t *zp)
{
dmu_object_type_t type = dn ? dn->dn_type : DMU_OT_OBJSET;
boolean_t ismd = (level > 0 || DMU_OT_IS_METADATA(type) ||
(wp & WP_SPILL));
enum zio_checksum checksum = os->os_checksum;
enum zio_compress compress = os->os_compress;
enum zio_checksum dedup_checksum = os->os_dedup_checksum;
boolean_t dedup = B_FALSE;
boolean_t nopwrite = B_FALSE;
boolean_t dedup_verify = os->os_dedup_verify;
boolean_t encrypt = B_FALSE;
int copies = os->os_copies;
/*
* We maintain different write policies for each of the following
* types of data:
* 1. metadata
* 2. preallocated blocks (i.e. level-0 blocks of a dump device)
* 3. all other level 0 blocks
*/
if (ismd) {
/*
* XXX -- we should design a compression algorithm
* that specializes in arrays of bps.
*/
compress = zio_compress_select(os->os_spa,
ZIO_COMPRESS_ON, ZIO_COMPRESS_ON);
/*
* Metadata always gets checksummed. If the data
* checksum is multi-bit correctable, and it's not a
* ZBT-style checksum, then it's suitable for metadata
* as well. Otherwise, the metadata checksum defaults
* to fletcher4.
*/
if (!(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_METADATA) ||
(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_EMBEDDED))
checksum = ZIO_CHECKSUM_FLETCHER_4;
if (os->os_redundant_metadata == ZFS_REDUNDANT_METADATA_ALL ||
(os->os_redundant_metadata ==
ZFS_REDUNDANT_METADATA_MOST &&
(level >= zfs_redundant_metadata_most_ditto_level ||
DMU_OT_IS_METADATA(type) || (wp & WP_SPILL))))
copies++;
} else if (wp & WP_NOFILL) {
ASSERT(level == 0);
/*
* If we're writing preallocated blocks, we aren't actually
* writing them so don't set any policy properties. These
* blocks are currently only used by an external subsystem
* outside of zfs (i.e. dump) and not written by the zio
* pipeline.
*/
compress = ZIO_COMPRESS_OFF;
checksum = ZIO_CHECKSUM_OFF;
} else {
compress = zio_compress_select(os->os_spa, dn->dn_compress,
compress);
checksum = (dedup_checksum == ZIO_CHECKSUM_OFF) ?
zio_checksum_select(dn->dn_checksum, checksum) :
dedup_checksum;
/*
* Determine dedup setting. If we are in dmu_sync(),
* we won't actually dedup now because that's all
* done in syncing context; but we do want to use the
 * dedup checksum. If the checksum is not strong
* enough to ensure unique signatures, force
* dedup_verify.
*/
if (dedup_checksum != ZIO_CHECKSUM_OFF) {
dedup = (wp & WP_DMU_SYNC) ? B_FALSE : B_TRUE;
if (!(zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP))
dedup_verify = B_TRUE;
}
/*
 * Enable nopwrite if we have a secure enough checksum
* algorithm (see comment in zio_nop_write) and
* compression is enabled. We don't enable nopwrite if
* dedup is enabled as the two features are mutually
* exclusive.
*/
nopwrite = (!dedup && (zio_checksum_table[checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) &&
compress != ZIO_COMPRESS_OFF && zfs_nopwrite_enabled);
}
/*
* All objects in an encrypted objset are protected from modification
* via a MAC. Encrypted objects store their IV and salt in the last DVA
* in the bp, so we cannot use all copies. Encrypted objects are also
* not subject to nopwrite since writing the same data will still
* result in a new ciphertext. Only encrypted blocks can be dedup'd
* to avoid ambiguity in the dedup code since the DDT does not store
* object types.
*/
if (os->os_encrypted && (wp & WP_NOFILL) == 0) {
encrypt = B_TRUE;
if (DMU_OT_IS_ENCRYPTED(type)) {
copies = MIN(copies, SPA_DVAS_PER_BP - 1);
nopwrite = B_FALSE;
} else {
dedup = B_FALSE;
}
if (level <= 0 &&
(type == DMU_OT_DNODE || type == DMU_OT_OBJSET)) {
compress = ZIO_COMPRESS_EMPTY;
}
}
zp->zp_compress = compress;
zp->zp_checksum = checksum;
zp->zp_type = (wp & WP_SPILL) ? dn->dn_bonustype : type;
zp->zp_level = level;
zp->zp_copies = MIN(copies, spa_max_replication(os->os_spa));
zp->zp_dedup = dedup;
zp->zp_dedup_verify = dedup && dedup_verify;
zp->zp_nopwrite = nopwrite;
zp->zp_encrypt = encrypt;
zp->zp_byteorder = ZFS_HOST_BYTEORDER;
bzero(zp->zp_salt, ZIO_DATA_SALT_LEN);
bzero(zp->zp_iv, ZIO_DATA_IV_LEN);
bzero(zp->zp_mac, ZIO_DATA_MAC_LEN);
ASSERT3U(zp->zp_compress, !=, ZIO_COMPRESS_INHERIT);
}
/*
* This function is only called from zfs_holey_common() for zpl_llseek()
* in order to determine the location of holes. In order to accurately
* report holes all dirty data must be synced to disk. This causes extremely
* poor performance when seeking for holes in a dirty file. As a compromise,
 * only provide hole data when the dnode is clean. When a dnode is dirty,
 * report the dnode as having no holes, which is always a safe thing to do.
*/
int
dmu_offset_next(objset_t *os, uint64_t object, boolean_t hole, uint64_t *off)
{
dnode_t *dn;
int i, err;
boolean_t clean = B_TRUE;
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
/*
* Check if dnode is dirty
*/
for (i = 0; i < TXG_SIZE; i++) {
if (multilist_link_active(&dn->dn_dirty_link[i])) {
clean = B_FALSE;
break;
}
}
/*
* If compatibility option is on, sync any current changes before
* we go trundling through the block pointers.
*/
if (!clean && zfs_dmu_offset_next_sync) {
clean = B_TRUE;
dnode_rele(dn, FTAG);
txg_wait_synced(dmu_objset_pool(os), 0);
err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
}
if (clean)
err = dnode_next_offset(dn,
(hole ? DNODE_FIND_HOLE : 0), off, 1, 1, 0);
else
err = SET_ERROR(EBUSY);
dnode_rele(dn, FTAG);
return (err);
}
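/*
 * Fill in *doi from the given dnode's in-memory and on-disk (dn_phys)
 * state. dmu_object_info_from_dnode() below wraps this with the
 * appropriate locks held.
 */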
void
__dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
dnode_phys_t *dnp = dn->dn_phys;
doi->doi_data_block_size = dn->dn_datablksz;
doi->doi_metadata_block_size = dn->dn_indblkshift ?
1ULL << dn->dn_indblkshift : 0;
doi->doi_type = dn->dn_type;
doi->doi_bonus_type = dn->dn_bonustype;
doi->doi_bonus_size = dn->dn_bonuslen;
doi->doi_dnodesize = dn->dn_num_slots << DNODE_SHIFT;
doi->doi_indirection = dn->dn_nlevels;
doi->doi_checksum = dn->dn_checksum;
doi->doi_compress = dn->dn_compress;
doi->doi_nblkptr = dn->dn_nblkptr;
doi->doi_physical_blocks_512 = (DN_USED_BYTES(dnp) + 256) >> 9;
doi->doi_max_offset = (dn->dn_maxblkid + 1) * dn->dn_datablksz;
doi->doi_fill_count = 0;
for (int i = 0; i < dnp->dn_nblkptr; i++)
doi->doi_fill_count += BP_GET_FILL(&dnp->dn_blkptr[i]);
}
void
dmu_object_info_from_dnode(dnode_t *dn, dmu_object_info_t *doi)
{
rw_enter(&dn->dn_struct_rwlock, RW_READER);
mutex_enter(&dn->dn_mtx);
__dmu_object_info_from_dnode(dn, doi);
mutex_exit(&dn->dn_mtx);
rw_exit(&dn->dn_struct_rwlock);
}
/*
* Get information on a DMU object.
* If doi is NULL, just indicates whether the object exists.
*/
int
dmu_object_info(objset_t *os, uint64_t object, dmu_object_info_t *doi)
{
dnode_t *dn;
int err = dnode_hold(os, object, FTAG, &dn);
if (err)
return (err);
if (doi != NULL)
dmu_object_info_from_dnode(dn, doi);
dnode_rele(dn, FTAG);
return (0);
}
/*
* As above, but faster; can be used when you have a held dbuf in hand.
*/
void
dmu_object_info_from_db(dmu_buf_t *db_fake, dmu_object_info_t *doi)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
DB_DNODE_ENTER(db);
dmu_object_info_from_dnode(DB_DNODE(db), doi);
DB_DNODE_EXIT(db);
}
/*
* Faster still when you only care about the size.
* This is specifically optimized for zfs_getattr().
*/
void
dmu_object_size_from_db(dmu_buf_t *db_fake, uint32_t *blksize,
u_longlong_t *nblk512)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
*blksize = dn->dn_datablksz;
/* add in number of slots used for the dnode itself */
*nblk512 = ((DN_USED_BYTES(dn->dn_phys) + SPA_MINBLOCKSIZE/2) >>
SPA_MINBLOCKSHIFT) + dn->dn_num_slots;
DB_DNODE_EXIT(db);
}
void
dmu_object_dnsize_from_db(dmu_buf_t *db_fake, int *dnsize)
{
dmu_buf_impl_t *db = (dmu_buf_impl_t *)db_fake;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
*dnsize = dn->dn_num_slots << DNODE_SHIFT;
DB_DNODE_EXIT(db);
}
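/*
 * Byteswap helpers for arrays of fixed-width integers (likely used as
 * per-object-type byteswap routines when reading data that was written
 * with the opposite byte order).
 */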
void
byteswap_uint64_array(void *vbuf, size_t size)
{
uint64_t *buf = vbuf;
size_t count = size >> 3;
int i;
ASSERT((size & 7) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_64(buf[i]);
}
void
byteswap_uint32_array(void *vbuf, size_t size)
{
uint32_t *buf = vbuf;
size_t count = size >> 2;
int i;
ASSERT((size & 3) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_32(buf[i]);
}
void
byteswap_uint16_array(void *vbuf, size_t size)
{
uint16_t *buf = vbuf;
size_t count = size >> 1;
int i;
ASSERT((size & 1) == 0);
for (i = 0; i < count; i++)
buf[i] = BSWAP_16(buf[i]);
}
/* ARGSUSED */
void
byteswap_uint8_array(void *vbuf, size_t size)
{
}
void
dmu_init(void)
{
abd_init();
zfs_dbgmsg_init();
sa_cache_init();
xuio_stat_init();
dmu_objset_init();
dnode_init();
zfetch_init();
dmu_tx_init();
l2arc_init();
arc_init();
dbuf_init();
}
void
dmu_fini(void)
{
arc_fini(); /* arc depends on l2arc, so arc must go first */
l2arc_fini();
dmu_tx_fini();
zfetch_fini();
dbuf_fini();
dnode_fini();
dmu_objset_fini();
xuio_stat_fini();
sa_cache_fini();
zfs_dbgmsg_fini();
abd_fini();
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_bonus_hold);
EXPORT_SYMBOL(dmu_buf_hold_array_by_bonus);
EXPORT_SYMBOL(dmu_buf_rele_array);
EXPORT_SYMBOL(dmu_prefetch);
EXPORT_SYMBOL(dmu_free_range);
EXPORT_SYMBOL(dmu_free_long_range);
EXPORT_SYMBOL(dmu_free_long_range_raw);
EXPORT_SYMBOL(dmu_free_long_object);
EXPORT_SYMBOL(dmu_free_long_object_raw);
EXPORT_SYMBOL(dmu_read);
EXPORT_SYMBOL(dmu_read_by_dnode);
EXPORT_SYMBOL(dmu_write);
EXPORT_SYMBOL(dmu_write_by_dnode);
EXPORT_SYMBOL(dmu_prealloc);
EXPORT_SYMBOL(dmu_object_info);
EXPORT_SYMBOL(dmu_object_info_from_dnode);
EXPORT_SYMBOL(dmu_object_info_from_db);
EXPORT_SYMBOL(dmu_object_size_from_db);
EXPORT_SYMBOL(dmu_object_dnsize_from_db);
EXPORT_SYMBOL(dmu_object_set_nlevels);
EXPORT_SYMBOL(dmu_object_set_blocksize);
EXPORT_SYMBOL(dmu_object_set_maxblkid);
EXPORT_SYMBOL(dmu_object_set_checksum);
EXPORT_SYMBOL(dmu_object_set_compress);
EXPORT_SYMBOL(dmu_write_policy);
EXPORT_SYMBOL(dmu_sync);
EXPORT_SYMBOL(dmu_request_arcbuf);
EXPORT_SYMBOL(dmu_return_arcbuf);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dnode);
EXPORT_SYMBOL(dmu_assign_arcbuf_by_dbuf);
EXPORT_SYMBOL(dmu_buf_hold);
EXPORT_SYMBOL(dmu_ot);
/* BEGIN CSTYLED */
module_param(zfs_nopwrite_enabled, int, 0644);
MODULE_PARM_DESC(zfs_nopwrite_enabled, "Enable NOP writes");
module_param(zfs_per_txg_dirty_frees_percent, ulong, 0644);
MODULE_PARM_DESC(zfs_per_txg_dirty_frees_percent,
"percentage of dirtied blocks from frees in one TXG");
module_param(zfs_dmu_offset_next_sync, int, 0644);
MODULE_PARM_DESC(zfs_dmu_offset_next_sync,
"Enable forcing txg sync to find holes");
/* END CSTYLED */
#endif
diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c
index a0982f6ec176..a44f485b7b60 100644
--- a/module/zfs/dmu_objset.c
+++ b/module/zfs/dmu_objset.c
@@ -1,2886 +1,2997 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2015, STRATO AG, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfeature.h>
#include <sys/cred.h>
#include <sys/zfs_context.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dnode.h>
#include <sys/dbuf.h>
#include <sys/zvol.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/dmu_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/sa.h>
#include <sys/zfs_onexit.h>
#include <sys/dsl_destroy.h>
#include <sys/vdev.h>
+#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/spa_impl.h>
#include <sys/dmu_send.h>
#include <sys/zfs_project.h>
/*
* Needed to close a window in dnode_move() that allows the objset to be freed
* before it can be safely accessed.
*/
krwlock_t os_lock;
/*
* Tunable to overwrite the maximum number of threads for the parallelization
* of dmu_objset_find_dp, needed to speed up the import of pools with many
* datasets.
* Default is 4 times the number of leaf vdevs.
*/
int dmu_find_threads = 0;
/*
* Backfill lower metadnode objects after this many have been freed.
* Backfilling negatively impacts object creation rates, so only do it
* if there are enough holes to fill.
*/
int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT;
static char *upgrade_tag = "upgrade_tag";
static void dmu_objset_find_dp_cb(void *arg);
static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb);
static void dmu_objset_upgrade_stop(objset_t *os);
void
dmu_objset_init(void)
{
rw_init(&os_lock, NULL, RW_DEFAULT, NULL);
}
void
dmu_objset_fini(void)
{
rw_destroy(&os_lock);
}
spa_t *
dmu_objset_spa(objset_t *os)
{
return (os->os_spa);
}
zilog_t *
dmu_objset_zil(objset_t *os)
{
return (os->os_zil);
}
dsl_pool_t *
dmu_objset_pool(objset_t *os)
{
dsl_dataset_t *ds;
if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir)
return (ds->ds_dir->dd_pool);
else
return (spa_get_dsl(os->os_spa));
}
dsl_dataset_t *
dmu_objset_ds(objset_t *os)
{
return (os->os_dsl_dataset);
}
dmu_objset_type_t
dmu_objset_type(objset_t *os)
{
return (os->os_phys->os_type);
}
void
dmu_objset_name(objset_t *os, char *buf)
{
dsl_dataset_name(os->os_dsl_dataset, buf);
}
uint64_t
dmu_objset_id(objset_t *os)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
return (ds ? ds->ds_object : 0);
}
uint64_t
dmu_objset_dnodesize(objset_t *os)
{
return (os->os_dnodesize);
}
zfs_sync_type_t
dmu_objset_syncprop(objset_t *os)
{
return (os->os_sync);
}
zfs_logbias_op_t
dmu_objset_logbias(objset_t *os)
{
return (os->os_logbias);
}
static void
checksum_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance should have been done by now.
*/
ASSERT(newval != ZIO_CHECKSUM_INHERIT);
os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE);
}
static void
compression_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval != ZIO_COMPRESS_INHERIT);
os->os_compress = zio_compress_select(os->os_spa, newval,
ZIO_COMPRESS_ON);
}
static void
copies_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval > 0);
ASSERT(newval <= spa_max_replication(os->os_spa));
os->os_copies = newval;
}
static void
dedup_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
spa_t *spa = os->os_spa;
enum zio_checksum checksum;
/*
* Inheritance should have been done by now.
*/
ASSERT(newval != ZIO_CHECKSUM_INHERIT);
checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF);
os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK;
os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY);
}
static void
primary_cache_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
newval == ZFS_CACHE_METADATA);
os->os_primary_cache = newval;
}
static void
secondary_cache_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE ||
newval == ZFS_CACHE_METADATA);
os->os_secondary_cache = newval;
}
static void
sync_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS ||
newval == ZFS_SYNC_DISABLED);
os->os_sync = newval;
if (os->os_zil)
zil_set_sync(os->os_zil, newval);
}
static void
redundant_metadata_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
/*
* Inheritance and range checking should have been done by now.
*/
ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL ||
newval == ZFS_REDUNDANT_METADATA_MOST);
os->os_redundant_metadata = newval;
}
static void
dnodesize_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
switch (newval) {
case ZFS_DNSIZE_LEGACY:
os->os_dnodesize = DNODE_MIN_SIZE;
break;
case ZFS_DNSIZE_AUTO:
/*
* Choose a dnode size that will work well for most
* workloads if the user specified "auto". Future code
* improvements could dynamically select a dnode size
* based on observed workload patterns.
*/
os->os_dnodesize = DNODE_MIN_SIZE * 2;
break;
case ZFS_DNSIZE_1K:
case ZFS_DNSIZE_2K:
case ZFS_DNSIZE_4K:
case ZFS_DNSIZE_8K:
case ZFS_DNSIZE_16K:
os->os_dnodesize = newval;
break;
}
}
static void
logbias_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
ASSERT(newval == ZFS_LOGBIAS_LATENCY ||
newval == ZFS_LOGBIAS_THROUGHPUT);
os->os_logbias = newval;
if (os->os_zil)
zil_set_logbias(os->os_zil, newval);
}
static void
recordsize_changed_cb(void *arg, uint64_t newval)
{
objset_t *os = arg;
os->os_recordsize = newval;
}
void
dmu_objset_byteswap(void *buf, size_t size)
{
objset_phys_t *osp = buf;
ASSERT(size == OBJSET_PHYS_SIZE_V1 || size == OBJSET_PHYS_SIZE_V2 ||
size == sizeof (objset_phys_t));
dnode_byteswap(&osp->os_meta_dnode);
byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t));
osp->os_type = BSWAP_64(osp->os_type);
osp->os_flags = BSWAP_64(osp->os_flags);
if (size >= OBJSET_PHYS_SIZE_V2) {
dnode_byteswap(&osp->os_userused_dnode);
dnode_byteswap(&osp->os_groupused_dnode);
if (size >= sizeof (objset_phys_t))
dnode_byteswap(&osp->os_projectused_dnode);
}
}
/*
* The hash is a CRC-based hash of the objset_t pointer and the object number.
*/
static uint64_t
dnode_hash(const objset_t *os, uint64_t obj)
{
uintptr_t osv = (uintptr_t)os;
uint64_t crc = -1ULL;
ASSERT(zfs_crc64_table[128] == ZFS_CRC64_POLY);
/*
* The low 6 bits of the pointer don't have much entropy, because
* the objset_t is larger than 2^6 bytes long.
*/
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (osv >> 6)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 0)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 8)) & 0xFF];
crc = (crc >> 8) ^ zfs_crc64_table[(crc ^ (obj >> 16)) & 0xFF];
crc ^= (osv>>14) ^ (obj>>24);
return (crc);
}
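/*
 * Multilist index function for os_dirty_dnodes: map a dnode to one of the
 * multilist's sublists via dnode_hash().
 */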
unsigned int
dnode_multilist_index_func(multilist_t *ml, void *obj)
{
dnode_t *dn = obj;
return (dnode_hash(dn->dn_objset, dn->dn_object) %
multilist_get_num_sublists(ml));
}
+/*
+ * Instantiates the objset_t in-memory structure corresponding to the
+ * objset_phys_t that's pointed to by the specified blkptr_t.
+ */
int
dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
objset_t **osp)
{
objset_t *os;
int i, err;
ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock));
+ /*
+ * The $ORIGIN dataset (if it exists) doesn't have an associated
+ * objset, so there's no reason to open it. The $ORIGIN dataset
+ * will not exist on pools older than SPA_VERSION_ORIGIN.
+ */
+ if (ds != NULL && spa_get_dsl(spa) != NULL &&
+ spa_get_dsl(spa)->dp_origin_snap != NULL) {
+ ASSERT3P(ds->ds_dir, !=,
+ spa_get_dsl(spa)->dp_origin_snap->ds_dir);
+ }
+
os = kmem_zalloc(sizeof (objset_t), KM_SLEEP);
os->os_dsl_dataset = ds;
os->os_spa = spa;
os->os_rootbp = bp;
if (!BP_IS_HOLE(os->os_rootbp)) {
arc_flags_t aflags = ARC_FLAG_WAIT;
zbookmark_phys_t zb;
int size;
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (DMU_OS_IS_L2CACHEABLE(os))
aflags |= ARC_FLAG_L2CACHE;
if (ds != NULL && ds->ds_dir->dd_crypto_obj != 0) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
ASSERT(BP_IS_AUTHENTICATED(bp));
zio_flags |= ZIO_FLAG_RAW;
}
dprintf_bp(os->os_rootbp, "reading %s", "");
err = arc_read(NULL, spa, os->os_rootbp,
arc_getbuf_func, &os->os_phys_buf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (err != 0) {
kmem_free(os, sizeof (objset_t));
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = SET_ERROR(EIO);
return (err);
}
if (spa_version(spa) < SPA_VERSION_USERSPACE)
size = OBJSET_PHYS_SIZE_V1;
else if (!spa_feature_is_enabled(spa,
SPA_FEATURE_PROJECT_QUOTA))
size = OBJSET_PHYS_SIZE_V2;
else
size = sizeof (objset_phys_t);
/* Increase the blocksize if we are permitted. */
if (arc_buf_size(os->os_phys_buf) < size) {
arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
bzero(buf->b_data, size);
bcopy(os->os_phys_buf->b_data, buf->b_data,
arc_buf_size(os->os_phys_buf));
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
os->os_phys_buf = buf;
}
os->os_phys = os->os_phys_buf->b_data;
os->os_flags = os->os_phys->os_flags;
} else {
int size = spa_version(spa) >= SPA_VERSION_USERSPACE ?
sizeof (objset_phys_t) : OBJSET_PHYS_SIZE_V1;
os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf,
ARC_BUFC_METADATA, size);
os->os_phys = os->os_phys_buf->b_data;
bzero(os->os_phys, size);
}
/*
* Note: the changed_cb will be called once before the register
* func returns, thus changing the checksum/compression from the
* default (fletcher2/off). Snapshots don't need to know about
* checksum/compression/copies.
*/
if (ds != NULL) {
boolean_t needlock = B_FALSE;
os->os_encrypted = (ds->ds_dir->dd_crypto_obj != 0);
/*
* Note: it's valid to open the objset if the dataset is
* long-held, in which case the pool_config lock will not
* be held.
*/
if (!dsl_pool_config_held(dmu_objset_pool(os))) {
needlock = B_TRUE;
dsl_pool_config_enter(dmu_objset_pool(os), FTAG);
}
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE),
primary_cache_changed_cb, os);
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE),
secondary_cache_changed_cb, os);
}
if (!ds->ds_is_snapshot) {
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_CHECKSUM),
checksum_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_COMPRESSION),
compression_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_COPIES),
copies_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DEDUP),
dedup_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_LOGBIAS),
logbias_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_SYNC),
sync_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(
ZFS_PROP_REDUNDANT_METADATA),
redundant_metadata_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_RECORDSIZE),
recordsize_changed_cb, os);
}
if (err == 0) {
err = dsl_prop_register(ds,
zfs_prop_to_name(ZFS_PROP_DNODESIZE),
dnodesize_changed_cb, os);
}
}
if (needlock)
dsl_pool_config_exit(dmu_objset_pool(os), FTAG);
if (err != 0) {
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
kmem_free(os, sizeof (objset_t));
return (err);
}
} else {
/* It's the meta-objset. */
os->os_checksum = ZIO_CHECKSUM_FLETCHER_4;
os->os_compress = ZIO_COMPRESS_ON;
os->os_encrypted = B_FALSE;
os->os_copies = spa_max_replication(spa);
os->os_dedup_checksum = ZIO_CHECKSUM_OFF;
os->os_dedup_verify = B_FALSE;
os->os_logbias = ZFS_LOGBIAS_LATENCY;
os->os_sync = ZFS_SYNC_STANDARD;
os->os_primary_cache = ZFS_CACHE_ALL;
os->os_secondary_cache = ZFS_CACHE_ALL;
os->os_dnodesize = DNODE_MIN_SIZE;
}
if (ds == NULL || !ds->ds_is_snapshot)
os->os_zil_header = os->os_phys->os_zil_header;
os->os_zil = zil_alloc(os, &os->os_zil_header);
for (i = 0; i < TXG_SIZE; i++) {
os->os_dirty_dnodes[i] = multilist_create(sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[i]),
dnode_multilist_index_func);
}
list_create(&os->os_dnodes, sizeof (dnode_t),
offsetof(dnode_t, dn_link));
list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
list_link_init(&os->os_evicting_node);
mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_userused_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL);
os->os_obj_next_percpu_len = boot_ncpus;
os->os_obj_next_percpu = kmem_zalloc(os->os_obj_next_percpu_len *
sizeof (os->os_obj_next_percpu[0]), KM_SLEEP);
dnode_special_open(os, &os->os_phys->os_meta_dnode,
DMU_META_DNODE_OBJECT, &os->os_meta_dnode);
if (OBJSET_BUF_HAS_USERUSED(os->os_phys_buf)) {
dnode_special_open(os, &os->os_phys->os_userused_dnode,
DMU_USERUSED_OBJECT, &os->os_userused_dnode);
dnode_special_open(os, &os->os_phys->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode);
if (OBJSET_BUF_HAS_PROJECTUSED(os->os_phys_buf))
dnode_special_open(os,
&os->os_phys->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, &os->os_projectused_dnode);
}
mutex_init(&os->os_upgrade_lock, NULL, MUTEX_DEFAULT, NULL);
*osp = os;
return (0);
}
int
dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp)
{
int err = 0;
/*
* We shouldn't be doing anything with dsl_dataset_t's unless the
* pool_config lock is held, or the dataset is long-held.
*/
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) ||
dsl_dataset_long_held(ds));
mutex_enter(&ds->ds_opening_lock);
if (ds->ds_objset == NULL) {
objset_t *os;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
err = dmu_objset_open_impl(dsl_dataset_get_spa(ds),
ds, dsl_dataset_get_blkptr(ds), &os);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (err == 0) {
mutex_enter(&ds->ds_lock);
ASSERT(ds->ds_objset == NULL);
ds->ds_objset = os;
mutex_exit(&ds->ds_lock);
}
}
*osp = ds->ds_objset;
mutex_exit(&ds->ds_opening_lock);
return (err);
}
/*
* Holds the pool while the objset is held. Therefore only one objset
* can be held at a time.
*/
int
dmu_objset_hold_flags(const char *name, boolean_t decrypt, void *tag,
objset_t **osp)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0;
err = dsl_pool_hold(name, tag, &dp);
if (err != 0)
return (err);
err = dsl_dataset_hold_flags(dp, name, flags, tag, &ds);
if (err != 0) {
dsl_pool_rele(dp, tag);
return (err);
}
err = dmu_objset_from_ds(ds, osp);
if (err != 0) {
dsl_dataset_rele(ds, tag);
dsl_pool_rele(dp, tag);
}
return (err);
}
int
dmu_objset_hold(const char *name, void *tag, objset_t **osp)
{
return (dmu_objset_hold_flags(name, B_FALSE, tag, osp));
}
static int
dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
int err;
err = dmu_objset_from_ds(ds, osp);
if (err != 0) {
return (err);
} else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) {
return (SET_ERROR(EINVAL));
} else if (!readonly && dsl_dataset_is_snapshot(ds)) {
return (SET_ERROR(EROFS));
} else if (!readonly && decrypt &&
dsl_dir_incompatible_encryption_version(ds->ds_dir)) {
return (SET_ERROR(EROFS));
}
/* if we are decrypting, we can now check MACs in os->os_phys_buf */
if (decrypt && arc_is_unauthenticated((*osp)->os_phys_buf)) {
zbookmark_phys_t zb;
SET_BOOKMARK(&zb, ds->ds_object, ZB_ROOT_OBJECT,
ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
err = arc_untransform((*osp)->os_phys_buf, (*osp)->os_spa,
&zb, B_FALSE);
if (err != 0)
return (err);
ASSERT0(arc_is_unauthenticated((*osp)->os_phys_buf));
}
return (0);
}
/*
* dsl_pool must not be held when this is called.
* Upon successful return, there will be a longhold on the dataset,
* and the dsl_pool will not be held.
*/
int
dmu_objset_own(const char *name, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0;
err = dsl_pool_hold(name, FTAG, &dp);
if (err != 0)
return (err);
err = dsl_dataset_own(dp, name, flags, tag, &ds);
if (err != 0) {
dsl_pool_rele(dp, FTAG);
return (err);
}
err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
if (err != 0) {
dsl_dataset_disown(ds, flags, tag);
dsl_pool_rele(dp, FTAG);
return (err);
}
/*
* User accounting requires the dataset to be decrypted and rw.
* We also don't begin user accounting during claiming to help
* speed up pool import times and to keep this txg reserved
* completely for recovery work.
*/
if ((dmu_objset_userobjspace_upgradable(*osp) ||
dmu_objset_projectquota_upgradable(*osp)) &&
!readonly && !dp->dp_spa->spa_claiming &&
(ds->ds_dir->dd_crypto_obj == 0 || decrypt))
dmu_objset_id_quota_upgrade(*osp);
dsl_pool_rele(dp, FTAG);
return (0);
}
int
dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type,
boolean_t readonly, boolean_t decrypt, void *tag, objset_t **osp)
{
dsl_dataset_t *ds;
int err;
ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0;
err = dsl_dataset_own_obj(dp, obj, flags, tag, &ds);
if (err != 0)
return (err);
err = dmu_objset_own_impl(ds, type, readonly, decrypt, tag, osp);
if (err != 0) {
dsl_dataset_disown(ds, flags, tag);
return (err);
}
return (0);
}
void
dmu_objset_rele_flags(objset_t *os, boolean_t decrypt, void *tag)
{
ds_hold_flags_t flags = (decrypt) ? DS_HOLD_FLAG_DECRYPT : 0;
dsl_pool_t *dp = dmu_objset_pool(os);
dsl_dataset_rele_flags(os->os_dsl_dataset, flags, tag);
dsl_pool_rele(dp, tag);
}
void
dmu_objset_rele(objset_t *os, void *tag)
{
dmu_objset_rele_flags(os, B_FALSE, tag);
}
/*
* When we are called, os MUST refer to an objset associated with a dataset
* that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner
* == tag. We will then release and reacquire ownership of the dataset while
 * holding the pool config_rwlock so that no intervening namespace or
 * ownership changes can occur.
*
* This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to
* release the hold on its dataset and acquire a new one on the dataset of the
* same name so that it can be partially torn down and reconstructed.
*/
void
dmu_objset_refresh_ownership(dsl_dataset_t *ds, dsl_dataset_t **newds,
boolean_t decrypt, void *tag)
{
dsl_pool_t *dp;
char name[ZFS_MAX_DATASET_NAME_LEN];
VERIFY3P(ds, !=, NULL);
VERIFY3P(ds->ds_owner, ==, tag);
VERIFY(dsl_dataset_long_held(ds));
dsl_dataset_name(ds, name);
dp = ds->ds_dir->dd_pool;
dsl_pool_config_enter(dp, FTAG);
dsl_dataset_disown(ds, decrypt, tag);
VERIFY0(dsl_dataset_own(dp, name,
(decrypt) ? DS_HOLD_FLAG_DECRYPT : 0, tag, newds));
dsl_pool_config_exit(dp, FTAG);
}
void
dmu_objset_disown(objset_t *os, boolean_t decrypt, void *tag)
{
/*
* Stop upgrading thread
*/
dmu_objset_upgrade_stop(os);
dsl_dataset_disown(os->os_dsl_dataset,
(decrypt) ? DS_HOLD_FLAG_DECRYPT : 0, tag);
}
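/*
 * Evict all dbufs belonging to this objset: walk os_dnodes and evict each
 * held dnode's dbufs, then evict the dbufs of the special (project/group/
 * user-used and meta) dnodes.
 */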
void
dmu_objset_evict_dbufs(objset_t *os)
{
dnode_t *dn_marker;
dnode_t *dn;
dn_marker = kmem_alloc(sizeof (dnode_t), KM_SLEEP);
mutex_enter(&os->os_lock);
dn = list_head(&os->os_dnodes);
while (dn != NULL) {
/*
* Skip dnodes without holds. We have to do this dance
* because dnode_add_ref() only works if there is already a
* hold. If the dnode has no holds, then it has no dbufs.
*/
if (dnode_add_ref(dn, FTAG)) {
list_insert_after(&os->os_dnodes, dn, dn_marker);
mutex_exit(&os->os_lock);
dnode_evict_dbufs(dn);
dnode_rele(dn, FTAG);
mutex_enter(&os->os_lock);
dn = list_next(&os->os_dnodes, dn_marker);
list_remove(&os->os_dnodes, dn_marker);
} else {
dn = list_next(&os->os_dnodes, dn);
}
}
mutex_exit(&os->os_lock);
kmem_free(dn_marker, sizeof (dnode_t));
if (DMU_USERUSED_DNODE(os) != NULL) {
if (DMU_PROJECTUSED_DNODE(os) != NULL)
dnode_evict_dbufs(DMU_PROJECTUSED_DNODE(os));
dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os));
dnode_evict_dbufs(DMU_USERUSED_DNODE(os));
}
dnode_evict_dbufs(DMU_META_DNODE(os));
}
/*
 * Objset eviction processing is split into two pieces.
* The first marks the objset as evicting, evicts any dbufs that
* have a refcount of zero, and then queues up the objset for the
* second phase of eviction. Once os->os_dnodes has been cleared by
* dnode_buf_pageout()->dnode_destroy(), the second phase is executed.
* The second phase closes the special dnodes, dequeues the objset from
* the list of those undergoing eviction, and finally frees the objset.
*
* NOTE: Due to asynchronous eviction processing (invocation of
* dnode_buf_pageout()), it is possible for the meta dnode for the
* objset to have no holds even though os->os_dnodes is not empty.
*/
void
dmu_objset_evict(objset_t *os)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
for (int t = 0; t < TXG_SIZE; t++)
ASSERT(!dmu_objset_is_dirty(os, t));
if (ds)
dsl_prop_unregister_all(ds, os);
if (os->os_sa)
sa_tear_down(os);
dmu_objset_evict_dbufs(os);
mutex_enter(&os->os_lock);
spa_evicting_os_register(os->os_spa, os);
if (list_is_empty(&os->os_dnodes)) {
mutex_exit(&os->os_lock);
dmu_objset_evict_done(os);
} else {
mutex_exit(&os->os_lock);
}
}
void
dmu_objset_evict_done(objset_t *os)
{
ASSERT3P(list_head(&os->os_dnodes), ==, NULL);
dnode_special_close(&os->os_meta_dnode);
if (DMU_USERUSED_DNODE(os)) {
if (DMU_PROJECTUSED_DNODE(os))
dnode_special_close(&os->os_projectused_dnode);
dnode_special_close(&os->os_userused_dnode);
dnode_special_close(&os->os_groupused_dnode);
}
zil_free(os->os_zil);
arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf);
/*
* This is a barrier to prevent the objset from going away in
* dnode_move() until we can safely ensure that the objset is still in
* use. We consider the objset valid before the barrier and invalid
* after the barrier.
*/
rw_enter(&os_lock, RW_READER);
rw_exit(&os_lock);
kmem_free(os->os_obj_next_percpu,
os->os_obj_next_percpu_len * sizeof (os->os_obj_next_percpu[0]));
mutex_destroy(&os->os_lock);
mutex_destroy(&os->os_userused_lock);
mutex_destroy(&os->os_obj_lock);
mutex_destroy(&os->os_user_ptr_lock);
mutex_destroy(&os->os_upgrade_lock);
for (int i = 0; i < TXG_SIZE; i++) {
multilist_destroy(os->os_dirty_dnodes[i]);
}
spa_evicting_os_deregister(os->os_spa, os);
kmem_free(os, sizeof (objset_t));
}
timestruc_t
dmu_objset_snap_cmtime(objset_t *os)
{
return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir));
}
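/*
 * Create the in-memory objset for 'ds' (or for the MOS when ds is NULL)
 * and allocate its meta-dnode with the requested block size, indirect
 * block shift, and number of levels. Called from syncing context.
 */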
objset_t *
dmu_objset_create_impl_dnstats(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
dmu_objset_type_t type, int levels, int blksz, int ibs, dmu_tx_t *tx)
{
objset_t *os;
dnode_t *mdn;
ASSERT(dmu_tx_is_syncing(tx));
if (blksz == 0)
blksz = DNODE_BLOCK_SIZE;
if (ibs == 0)
ibs = DN_MAX_INDBLKSHIFT;
if (ds != NULL)
VERIFY0(dmu_objset_from_ds(ds, &os));
else
VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os));
mdn = DMU_META_DNODE(os);
dnode_allocate(mdn, DMU_OT_DNODE, blksz, ibs, DMU_OT_NONE, 0,
DNODE_MIN_SLOTS, tx);
/*
* We don't want to have to increase the meta-dnode's nlevels
 * later, because then we could do it in quiescing context while
* we are also accessing it in open context.
*
* This precaution is not necessary for the MOS (ds == NULL),
* because the MOS is only updated in syncing context.
* This is most fortunate: the MOS is the only objset that
* needs to be synced multiple times as spa_sync() iterates
* to convergence, so minimizing its dn_nlevels matters.
*/
if (ds != NULL) {
if (levels == 0) {
levels = 1;
/*
* Determine the number of levels necessary for the
* meta-dnode to contain DN_MAX_OBJECT dnodes. Note
* that in order to ensure that we do not overflow
* 64 bits, there has to be a nlevels that gives us a
* number of blocks > DN_MAX_OBJECT but < 2^64.
* Therefore, (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)
* (10) must be less than (64 - log2(DN_MAX_OBJECT))
* (16).
*/
while ((uint64_t)mdn->dn_nblkptr <<
(mdn->dn_datablkshift - DNODE_SHIFT + (levels - 1) *
(mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) <
DN_MAX_OBJECT)
levels++;
}
mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] =
mdn->dn_nlevels = levels;
}
ASSERT(type != DMU_OST_NONE);
ASSERT(type != DMU_OST_ANY);
ASSERT(type < DMU_OST_NUMTYPES);
os->os_phys->os_type = type;
/*
* Enable user accounting if it is enabled and this is not an
* encrypted receive.
*/
if (dmu_objset_userused_enabled(os) &&
(!os->os_encrypted || !dmu_objset_is_receiving(os))) {
os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
if (dmu_objset_userobjused_enabled(os)) {
ds->ds_feature_activation_needed[
SPA_FEATURE_USEROBJ_ACCOUNTING] = B_TRUE;
os->os_phys->os_flags |=
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
}
if (dmu_objset_projectquota_enabled(os)) {
ds->ds_feature_activation_needed[
SPA_FEATURE_PROJECT_QUOTA] = B_TRUE;
os->os_phys->os_flags |=
OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
}
os->os_flags = os->os_phys->os_flags;
}
dsl_dataset_dirty(ds, tx);
return (os);
}
/* called from dsl for meta-objset */
objset_t *
dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp,
dmu_objset_type_t type, dmu_tx_t *tx)
{
return (dmu_objset_create_impl_dnstats(spa, ds, bp, type, 0, 0, 0, tx));
}
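/*
 * Arguments for the dmu_objset_create() check/sync task pair below.
 */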
typedef struct dmu_objset_create_arg {
const char *doca_name;
cred_t *doca_cred;
void (*doca_userfunc)(objset_t *os, void *arg,
cred_t *cr, dmu_tx_t *tx);
void *doca_userarg;
dmu_objset_type_t doca_type;
uint64_t doca_flags;
dsl_crypto_params_t *doca_dcp;
} dmu_objset_create_arg_t;
/*ARGSUSED*/
static int
dmu_objset_create_check(void *arg, dmu_tx_t *tx)
{
dmu_objset_create_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *pdd;
const char *tail;
int error;
if (strchr(doca->doca_name, '@') != NULL)
return (SET_ERROR(EINVAL));
if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EEXIST));
}
error = dmu_objset_create_crypt_check(pdd, doca->doca_dcp);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
doca->doca_cred);
dsl_dir_rele(pdd, FTAG);
return (error);
}
static void
dmu_objset_create_sync(void *arg, dmu_tx_t *tx)
{
dmu_objset_create_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *pdd;
const char *tail;
dsl_dataset_t *ds;
uint64_t obj;
blkptr_t *bp;
objset_t *os;
zio_t *rzio;
VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail));
obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags,
doca->doca_cred, doca->doca_dcp, tx);
VERIFY0(dsl_dataset_hold_obj_flags(pdd->dd_pool, obj,
DS_HOLD_FLAG_DECRYPT, FTAG, &ds));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
bp = dsl_dataset_get_blkptr(ds);
os = dmu_objset_create_impl(pdd->dd_pool->dp_spa,
ds, bp, doca->doca_type, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (doca->doca_userfunc != NULL) {
doca->doca_userfunc(os, doca->doca_userarg,
doca->doca_cred, tx);
}
/*
* The doca_userfunc() may write out some data that needs to be
* encrypted if the dataset is encrypted (specifically the root
* directory). This data must be written out before the encryption
* key mapping is removed by dsl_dataset_rele_flags(). Force the
* I/O to occur immediately by invoking the relevant sections of
* dsl_pool_sync().
*/
if (os->os_encrypted) {
dsl_dataset_t *tmpds = NULL;
boolean_t need_sync_done = B_FALSE;
mutex_enter(&ds->ds_lock);
ds->ds_owner = FTAG;
mutex_exit(&ds->ds_lock);
rzio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
tx->tx_txg);
if (tmpds != NULL) {
dsl_dataset_sync(ds, rzio, tx);
need_sync_done = B_TRUE;
}
VERIFY0(zio_wait(rzio));
dmu_objset_do_userquota_updates(os, tx);
taskq_wait(dp->dp_sync_taskq);
rzio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
tmpds = txg_list_remove_this(&dp->dp_dirty_datasets, ds,
tx->tx_txg);
if (tmpds != NULL) {
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, rzio, tx);
}
VERIFY0(zio_wait(rzio));
if (need_sync_done)
dsl_dataset_sync_done(ds, tx);
mutex_enter(&ds->ds_lock);
ds->ds_owner = NULL;
mutex_exit(&ds->ds_lock);
}
spa_history_log_internal_ds(ds, "create", tx, "");
zvol_create_minors(dp->dp_spa, doca->doca_name, B_TRUE);
dsl_dataset_rele_flags(ds, DS_HOLD_FLAG_DECRYPT, FTAG);
dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags,
dsl_crypto_params_t *dcp, dmu_objset_create_sync_func_t func, void *arg)
{
dmu_objset_create_arg_t doca;
dsl_crypto_params_t tmp_dcp = { 0 };
doca.doca_name = name;
doca.doca_cred = CRED();
doca.doca_flags = flags;
doca.doca_userfunc = func;
doca.doca_userarg = arg;
doca.doca_type = type;
/*
* Some callers (mostly for testing) do not provide a dcp on their
* own but various code inside the sync task will require it to be
* allocated. Rather than adding NULL checks throughout this code
 * or adding dummy dcp's to all of the callers, we simply create a
 * dummy one here and use that. This zero dcp will have the same
 * effect as asking for inheritance of all encryption params.
*/
doca.doca_dcp = (dcp != NULL) ? dcp : &tmp_dcp;
return (dsl_sync_task(name,
dmu_objset_create_check, dmu_objset_create_sync, &doca,
6, ZFS_SPACE_CHECK_NORMAL));
}
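/*
 * Editorial sketch (not part of this change): a minimal caller of
 * dmu_objset_create().  Passing dcp == NULL relies on the dummy dcp
 * described above, i.e. all encryption parameters are inherited, and
 * passing func == NULL skips the doca_userfunc step.  The dataset
 * name is hypothetical.
 *
 *	int err = dmu_objset_create("tank/newfs", DMU_OST_ZFS, 0,
 *	    NULL, NULL, NULL);
 *	if (err != 0)
 *		return (err);
 */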
typedef struct dmu_objset_clone_arg {
const char *doca_clone;
const char *doca_origin;
cred_t *doca_cred;
} dmu_objset_clone_arg_t;
/*ARGSUSED*/
static int
dmu_objset_clone_check(void *arg, dmu_tx_t *tx)
{
dmu_objset_clone_arg_t *doca = arg;
dsl_dir_t *pdd;
const char *tail;
int error;
dsl_dataset_t *origin;
dsl_pool_t *dp = dmu_tx_pool(tx);
if (strchr(doca->doca_clone, '@') != NULL)
return (SET_ERROR(EINVAL));
if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail);
if (error != 0)
return (error);
if (tail == NULL) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EEXIST));
}
error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL,
doca->doca_cred);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EDQUOT));
}
error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin);
if (error != 0) {
dsl_dir_rele(pdd, FTAG);
return (error);
}
/* You can only clone snapshots, not the head datasets. */
if (!origin->ds_is_snapshot) {
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
return (SET_ERROR(EINVAL));
}
error = dmu_objset_clone_crypt_check(pdd, origin->ds_dir);
if (error != 0) {
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
return (error);
}
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
return (0);
}
static void
dmu_objset_clone_sync(void *arg, dmu_tx_t *tx)
{
dmu_objset_clone_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *pdd;
const char *tail;
dsl_dataset_t *origin, *ds;
uint64_t obj;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail));
VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin));
obj = dsl_dataset_create_sync(pdd, tail, origin, 0,
doca->doca_cred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds));
dsl_dataset_name(origin, namebuf);
spa_history_log_internal_ds(ds, "clone", tx,
"origin=%s (%llu)", namebuf, origin->ds_object);
zvol_create_minors(dp->dp_spa, doca->doca_clone, B_TRUE);
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(origin, FTAG);
dsl_dir_rele(pdd, FTAG);
}
int
dmu_objset_clone(const char *clone, const char *origin)
{
dmu_objset_clone_arg_t doca;
doca.doca_clone = clone;
doca.doca_origin = origin;
doca.doca_cred = CRED();
return (dsl_sync_task(clone,
dmu_objset_clone_check, dmu_objset_clone_sync, &doca,
6, ZFS_SPACE_CHECK_NORMAL));
}
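/*
 * Editorial sketch (not part of this change): cloning requires a
 * snapshot origin; a head dataset as origin fails the check above
 * with EINVAL.  The names are hypothetical.
 *
 *	int err = dmu_objset_clone("tank/clone", "tank/fs@snap");
 */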
+static int
+dmu_objset_remap_indirects_impl(objset_t *os, uint64_t last_removed_txg)
+{
+ int error = 0;
+ uint64_t object = 0;
+ while ((error = dmu_object_next(os, &object, B_FALSE, 0)) == 0) {
+ error = dmu_object_remap_indirects(os, object,
+ last_removed_txg);
+ /*
+ * If the ZPL removed the object before we managed to dnode_hold
+ * it, we would get an ENOENT. If the ZPL declares its intent
+ * to remove the object (dnode_free) before we manage to
+ * dnode_hold it, we would get an EEXIST. In either case, we
+ * want to continue remapping the other objects in the objset;
+ * in all other cases, we want to break early.
+ */
+ if (error != 0 && error != ENOENT && error != EEXIST) {
+ break;
+ }
+ }
+ if (error == ESRCH) {
+ error = 0;
+ }
+ return (error);
+}
+
+int
+dmu_objset_remap_indirects(const char *fsname)
+{
+ int error = 0;
+ objset_t *os = NULL;
+ uint64_t last_removed_txg;
+ uint64_t remap_start_txg;
+ dsl_dir_t *dd;
+
+ error = dmu_objset_hold(fsname, FTAG, &os);
+ if (error != 0) {
+ return (error);
+ }
+ dd = dmu_objset_ds(os)->ds_dir;
+
+ if (!spa_feature_is_enabled(dmu_objset_spa(os),
+ SPA_FEATURE_OBSOLETE_COUNTS)) {
+ dmu_objset_rele(os, FTAG);
+ return (SET_ERROR(ENOTSUP));
+ }
+
+ if (dsl_dataset_is_snapshot(dmu_objset_ds(os))) {
+ dmu_objset_rele(os, FTAG);
+ return (SET_ERROR(EINVAL));
+ }
+
+ /*
+ * If there has not been a removal, we're done.
+ */
+ last_removed_txg = spa_get_last_removal_txg(dmu_objset_spa(os));
+ if (last_removed_txg == -1ULL) {
+ dmu_objset_rele(os, FTAG);
+ return (0);
+ }
+
+ /*
+ * If we have remapped since the last removal, we're done.
+ */
+ if (dsl_dir_is_zapified(dd)) {
+ uint64_t last_remap_txg;
+ if (zap_lookup(spa_meta_objset(dmu_objset_spa(os)),
+ dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
+ sizeof (last_remap_txg), 1, &last_remap_txg) == 0 &&
+ last_remap_txg > last_removed_txg) {
+ dmu_objset_rele(os, FTAG);
+ return (0);
+ }
+ }
+
+ dsl_dataset_long_hold(dmu_objset_ds(os), FTAG);
+ dsl_pool_rele(dmu_objset_pool(os), FTAG);
+
+ remap_start_txg = spa_last_synced_txg(dmu_objset_spa(os));
+ error = dmu_objset_remap_indirects_impl(os, last_removed_txg);
+ if (error == 0) {
+ /*
+ * We update the last_remap_txg to be the start txg so that
+ * we can guarantee that every block older than last_remap_txg
+ * that can be remapped has been remapped.
+ */
+ error = dsl_dir_update_last_remap_txg(dd, remap_start_txg);
+ }
+
+ dsl_dataset_long_rele(dmu_objset_ds(os), FTAG);
+ dsl_dataset_rele(dmu_objset_ds(os), FTAG);
+
+ return (error);
+}
+
int
dmu_objset_snapshot_one(const char *fsname, const char *snapname)
{
int err;
char *longsnap = kmem_asprintf("%s@%s", fsname, snapname);
nvlist_t *snaps = fnvlist_alloc();
fnvlist_add_boolean(snaps, longsnap);
strfree(longsnap);
err = dsl_dataset_snapshot(snaps, NULL, NULL);
fnvlist_free(snaps);
return (err);
}
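/*
 * Editorial sketch (not part of this change): taking one snapshot by
 * component names.  This is equivalent to building "tank/fs@backup"
 * and passing a one-entry nvlist to dsl_dataset_snapshot(), as above.
 *
 *	int err = dmu_objset_snapshot_one("tank/fs", "backup");
 */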
static void
dmu_objset_upgrade_task_cb(void *data)
{
objset_t *os = data;
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_status = EINTR;
if (!os->os_upgrade_exit) {
mutex_exit(&os->os_upgrade_lock);
os->os_upgrade_status = os->os_upgrade_cb(os);
mutex_enter(&os->os_upgrade_lock);
}
os->os_upgrade_exit = B_TRUE;
os->os_upgrade_id = 0;
mutex_exit(&os->os_upgrade_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
static void
dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb)
{
if (os->os_upgrade_id != 0)
return;
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
dsl_dataset_long_hold(dmu_objset_ds(os), upgrade_tag);
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_id == 0 && os->os_upgrade_status == 0) {
os->os_upgrade_exit = B_FALSE;
os->os_upgrade_cb = cb;
os->os_upgrade_id = taskq_dispatch(
os->os_spa->spa_upgrade_taskq,
dmu_objset_upgrade_task_cb, os, TQ_SLEEP);
if (os->os_upgrade_id == TASKQID_INVALID) {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
os->os_upgrade_status = ENOMEM;
}
}
mutex_exit(&os->os_upgrade_lock);
}
static void
dmu_objset_upgrade_stop(objset_t *os)
{
mutex_enter(&os->os_upgrade_lock);
os->os_upgrade_exit = B_TRUE;
if (os->os_upgrade_id != 0) {
taskqid_t id = os->os_upgrade_id;
os->os_upgrade_id = 0;
mutex_exit(&os->os_upgrade_lock);
if ((taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id)) == 0) {
dsl_dataset_long_rele(dmu_objset_ds(os), upgrade_tag);
}
txg_wait_synced(os->os_spa->spa_dsl_pool, 0);
} else {
mutex_exit(&os->os_upgrade_lock);
}
}
static void
dmu_objset_sync_dnodes(multilist_sublist_t *list, dmu_tx_t *tx)
{
dnode_t *dn;
while ((dn = multilist_sublist_head(list)) != NULL) {
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT);
ASSERT(dn->dn_dbuf->db_data_pending);
/*
* Initialize dn_zio outside dnode_sync() because the
 * meta-dnode needs to set it outside dnode_sync().
*/
dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio;
ASSERT(dn->dn_zio);
ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS);
multilist_sublist_remove(list, dn);
/*
 * If we are not doing user accounting (os_synced_dnodes == NULL)
* we are done with this dnode for this txg. Unset dn_dirty_txg
* if later txgs aren't dirtying it so that future holders do
* not get a stale value. Otherwise, we will do this in
* userquota_updates_task() when processing has completely
* finished for this txg.
*/
multilist_t *newlist = dn->dn_objset->os_synced_dnodes;
if (newlist != NULL) {
(void) dnode_add_ref(dn, newlist);
multilist_insert(newlist, dn);
} else {
mutex_enter(&dn->dn_mtx);
if (dn->dn_dirty_txg == tx->tx_txg)
dn->dn_dirty_txg = 0;
mutex_exit(&dn->dn_mtx);
}
dnode_sync(dn, tx);
}
}
/* ARGSUSED */
static void
dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg)
{
blkptr_t *bp = zio->io_bp;
objset_t *os = arg;
dnode_phys_t *dnp = &os->os_phys->os_meta_dnode;
uint64_t fill = 0;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET);
ASSERT0(BP_GET_LEVEL(bp));
/*
* Update rootbp fill count: it should be the number of objects
* allocated in the object set (not counting the "special"
* objects that are stored in the objset_phys_t -- the meta
* dnode and user/group/project accounting objects).
*/
for (int i = 0; i < dnp->dn_nblkptr; i++)
fill += BP_GET_FILL(&dnp->dn_blkptr[i]);
BP_SET_FILL(bp, fill);
if (os->os_dsl_dataset != NULL)
rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG);
*os->os_rootbp = *bp;
if (os->os_dsl_dataset != NULL)
rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG);
}
/* ARGSUSED */
static void
dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg)
{
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
objset_t *os = arg;
if (zio->io_flags & ZIO_FLAG_IO_REWRITE) {
ASSERT(BP_EQUAL(bp, bp_orig));
} else {
dsl_dataset_t *ds = os->os_dsl_dataset;
dmu_tx_t *tx = os->os_synctx;
(void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE);
dsl_dataset_block_born(ds, bp, tx);
}
kmem_free(bp, sizeof (*bp));
}
typedef struct sync_dnodes_arg {
multilist_t *sda_list;
int sda_sublist_idx;
multilist_t *sda_newlist;
dmu_tx_t *sda_tx;
} sync_dnodes_arg_t;
static void
sync_dnodes_task(void *arg)
{
sync_dnodes_arg_t *sda = arg;
multilist_sublist_t *ms =
multilist_sublist_lock(sda->sda_list, sda->sda_sublist_idx);
dmu_objset_sync_dnodes(ms, sda->sda_tx);
multilist_sublist_unlock(ms);
kmem_free(sda, sizeof (*sda));
}
/* called from dsl */
void
dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx)
{
int txgoff;
zbookmark_phys_t zb;
zio_prop_t zp;
zio_t *zio;
list_t *list;
dbuf_dirty_record_t *dr;
blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP);
*blkptr_copy = *os->os_rootbp;
dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg);
ASSERT(dmu_tx_is_syncing(tx));
/* XXX the write_done callback should really give us the tx... */
os->os_synctx = tx;
if (os->os_dsl_dataset == NULL) {
/*
* This is the MOS. If we have upgraded,
* spa_max_replication() could change, so reset
* os_copies here.
*/
os->os_copies = spa_max_replication(os->os_spa);
}
/*
* Create the root block IO
*/
SET_BOOKMARK(&zb, os->os_dsl_dataset ?
os->os_dsl_dataset->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
arc_release(os->os_phys_buf, &os->os_phys_buf);
dmu_write_policy(os, NULL, 0, 0, &zp);
/*
* If we are either claiming the ZIL or doing a raw receive write out
 * the os_phys_buf raw. Neither of these actions will affect the MAC
* at this point.
*/
if (os->os_next_write_raw[tx->tx_txg & TXG_MASK]) {
ASSERT(os->os_encrypted);
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_FALSE;
arc_convert_to_raw(os->os_phys_buf,
os->os_dsl_dataset->ds_object, ZFS_HOST_BYTEORDER,
DMU_OT_OBJSET, NULL, NULL, NULL);
}
zio = arc_write(pio, os->os_spa, tx->tx_txg,
blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os),
&zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done,
os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb);
/*
* Sync special dnodes - the parent IO for the sync is the root block
*/
DMU_META_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_META_DNODE(os), tx);
os->os_phys->os_flags = os->os_flags;
if (DMU_USERUSED_DNODE(os) &&
DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_USERUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_USERUSED_DNODE(os), tx);
DMU_GROUPUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_GROUPUSED_DNODE(os), tx);
}
if (DMU_PROJECTUSED_DNODE(os) &&
DMU_PROJECTUSED_DNODE(os)->dn_type != DMU_OT_NONE) {
DMU_PROJECTUSED_DNODE(os)->dn_zio = zio;
dnode_sync(DMU_PROJECTUSED_DNODE(os), tx);
}
txgoff = tx->tx_txg & TXG_MASK;
if (dmu_objset_userused_enabled(os) &&
(!os->os_encrypted || !dmu_objset_is_receiving(os))) {
/*
* We must create the list here because it uses the
* dn_dirty_link[] of this txg. But it may already
* exist because we call dsl_dataset_sync() twice per txg.
*/
if (os->os_synced_dnodes == NULL) {
os->os_synced_dnodes =
multilist_create(sizeof (dnode_t),
offsetof(dnode_t, dn_dirty_link[txgoff]),
dnode_multilist_index_func);
} else {
ASSERT3U(os->os_synced_dnodes->ml_offset, ==,
offsetof(dnode_t, dn_dirty_link[txgoff]));
}
}
for (int i = 0;
i < multilist_get_num_sublists(os->os_dirty_dnodes[txgoff]); i++) {
sync_dnodes_arg_t *sda = kmem_alloc(sizeof (*sda), KM_SLEEP);
sda->sda_list = os->os_dirty_dnodes[txgoff];
sda->sda_sublist_idx = i;
sda->sda_tx = tx;
(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
sync_dnodes_task, sda, 0);
/* callback frees sda */
}
taskq_wait(dmu_objset_pool(os)->dp_sync_taskq);
list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff];
while ((dr = list_head(list)) != NULL) {
ASSERT0(dr->dr_dbuf->db_level);
list_remove(list, dr);
if (dr->dr_zio)
zio_nowait(dr->dr_zio);
}
/* Enable dnode backfill if enough objects have been freed. */
if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) {
os->os_rescan_dnodes = B_TRUE;
os->os_freed_dnodes = 0;
}
/*
* Free intent log blocks up to this tx.
*/
zil_sync(os->os_zil, tx);
os->os_phys->os_zil_header = os->os_zil_header;
zio_nowait(zio);
}
boolean_t
dmu_objset_is_dirty(objset_t *os, uint64_t txg)
{
return (!multilist_is_empty(os->os_dirty_dnodes[txg & TXG_MASK]));
}
static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES];
void
dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb)
{
used_cbs[ost] = cb;
}
boolean_t
dmu_objset_userused_enabled(objset_t *os)
{
return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE &&
used_cbs[os->os_phys->os_type] != NULL &&
DMU_USERUSED_DNODE(os) != NULL);
}
boolean_t
dmu_objset_userobjused_enabled(objset_t *os)
{
return (dmu_objset_userused_enabled(os) &&
spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING));
}
boolean_t
dmu_objset_projectquota_enabled(objset_t *os)
{
return (used_cbs[os->os_phys->os_type] != NULL &&
DMU_PROJECTUSED_DNODE(os) != NULL &&
spa_feature_is_enabled(os->os_spa, SPA_FEATURE_PROJECT_QUOTA));
}
typedef struct userquota_node {
	/* must be the first field; see userquota_update_cache() */
char uqn_id[20 + DMU_OBJACCT_PREFIX_LEN];
int64_t uqn_delta;
avl_node_t uqn_node;
} userquota_node_t;
typedef struct userquota_cache {
avl_tree_t uqc_user_deltas;
avl_tree_t uqc_group_deltas;
avl_tree_t uqc_project_deltas;
} userquota_cache_t;
static int
userquota_compare(const void *l, const void *r)
{
const userquota_node_t *luqn = l;
const userquota_node_t *ruqn = r;
int rv;
/*
* NB: can only access uqn_id because userquota_update_cache() doesn't
* pass in an entire userquota_node_t.
*/
rv = strcmp(luqn->uqn_id, ruqn->uqn_id);
return (AVL_ISIGN(rv));
}
static void
do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx)
{
void *cookie;
userquota_node_t *uqn;
ASSERT(dmu_tx_is_syncing(tx));
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas,
&cookie)) != NULL) {
/*
* os_userused_lock protects against concurrent calls to
 * zap_increment(). It's needed because zap_increment()
* is not thread-safe (i.e. not atomic).
*/
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_USERUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_user_deltas);
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas,
&cookie)) != NULL) {
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_GROUPUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_group_deltas);
if (dmu_objset_projectquota_enabled(os)) {
cookie = NULL;
while ((uqn = avl_destroy_nodes(&cache->uqc_project_deltas,
&cookie)) != NULL) {
mutex_enter(&os->os_userused_lock);
VERIFY0(zap_increment(os, DMU_PROJECTUSED_OBJECT,
uqn->uqn_id, uqn->uqn_delta, tx));
mutex_exit(&os->os_userused_lock);
kmem_free(uqn, sizeof (*uqn));
}
avl_destroy(&cache->uqc_project_deltas);
}
}
static void
userquota_update_cache(avl_tree_t *avl, const char *id, int64_t delta)
{
userquota_node_t *uqn;
avl_index_t idx;
ASSERT(strlen(id) < sizeof (uqn->uqn_id));
/*
* Use id directly for searching because uqn_id is the first field of
* userquota_node_t and fields after uqn_id won't be accessed in
* avl_find().
*/
uqn = avl_find(avl, (const void *)id, &idx);
if (uqn == NULL) {
uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP);
strlcpy(uqn->uqn_id, id, sizeof (uqn->uqn_id));
avl_insert(avl, uqn, idx);
}
uqn->uqn_delta += delta;
}
static void
do_userquota_update(objset_t *os, userquota_cache_t *cache, uint64_t used,
uint64_t flags, uint64_t user, uint64_t group, uint64_t project,
boolean_t subtract)
{
if (flags & DNODE_FLAG_USERUSED_ACCOUNTED) {
int64_t delta = DNODE_MIN_SIZE + used;
char name[20];
if (subtract)
delta = -delta;
(void) sprintf(name, "%llx", (longlong_t)user);
userquota_update_cache(&cache->uqc_user_deltas, name, delta);
(void) sprintf(name, "%llx", (longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
if (dmu_objset_projectquota_enabled(os)) {
(void) sprintf(name, "%llx", (longlong_t)project);
userquota_update_cache(&cache->uqc_project_deltas,
name, delta);
}
}
}
static void
do_userobjquota_update(objset_t *os, userquota_cache_t *cache, uint64_t flags,
uint64_t user, uint64_t group, uint64_t project, boolean_t subtract)
{
if (flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) {
char name[20 + DMU_OBJACCT_PREFIX_LEN];
int delta = subtract ? -1 : 1;
(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
(longlong_t)user);
userquota_update_cache(&cache->uqc_user_deltas, name, delta);
(void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx",
(longlong_t)group);
userquota_update_cache(&cache->uqc_group_deltas, name, delta);
if (dmu_objset_projectquota_enabled(os)) {
(void) snprintf(name, sizeof (name),
DMU_OBJACCT_PREFIX "%llx", (longlong_t)project);
userquota_update_cache(&cache->uqc_project_deltas,
name, delta);
}
}
}
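/*
 * Editorial note (illustrative, not part of this change): the
 * {user,group,project}used ZAPs key two kinds of deltas.  Space usage
 * is keyed by the bare hex id, while object counts are keyed with
 * DMU_OBJACCT_PREFIX prepended.  For example, for uid 1000 (0x3e8):
 *
 *	"3e8"				bytes charged to uid 1000
 *	DMU_OBJACCT_PREFIX "3e8"	objects owned by uid 1000
 */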
typedef struct userquota_updates_arg {
objset_t *uua_os;
int uua_sublist_idx;
dmu_tx_t *uua_tx;
} userquota_updates_arg_t;
static void
userquota_updates_task(void *arg)
{
userquota_updates_arg_t *uua = arg;
objset_t *os = uua->uua_os;
dmu_tx_t *tx = uua->uua_tx;
dnode_t *dn;
userquota_cache_t cache = { { 0 } };
multilist_sublist_t *list =
multilist_sublist_lock(os->os_synced_dnodes, uua->uua_sublist_idx);
ASSERT(multilist_sublist_head(list) == NULL ||
dmu_objset_userused_enabled(os));
avl_create(&cache.uqc_user_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
avl_create(&cache.uqc_group_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node));
if (dmu_objset_projectquota_enabled(os))
avl_create(&cache.uqc_project_deltas, userquota_compare,
sizeof (userquota_node_t), offsetof(userquota_node_t,
uqn_node));
while ((dn = multilist_sublist_head(list)) != NULL) {
int flags;
ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object));
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE ||
dn->dn_phys->dn_flags &
DNODE_FLAG_USERUSED_ACCOUNTED);
flags = dn->dn_id_flags;
ASSERT(flags);
if (flags & DN_ID_OLD_EXIST) {
do_userquota_update(os, &cache, dn->dn_oldused,
dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid,
dn->dn_oldprojid, B_TRUE);
do_userobjquota_update(os, &cache, dn->dn_oldflags,
dn->dn_olduid, dn->dn_oldgid,
dn->dn_oldprojid, B_TRUE);
}
if (flags & DN_ID_NEW_EXIST) {
do_userquota_update(os, &cache,
DN_USED_BYTES(dn->dn_phys), dn->dn_phys->dn_flags,
dn->dn_newuid, dn->dn_newgid,
dn->dn_newprojid, B_FALSE);
do_userobjquota_update(os, &cache,
dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid,
dn->dn_newprojid, B_FALSE);
}
mutex_enter(&dn->dn_mtx);
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
if (dn->dn_id_flags & DN_ID_NEW_EXIST) {
dn->dn_olduid = dn->dn_newuid;
dn->dn_oldgid = dn->dn_newgid;
dn->dn_oldprojid = dn->dn_newprojid;
dn->dn_id_flags |= DN_ID_OLD_EXIST;
if (dn->dn_bonuslen == 0)
dn->dn_id_flags |= DN_ID_CHKED_SPILL;
else
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
dn->dn_id_flags &= ~(DN_ID_NEW_EXIST);
if (dn->dn_dirty_txg == spa_syncing_txg(os->os_spa))
dn->dn_dirty_txg = 0;
mutex_exit(&dn->dn_mtx);
multilist_sublist_remove(list, dn);
dnode_rele(dn, os->os_synced_dnodes);
}
do_userquota_cacheflush(os, &cache, tx);
multilist_sublist_unlock(list);
kmem_free(uua, sizeof (*uua));
}
void
dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx)
{
if (!dmu_objset_userused_enabled(os))
return;
/*
* If this is a raw receive just return and handle accounting
* later when we have the keys loaded. We also don't do user
* accounting during claiming since the datasets are not owned
* for the duration of claiming and this txg should only be
* used for recovery.
*/
if (os->os_encrypted && dmu_objset_is_receiving(os))
return;
if (tx->tx_txg <= os->os_spa->spa_claim_max_txg)
return;
/* Allocate the user/group/project used objects if necessary. */
if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
VERIFY0(zap_create_claim(os,
DMU_USERUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
VERIFY0(zap_create_claim(os,
DMU_GROUPUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
}
if (dmu_objset_projectquota_enabled(os) &&
DMU_PROJECTUSED_DNODE(os)->dn_type == DMU_OT_NONE) {
VERIFY0(zap_create_claim(os, DMU_PROJECTUSED_OBJECT,
DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx));
}
for (int i = 0;
i < multilist_get_num_sublists(os->os_synced_dnodes); i++) {
userquota_updates_arg_t *uua =
kmem_alloc(sizeof (*uua), KM_SLEEP);
uua->uua_os = os;
uua->uua_sublist_idx = i;
uua->uua_tx = tx;
/* note: caller does taskq_wait() */
(void) taskq_dispatch(dmu_objset_pool(os)->dp_sync_taskq,
userquota_updates_task, uua, 0);
/* callback frees uua */
}
}
/*
* Returns a pointer to data to find uid/gid from
*
 * If a dirty record for the transaction group that is syncing can't
* be found then NULL is returned. In the NULL case it is assumed
* the uid/gid aren't changing.
*/
static void *
dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx)
{
dbuf_dirty_record_t *dr, **drp;
void *data;
if (db->db_dirtycnt == 0)
return (db->db.db_data); /* Nothing is changing */
for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next)
if (dr->dr_txg == tx->tx_txg)
break;
if (dr == NULL) {
data = NULL;
} else {
dnode_t *dn;
DB_DNODE_ENTER(dr->dr_dbuf);
dn = DB_DNODE(dr->dr_dbuf);
if (dn->dn_bonuslen == 0 &&
dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID)
data = dr->dt.dl.dr_data->b_data;
else
data = dr->dt.dl.dr_data;
DB_DNODE_EXIT(dr->dr_dbuf);
}
return (data);
}
void
dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
void *data = NULL;
dmu_buf_impl_t *db = NULL;
uint64_t *user = NULL;
uint64_t *group = NULL;
uint64_t *project = NULL;
int flags = dn->dn_id_flags;
int error;
boolean_t have_spill = B_FALSE;
if (!dmu_objset_userused_enabled(dn->dn_objset))
return;
/*
* Raw receives introduce a problem with user accounting. Raw
* receives cannot update the user accounting info because the
* user ids and the sizes are encrypted. To guarantee that we
* never end up with bad user accounting, we simply disable it
* during raw receives. We also disable this for normal receives
* so that an incremental raw receive may be done on top of an
* existing non-raw receive.
*/
if (os->os_encrypted && dmu_objset_is_receiving(os))
return;
if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST|
DN_ID_CHKED_SPILL)))
return;
if (before && dn->dn_bonuslen != 0)
data = DN_BONUS(dn->dn_phys);
else if (!before && dn->dn_bonuslen != 0) {
if (dn->dn_bonus) {
db = dn->dn_bonus;
mutex_enter(&db->db_mtx);
data = dmu_objset_userquota_find_data(db, tx);
} else {
data = DN_BONUS(dn->dn_phys);
}
} else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) {
int rf = 0;
if (RW_WRITE_HELD(&dn->dn_struct_rwlock))
rf |= DB_RF_HAVESTRUCT;
error = dmu_spill_hold_by_dnode(dn,
rf | DB_RF_MUST_SUCCEED,
FTAG, (dmu_buf_t **)&db);
ASSERT(error == 0);
mutex_enter(&db->db_mtx);
data = (before) ? db->db.db_data :
dmu_objset_userquota_find_data(db, tx);
have_spill = B_TRUE;
} else {
mutex_enter(&dn->dn_mtx);
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
mutex_exit(&dn->dn_mtx);
return;
}
if (before) {
ASSERT(data);
user = &dn->dn_olduid;
group = &dn->dn_oldgid;
project = &dn->dn_oldprojid;
} else if (data) {
user = &dn->dn_newuid;
group = &dn->dn_newgid;
project = &dn->dn_newprojid;
}
/*
* Must always call the callback in case the object
* type has changed and that type isn't an object type to track
*/
error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data,
user, group, project);
/*
* Preserve existing uid/gid when the callback can't determine
* what the new uid/gid are and the callback returned EEXIST.
* The EEXIST error tells us to just use the existing uid/gid.
* If we don't know what the old values are then just assign
* them to 0, since that is a new file being created.
*/
if (!before && data == NULL && error == EEXIST) {
if (flags & DN_ID_OLD_EXIST) {
dn->dn_newuid = dn->dn_olduid;
dn->dn_newgid = dn->dn_oldgid;
dn->dn_newprojid = dn->dn_oldprojid;
} else {
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
}
error = 0;
}
if (db)
mutex_exit(&db->db_mtx);
mutex_enter(&dn->dn_mtx);
if (error == 0 && before)
dn->dn_id_flags |= DN_ID_OLD_EXIST;
if (error == 0 && !before)
dn->dn_id_flags |= DN_ID_NEW_EXIST;
if (have_spill) {
dn->dn_id_flags |= DN_ID_CHKED_SPILL;
} else {
dn->dn_id_flags |= DN_ID_CHKED_BONUS;
}
mutex_exit(&dn->dn_mtx);
if (have_spill)
dmu_buf_rele((dmu_buf_t *)db, FTAG);
}
boolean_t
dmu_objset_userspace_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_USERACCOUNTING_COMPLETE);
}
boolean_t
dmu_objset_userobjspace_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE);
}
boolean_t
dmu_objset_projectquota_present(objset_t *os)
{
return (os->os_phys->os_flags &
OBJSET_FLAG_PROJECTQUOTA_COMPLETE);
}
static int
dmu_objset_space_upgrade(objset_t *os)
{
uint64_t obj;
int err = 0;
/*
* We simply need to mark every object dirty, so that it will be
* synced out and now accounted. If this is called
* concurrently, or if we already did some work before crashing,
* that's fine, since we track each object's accounted state
* independently.
*/
for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) {
dmu_tx_t *tx;
dmu_buf_t *db;
int objerr;
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_exit)
err = SET_ERROR(EINTR);
mutex_exit(&os->os_upgrade_lock);
if (err != 0)
return (err);
if (issig(JUSTLOOKING) && issig(FORREAL))
return (SET_ERROR(EINTR));
objerr = dmu_bonus_hold(os, obj, FTAG, &db);
if (objerr != 0)
continue;
tx = dmu_tx_create(os);
dmu_tx_hold_bonus(tx, obj);
objerr = dmu_tx_assign(tx, TXG_WAIT);
if (objerr != 0) {
dmu_buf_rele(db, FTAG);
dmu_tx_abort(tx);
continue;
}
dmu_buf_will_dirty(db, tx);
dmu_buf_rele(db, FTAG);
dmu_tx_commit(tx);
}
return (0);
}
int
dmu_objset_userspace_upgrade(objset_t *os)
{
int err = 0;
if (dmu_objset_userspace_present(os))
return (0);
if (dmu_objset_is_snapshot(os))
return (SET_ERROR(EINVAL));
if (!dmu_objset_userused_enabled(os))
return (SET_ERROR(ENOTSUP));
err = dmu_objset_space_upgrade(os);
if (err)
return (err);
os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE;
txg_wait_synced(dmu_objset_pool(os), 0);
return (0);
}
static int
dmu_objset_id_quota_upgrade_cb(objset_t *os)
{
int err = 0;
if (dmu_objset_userobjspace_present(os) &&
dmu_objset_projectquota_present(os))
return (0);
if (dmu_objset_is_snapshot(os))
return (SET_ERROR(EINVAL));
if (!dmu_objset_userobjused_enabled(os))
return (SET_ERROR(ENOTSUP));
if (!dmu_objset_projectquota_enabled(os) &&
dmu_objset_userobjspace_present(os))
return (SET_ERROR(ENOTSUP));
dmu_objset_ds(os)->ds_feature_activation_needed[
SPA_FEATURE_USEROBJ_ACCOUNTING] = B_TRUE;
if (dmu_objset_projectquota_enabled(os))
dmu_objset_ds(os)->ds_feature_activation_needed[
SPA_FEATURE_PROJECT_QUOTA] = B_TRUE;
err = dmu_objset_space_upgrade(os);
if (err)
return (err);
os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE;
if (dmu_objset_projectquota_enabled(os))
os->os_flags |= OBJSET_FLAG_PROJECTQUOTA_COMPLETE;
txg_wait_synced(dmu_objset_pool(os), 0);
return (0);
}
void
dmu_objset_id_quota_upgrade(objset_t *os)
{
dmu_objset_upgrade(os, dmu_objset_id_quota_upgrade_cb);
}
boolean_t
dmu_objset_userobjspace_upgradable(objset_t *os)
{
return (dmu_objset_type(os) == DMU_OST_ZFS &&
!dmu_objset_is_snapshot(os) &&
dmu_objset_userobjused_enabled(os) &&
!dmu_objset_userobjspace_present(os));
}
boolean_t
dmu_objset_projectquota_upgradable(objset_t *os)
{
return (dmu_objset_type(os) == DMU_OST_ZFS &&
!dmu_objset_is_snapshot(os) &&
dmu_objset_projectquota_enabled(os) &&
!dmu_objset_projectquota_present(os));
}
void
dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
{
dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp,
usedobjsp, availobjsp);
}
uint64_t
dmu_objset_fsid_guid(objset_t *os)
{
return (dsl_dataset_fsid_guid(os->os_dsl_dataset));
}
void
dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat)
{
stat->dds_type = os->os_phys->os_type;
if (os->os_dsl_dataset)
dsl_dataset_fast_stat(os->os_dsl_dataset, stat);
}
void
dmu_objset_stats(objset_t *os, nvlist_t *nv)
{
ASSERT(os->os_dsl_dataset ||
os->os_phys->os_type == DMU_OST_META);
if (os->os_dsl_dataset != NULL)
dsl_dataset_stats(os->os_dsl_dataset, nv);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE,
os->os_phys->os_type);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING,
dmu_objset_userspace_present(os));
}
int
dmu_objset_is_snapshot(objset_t *os)
{
if (os->os_dsl_dataset != NULL)
return (os->os_dsl_dataset->ds_is_snapshot);
else
return (B_FALSE);
}
int
dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen,
boolean_t *conflict)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
uint64_t ignored;
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
return (SET_ERROR(ENOENT));
return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored,
MT_NORMALIZE, real, maxlen, conflict));
}
int
dmu_snapshot_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp, boolean_t *case_conflict)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
zap_cursor_t cursor;
zap_attribute_t attr;
ASSERT(dsl_pool_config_held(dmu_objset_pool(os)));
if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0)
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp);
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strcpy(name, attr.za_name);
if (idp)
*idp = attr.za_first_integer;
if (case_conflict)
*case_conflict = attr.za_normalization_conflict;
zap_cursor_advance(&cursor);
*offp = zap_cursor_serialize(&cursor);
zap_cursor_fini(&cursor);
return (0);
}
int
dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value)
{
return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value));
}
int
dmu_dir_list_next(objset_t *os, int namelen, char *name,
uint64_t *idp, uint64_t *offp)
{
dsl_dir_t *dd = os->os_dsl_dataset->ds_dir;
zap_cursor_t cursor;
zap_attribute_t attr;
/* there is no next dir on a snapshot! */
if (os->os_dsl_dataset->ds_object !=
dsl_dir_phys(dd)->dd_head_dataset_obj)
return (SET_ERROR(ENOENT));
zap_cursor_init_serialized(&cursor,
dd->dd_pool->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp);
if (zap_cursor_retrieve(&cursor, &attr) != 0) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENOENT));
}
if (strlen(attr.za_name) + 1 > namelen) {
zap_cursor_fini(&cursor);
return (SET_ERROR(ENAMETOOLONG));
}
(void) strcpy(name, attr.za_name);
if (idp)
*idp = attr.za_first_integer;
zap_cursor_advance(&cursor);
*offp = zap_cursor_serialize(&cursor);
zap_cursor_fini(&cursor);
return (0);
}
typedef struct dmu_objset_find_ctx {
taskq_t *dc_tq;
dsl_pool_t *dc_dp;
uint64_t dc_ddobj;
char *dc_ddname; /* last component of ddobj's name */
int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *);
void *dc_arg;
int dc_flags;
kmutex_t *dc_error_lock;
int *dc_error;
} dmu_objset_find_ctx_t;
static void
dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp)
{
dsl_pool_t *dp = dcp->dc_dp;
dsl_dir_t *dd;
dsl_dataset_t *ds;
zap_cursor_t zc;
zap_attribute_t *attr;
uint64_t thisobj;
int err = 0;
/* don't process if there already was an error */
if (*dcp->dc_error != 0)
goto out;
/*
* Note: passing the name (dc_ddname) here is optional, but it
* improves performance because we don't need to call
* zap_value_search() to determine the name.
*/
err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd);
if (err != 0)
goto out;
/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
if (dd->dd_myname[0] == '$') {
dsl_dir_rele(dd, FTAG);
goto out;
}
thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/*
* Iterate over all children.
*/
if (dcp->dc_flags & DS_FIND_CHILDREN) {
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
dmu_objset_find_ctx_t *child_dcp =
kmem_alloc(sizeof (*child_dcp), KM_SLEEP);
*child_dcp = *dcp;
child_dcp->dc_ddobj = attr->za_first_integer;
child_dcp->dc_ddname = spa_strdup(attr->za_name);
if (dcp->dc_tq != NULL)
(void) taskq_dispatch(dcp->dc_tq,
dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP);
else
dmu_objset_find_dp_impl(child_dcp);
}
zap_cursor_fini(&zc);
}
/*
* Iterate over all snapshots.
*/
if (dcp->dc_flags & DS_FIND_SNAPSHOTS) {
dsl_dataset_t *ds;
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
if (err == 0) {
uint64_t snapobj;
snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
dsl_dataset_rele(ds, FTAG);
for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
err = dsl_dataset_hold_obj(dp,
attr->za_first_integer, FTAG, &ds);
if (err != 0)
break;
err = dcp->dc_func(dp, ds, dcp->dc_arg);
dsl_dataset_rele(ds, FTAG);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
}
}
kmem_free(attr, sizeof (zap_attribute_t));
if (err != 0) {
dsl_dir_rele(dd, FTAG);
goto out;
}
/*
* Apply to self.
*/
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
/*
* Note: we hold the dir while calling dsl_dataset_hold_obj() so
* that the dir will remain cached, and we won't have to re-instantiate
* it (which could be expensive due to finding its name via
* zap_value_search()).
*/
dsl_dir_rele(dd, FTAG);
if (err != 0)
goto out;
err = dcp->dc_func(dp, ds, dcp->dc_arg);
dsl_dataset_rele(ds, FTAG);
out:
if (err != 0) {
mutex_enter(dcp->dc_error_lock);
/* only keep first error */
if (*dcp->dc_error == 0)
*dcp->dc_error = err;
mutex_exit(dcp->dc_error_lock);
}
if (dcp->dc_ddname != NULL)
spa_strfree(dcp->dc_ddname);
kmem_free(dcp, sizeof (*dcp));
}
static void
dmu_objset_find_dp_cb(void *arg)
{
dmu_objset_find_ctx_t *dcp = arg;
dsl_pool_t *dp = dcp->dc_dp;
/*
* We need to get a pool_config_lock here, as there are several
 * assert(pool_config_held) calls down the stack. Getting a lock via
* dsl_pool_config_enter is risky, as it might be stalled by a
* pending writer. This would deadlock, as the write lock can
* only be granted when our parent thread gives up the lock.
* The _prio interface gives us priority over a pending writer.
*/
dsl_pool_config_enter_prio(dp, FTAG);
dmu_objset_find_dp_impl(dcp);
dsl_pool_config_exit(dp, FTAG);
}
/*
* Find objsets under and including ddobj, call func(ds) on each.
* The order for the enumeration is completely undefined.
* func is called with dsl_pool_config held.
*/
int
dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj,
int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags)
{
int error = 0;
taskq_t *tq = NULL;
int ntasks;
dmu_objset_find_ctx_t *dcp;
kmutex_t err_lock;
mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL);
dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP);
dcp->dc_tq = NULL;
dcp->dc_dp = dp;
dcp->dc_ddobj = ddobj;
dcp->dc_ddname = NULL;
dcp->dc_func = func;
dcp->dc_arg = arg;
dcp->dc_flags = flags;
dcp->dc_error_lock = &err_lock;
dcp->dc_error = &error;
if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) {
/*
* In case a write lock is held we can't make use of
* parallelism, as down the stack of the worker threads
* the lock is asserted via dsl_pool_config_held.
* In case of a read lock this is solved by getting a read
* lock in each worker thread, which isn't possible in case
* of a writer lock. So we fall back to the synchronous path
* here.
* In the future it might be possible to get some magic into
* dsl_pool_config_held in a way that it returns true for
* the worker threads so that a single lock held from this
* thread suffices. For now, stay single threaded.
*/
dmu_objset_find_dp_impl(dcp);
mutex_destroy(&err_lock);
return (error);
}
ntasks = dmu_find_threads;
if (ntasks == 0)
ntasks = vdev_count_leaves(dp->dp_spa) * 4;
tq = taskq_create("dmu_objset_find", ntasks, maxclsyspri, ntasks,
INT_MAX, 0);
if (tq == NULL) {
kmem_free(dcp, sizeof (*dcp));
mutex_destroy(&err_lock);
return (SET_ERROR(ENOMEM));
}
dcp->dc_tq = tq;
/* dcp will be freed by task */
(void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP);
/*
* PORTING: this code relies on the property of taskq_wait to wait
* until no more tasks are queued and no more tasks are active. As
 * we always queue new tasks from within other tasks, taskq_wait
* reliably waits for the full recursion to finish, even though we
* enqueue new tasks after taskq_wait has been called.
* On platforms other than illumos, taskq_wait may not have this
* property.
*/
taskq_wait(tq);
taskq_destroy(tq);
mutex_destroy(&err_lock);
return (error);
}
/*
* Find all objsets under name, and for each, call 'func(child_name, arg)'.
* The dp_config_rwlock must not be held when this is called, and it
* will not be held when the callback is called.
* Therefore this function should only be used when the pool is not changing
* (e.g. in syncing context), or the callback can deal with the possible races.
*/
static int
dmu_objset_find_impl(spa_t *spa, const char *name,
int func(const char *, void *), void *arg, int flags)
{
dsl_dir_t *dd;
dsl_pool_t *dp = spa_get_dsl(spa);
dsl_dataset_t *ds;
zap_cursor_t zc;
zap_attribute_t *attr;
char *child;
uint64_t thisobj;
int err;
dsl_pool_config_enter(dp, FTAG);
err = dsl_dir_hold(dp, name, FTAG, &dd, NULL);
if (err != 0) {
dsl_pool_config_exit(dp, FTAG);
return (err);
}
/* Don't visit hidden ($MOS & $ORIGIN) objsets. */
if (dd->dd_myname[0] == '$') {
dsl_dir_rele(dd, FTAG);
dsl_pool_config_exit(dp, FTAG);
return (0);
}
thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj;
attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/*
* Iterate over all children.
*/
if (flags & DS_FIND_CHILDREN) {
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
child = kmem_asprintf("%s/%s", name, attr->za_name);
dsl_pool_config_exit(dp, FTAG);
err = dmu_objset_find_impl(spa, child,
func, arg, flags);
dsl_pool_config_enter(dp, FTAG);
strfree(child);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
if (err != 0) {
dsl_dir_rele(dd, FTAG);
dsl_pool_config_exit(dp, FTAG);
kmem_free(attr, sizeof (zap_attribute_t));
return (err);
}
}
/*
* Iterate over all snapshots.
*/
if (flags & DS_FIND_SNAPSHOTS) {
err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds);
if (err == 0) {
uint64_t snapobj;
snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
dsl_dataset_rele(ds, FTAG);
for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj);
zap_cursor_retrieve(&zc, attr) == 0;
(void) zap_cursor_advance(&zc)) {
ASSERT3U(attr->za_integer_length, ==,
sizeof (uint64_t));
ASSERT3U(attr->za_num_integers, ==, 1);
child = kmem_asprintf("%s@%s",
name, attr->za_name);
dsl_pool_config_exit(dp, FTAG);
err = func(child, arg);
dsl_pool_config_enter(dp, FTAG);
strfree(child);
if (err != 0)
break;
}
zap_cursor_fini(&zc);
}
}
dsl_dir_rele(dd, FTAG);
kmem_free(attr, sizeof (zap_attribute_t));
dsl_pool_config_exit(dp, FTAG);
if (err != 0)
return (err);
/* Apply to self. */
return (func(name, arg));
}
/*
* See comment above dmu_objset_find_impl().
*/
int
dmu_objset_find(char *name, int func(const char *, void *), void *arg,
int flags)
{
spa_t *spa;
int error;
error = spa_open(name, &spa, FTAG);
if (error != 0)
return (error);
error = dmu_objset_find_impl(spa, name, func, arg, flags);
spa_close(spa, FTAG);
return (error);
}
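/*
 * Editorial sketch (not part of this change): a typical caller passes
 * a per-dataset callback plus the DS_FIND_* flags used above.  The
 * callback and pool name are hypothetical.
 *
 *	static int
 *	count_cb(const char *name, void *arg)
 *	{
 *		uint64_t *counter = arg;
 *		(*counter)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	char poolname[] = "tank";
 *	int err = dmu_objset_find(poolname, count_cb, &count,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
 */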
boolean_t
dmu_objset_incompatible_encryption_version(objset_t *os)
{
return (dsl_dir_incompatible_encryption_version(
os->os_dsl_dataset->ds_dir));
}
void
dmu_objset_set_user(objset_t *os, void *user_ptr)
{
ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
os->os_user_ptr = user_ptr;
}
void *
dmu_objset_get_user(objset_t *os)
{
ASSERT(MUTEX_HELD(&os->os_user_ptr_lock));
return (os->os_user_ptr);
}
/*
* Determine name of filesystem, given name of snapshot.
* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes
*/
int
dmu_fsname(const char *snapname, char *buf)
{
char *atp = strchr(snapname, '@');
if (atp == NULL)
return (SET_ERROR(EINVAL));
if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strlcpy(buf, snapname, atp - snapname + 1);
return (0);
}
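/*
 * Editorial example (not part of this change): for the snapshot name
 * "tank/fs@snap", dmu_fsname() copies "tank/fs" into buf; a name
 * without an '@' returns EINVAL.
 *
 *	char fsname[ZFS_MAX_DATASET_NAME_LEN];
 *	int err = dmu_fsname("tank/fs@snap", fsname);
 *	(on success, fsname contains "tank/fs")
 */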
/*
* Call when we think we're going to write/free space in open context to track
* the amount of dirty data in the open txg, which is also the amount
* of memory that can not be evicted until this txg syncs.
*/
void
dmu_objset_willuse_space(objset_t *os, int64_t space, dmu_tx_t *tx)
{
dsl_dataset_t *ds = os->os_dsl_dataset;
int64_t aspace = spa_get_worst_case_asize(os->os_spa, space);
if (ds != NULL) {
dsl_dir_willuse_space(ds->ds_dir, aspace, tx);
dsl_pool_dirty_space(dmu_tx_pool(tx), space, tx);
}
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_objset_zil);
EXPORT_SYMBOL(dmu_objset_pool);
EXPORT_SYMBOL(dmu_objset_ds);
EXPORT_SYMBOL(dmu_objset_type);
EXPORT_SYMBOL(dmu_objset_name);
EXPORT_SYMBOL(dmu_objset_hold);
EXPORT_SYMBOL(dmu_objset_hold_flags);
EXPORT_SYMBOL(dmu_objset_own);
EXPORT_SYMBOL(dmu_objset_rele);
EXPORT_SYMBOL(dmu_objset_rele_flags);
EXPORT_SYMBOL(dmu_objset_disown);
EXPORT_SYMBOL(dmu_objset_from_ds);
EXPORT_SYMBOL(dmu_objset_create);
EXPORT_SYMBOL(dmu_objset_clone);
EXPORT_SYMBOL(dmu_objset_stats);
EXPORT_SYMBOL(dmu_objset_fast_stat);
EXPORT_SYMBOL(dmu_objset_spa);
EXPORT_SYMBOL(dmu_objset_space);
EXPORT_SYMBOL(dmu_objset_fsid_guid);
EXPORT_SYMBOL(dmu_objset_find);
EXPORT_SYMBOL(dmu_objset_byteswap);
EXPORT_SYMBOL(dmu_objset_evict_dbufs);
EXPORT_SYMBOL(dmu_objset_snap_cmtime);
EXPORT_SYMBOL(dmu_objset_dnodesize);
EXPORT_SYMBOL(dmu_objset_sync);
EXPORT_SYMBOL(dmu_objset_is_dirty);
EXPORT_SYMBOL(dmu_objset_create_impl_dnstats);
EXPORT_SYMBOL(dmu_objset_create_impl);
EXPORT_SYMBOL(dmu_objset_open_impl);
EXPORT_SYMBOL(dmu_objset_evict);
EXPORT_SYMBOL(dmu_objset_register_type);
EXPORT_SYMBOL(dmu_objset_do_userquota_updates);
EXPORT_SYMBOL(dmu_objset_userquota_get_ids);
EXPORT_SYMBOL(dmu_objset_userused_enabled);
EXPORT_SYMBOL(dmu_objset_userspace_upgrade);
EXPORT_SYMBOL(dmu_objset_userspace_present);
EXPORT_SYMBOL(dmu_objset_userobjused_enabled);
EXPORT_SYMBOL(dmu_objset_userobjspace_upgradable);
EXPORT_SYMBOL(dmu_objset_userobjspace_present);
EXPORT_SYMBOL(dmu_objset_projectquota_enabled);
EXPORT_SYMBOL(dmu_objset_projectquota_present);
EXPORT_SYMBOL(dmu_objset_projectquota_upgradable);
EXPORT_SYMBOL(dmu_objset_id_quota_upgrade);
#endif
diff --git a/module/zfs/dmu_tx.c b/module/zfs/dmu_tx.c
index f72859ba1a15..861769b40658 100644
--- a/module/zfs/dmu_tx.c
+++ b/module/zfs/dmu_tx.c
@@ -1,1379 +1,1396 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dbuf.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_pool.h>
#include <sys/zap_impl.h>
#include <sys/spa.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfs_context.h>
#include <sys/varargs.h>
#include <sys/trace_dmu.h>
typedef void (*dmu_tx_hold_func_t)(dmu_tx_t *tx, struct dnode *dn,
uint64_t arg1, uint64_t arg2);
dmu_tx_stats_t dmu_tx_stats = {
{ "dmu_tx_assigned", KSTAT_DATA_UINT64 },
{ "dmu_tx_delay", KSTAT_DATA_UINT64 },
{ "dmu_tx_error", KSTAT_DATA_UINT64 },
{ "dmu_tx_suspended", KSTAT_DATA_UINT64 },
{ "dmu_tx_group", KSTAT_DATA_UINT64 },
{ "dmu_tx_memory_reserve", KSTAT_DATA_UINT64 },
{ "dmu_tx_memory_reclaim", KSTAT_DATA_UINT64 },
{ "dmu_tx_dirty_throttle", KSTAT_DATA_UINT64 },
{ "dmu_tx_dirty_delay", KSTAT_DATA_UINT64 },
{ "dmu_tx_dirty_over_max", KSTAT_DATA_UINT64 },
{ "dmu_tx_quota", KSTAT_DATA_UINT64 },
};
static kstat_t *dmu_tx_ksp;
dmu_tx_t *
dmu_tx_create_dd(dsl_dir_t *dd)
{
dmu_tx_t *tx = kmem_zalloc(sizeof (dmu_tx_t), KM_SLEEP);
tx->tx_dir = dd;
if (dd != NULL)
tx->tx_pool = dd->dd_pool;
list_create(&tx->tx_holds, sizeof (dmu_tx_hold_t),
offsetof(dmu_tx_hold_t, txh_node));
list_create(&tx->tx_callbacks, sizeof (dmu_tx_callback_t),
offsetof(dmu_tx_callback_t, dcb_node));
tx->tx_start = gethrtime();
return (tx);
}
dmu_tx_t *
dmu_tx_create(objset_t *os)
{
dmu_tx_t *tx = dmu_tx_create_dd(os->os_dsl_dataset->ds_dir);
tx->tx_objset = os;
return (tx);
}
dmu_tx_t *
dmu_tx_create_assigned(struct dsl_pool *dp, uint64_t txg)
{
dmu_tx_t *tx = dmu_tx_create_dd(NULL);
txg_verify(dp->dp_spa, txg);
tx->tx_pool = dp;
tx->tx_txg = txg;
tx->tx_anyobj = TRUE;
return (tx);
}
int
dmu_tx_is_syncing(dmu_tx_t *tx)
{
return (tx->tx_anyobj);
}
int
dmu_tx_private_ok(dmu_tx_t *tx)
{
return (tx->tx_anyobj);
}
static dmu_tx_hold_t *
dmu_tx_hold_dnode_impl(dmu_tx_t *tx, dnode_t *dn, enum dmu_tx_hold_type type,
uint64_t arg1, uint64_t arg2)
{
dmu_tx_hold_t *txh;
if (dn != NULL) {
(void) refcount_add(&dn->dn_holds, tx);
if (tx->tx_txg != 0) {
mutex_enter(&dn->dn_mtx);
/*
* dn->dn_assigned_txg == tx->tx_txg doesn't pose a
* problem, but there's no way for it to happen (for
* now, at least).
*/
ASSERT(dn->dn_assigned_txg == 0);
dn->dn_assigned_txg = tx->tx_txg;
(void) refcount_add(&dn->dn_tx_holds, tx);
mutex_exit(&dn->dn_mtx);
}
}
txh = kmem_zalloc(sizeof (dmu_tx_hold_t), KM_SLEEP);
txh->txh_tx = tx;
txh->txh_dnode = dn;
refcount_create(&txh->txh_space_towrite);
refcount_create(&txh->txh_memory_tohold);
txh->txh_type = type;
txh->txh_arg1 = arg1;
txh->txh_arg2 = arg2;
list_insert_tail(&tx->tx_holds, txh);
return (txh);
}
static dmu_tx_hold_t *
dmu_tx_hold_object_impl(dmu_tx_t *tx, objset_t *os, uint64_t object,
enum dmu_tx_hold_type type, uint64_t arg1, uint64_t arg2)
{
dnode_t *dn = NULL;
dmu_tx_hold_t *txh;
int err;
if (object != DMU_NEW_OBJECT) {
err = dnode_hold(os, object, FTAG, &dn);
if (err != 0) {
tx->tx_err = err;
return (NULL);
}
}
txh = dmu_tx_hold_dnode_impl(tx, dn, type, arg1, arg2);
if (dn != NULL)
dnode_rele(dn, FTAG);
return (txh);
}
void
dmu_tx_add_new_object(dmu_tx_t *tx, dnode_t *dn)
{
/*
* If we're syncing, they can manipulate any object anyhow, and
* the hold on the dnode_t can cause problems.
*/
if (!dmu_tx_is_syncing(tx))
(void) dmu_tx_hold_dnode_impl(tx, dn, THT_NEWOBJECT, 0, 0);
}
/*
* This function reads specified data from disk. The specified data will
 * be needed to perform the transaction -- i.e., it will be read after
* we do dmu_tx_assign(). There are two reasons that we read the data now
* (before dmu_tx_assign()):
*
* 1. Reading it now has potentially better performance. The transaction
* has not yet been assigned, so the TXG is not held open, and also the
 * caller typically has fewer locks held when calling dmu_tx_hold_*() than
* after the transaction has been assigned. This reduces the lock (and txg)
* hold times, thus reducing lock contention.
*
* 2. It is easier for callers (primarily the ZPL) to handle i/o errors
* that are detected before they start making changes to the DMU state
* (i.e. now). Once the transaction has been assigned, and some DMU
* state has been changed, it can be difficult to recover from an i/o
* error (e.g. to undo the changes already made in memory at the DMU
* layer). Typically code to do so does not exist in the caller -- it
* assumes that the data has already been cached and thus i/o errors are
* not possible.
*
* It has been observed that the i/o initiated here can be a performance
* problem, and it appears to be optional, because we don't look at the
* data which is read. However, removing this read would only serve to
* move the work elsewhere (after the dmu_tx_assign()), where it may
* have a greater impact on performance (in addition to the impact on
* fault tolerance noted above).
*/
static int
dmu_tx_check_ioerr(zio_t *zio, dnode_t *dn, int level, uint64_t blkid)
{
int err;
dmu_buf_impl_t *db;
rw_enter(&dn->dn_struct_rwlock, RW_READER);
db = dbuf_hold_level(dn, level, blkid, FTAG);
rw_exit(&dn->dn_struct_rwlock);
if (db == NULL)
return (SET_ERROR(EIO));
err = dbuf_read(db, zio, DB_RF_CANFAIL | DB_RF_NOPREFETCH);
dbuf_rele(db, FTAG);
return (err);
}
/* ARGSUSED */
static void
dmu_tx_count_write(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
dnode_t *dn = txh->txh_dnode;
int err = 0;
if (len == 0)
return;
(void) refcount_add_many(&txh->txh_space_towrite, len, FTAG);
if (refcount_count(&txh->txh_space_towrite) > 2 * DMU_MAX_ACCESS)
err = SET_ERROR(EFBIG);
if (dn == NULL)
return;
/*
* For i/o error checking, read the blocks that will be needed
* to perform the write: the first and last level-0 blocks (if
* they are not aligned, i.e. if they are partial-block writes),
* and all the level-1 blocks.
*/
if (dn->dn_maxblkid == 0) {
if (off < dn->dn_datablksz &&
(off > 0 || len < dn->dn_datablksz)) {
err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
} else {
zio_t *zio = zio_root(dn->dn_objset->os_spa,
NULL, NULL, ZIO_FLAG_CANFAIL);
/* first level-0 block */
uint64_t start = off >> dn->dn_datablkshift;
if (P2PHASE(off, dn->dn_datablksz) || len < dn->dn_datablksz) {
err = dmu_tx_check_ioerr(zio, dn, 0, start);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
/* last level-0 block */
uint64_t end = (off + len - 1) >> dn->dn_datablkshift;
if (end != start && end <= dn->dn_maxblkid &&
P2PHASE(off + len, dn->dn_datablksz)) {
err = dmu_tx_check_ioerr(zio, dn, 0, end);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
/* level-1 blocks */
if (dn->dn_nlevels > 1) {
int shft = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
for (uint64_t i = (start >> shft) + 1;
i < end >> shft; i++) {
err = dmu_tx_check_ioerr(zio, dn, 1, i);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
}
err = zio_wait(zio);
if (err != 0) {
txh->txh_tx->tx_err = err;
}
}
}
static void
dmu_tx_count_dnode(dmu_tx_hold_t *txh)
{
(void) refcount_add_many(&txh->txh_space_towrite, DNODE_MIN_SIZE, FTAG);
}
void
dmu_tx_hold_write(dmu_tx_t *tx, uint64_t object, uint64_t off, int len)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
ASSERT3U(len, <=, DMU_MAX_ACCESS);
ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_WRITE, off, len);
if (txh != NULL) {
dmu_tx_count_write(txh, off, len);
dmu_tx_count_dnode(txh);
}
}
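/*
 * Editorial sketch (not part of this change): the usual open-context
 * pattern around these hold functions is to create the tx, declare
 * the holds, assign with TXG_WAIT, then commit or abort.  The object
 * number and range are hypothetical.
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	(modify the held range here)
 *	dmu_tx_commit(tx);
 */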
+void
+dmu_tx_hold_remap_l1indirect(dmu_tx_t *tx, uint64_t object)
+{
+ dmu_tx_hold_t *txh;
+
+ ASSERT(tx->tx_txg == 0);
+ txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
+ object, THT_WRITE, 0, 0);
+ if (txh == NULL)
+ return;
+
+ dnode_t *dn = txh->txh_dnode;
+ (void) refcount_add_many(&txh->txh_space_towrite,
+ 1ULL << dn->dn_indblkshift, FTAG);
+ dmu_tx_count_dnode(txh);
+}
+
void
dmu_tx_hold_write_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, int len)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
ASSERT3U(len, <=, DMU_MAX_ACCESS);
ASSERT(len == 0 || UINT64_MAX - off >= len - 1);
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_WRITE, off, len);
if (txh != NULL) {
dmu_tx_count_write(txh, off, len);
dmu_tx_count_dnode(txh);
}
}
/*
* This function marks the transaction as being a "net free". The end
* result is that refquotas will be disabled for this transaction, and
* this transaction will be able to use half of the pool space overhead
* (see dsl_pool_adjustedsize()). Therefore this function should only
* be called for transactions that we expect will not cause a net increase
* in the amount of space used (but it's OK if that is occasionally not true).
*/
void
dmu_tx_mark_netfree(dmu_tx_t *tx)
{
tx->tx_netfree = B_TRUE;
}
static void
dmu_tx_hold_free_impl(dmu_tx_hold_t *txh, uint64_t off, uint64_t len)
{
dmu_tx_t *tx = txh->txh_tx;
dnode_t *dn = txh->txh_dnode;
int err;
ASSERT(tx->tx_txg == 0);
dmu_tx_count_dnode(txh);
if (off >= (dn->dn_maxblkid + 1) * dn->dn_datablksz)
return;
if (len == DMU_OBJECT_END)
len = (dn->dn_maxblkid + 1) * dn->dn_datablksz - off;
dmu_tx_count_dnode(txh);
/*
* For i/o error checking, we read the first and last level-0
* blocks if they are not aligned, and all the level-1 blocks.
*
* Note: dbuf_free_range() assumes that we have not instantiated
* any level-0 dbufs that will be completely freed. Therefore we must
* exercise care to not read or count the first and last blocks
* if they are blocksize-aligned.
*/
if (dn->dn_datablkshift == 0) {
if (off != 0 || len < dn->dn_datablksz)
dmu_tx_count_write(txh, 0, dn->dn_datablksz);
} else {
/* first block will be modified if it is not aligned */
if (!IS_P2ALIGNED(off, 1 << dn->dn_datablkshift))
dmu_tx_count_write(txh, off, 1);
/* last block will be modified if it is not aligned */
if (!IS_P2ALIGNED(off + len, 1 << dn->dn_datablkshift))
dmu_tx_count_write(txh, off + len, 1);
}
/*
* Check level-1 blocks.
*/
if (dn->dn_nlevels > 1) {
int shift = dn->dn_datablkshift + dn->dn_indblkshift -
SPA_BLKPTRSHIFT;
uint64_t start = off >> shift;
uint64_t end = (off + len) >> shift;
ASSERT(dn->dn_indblkshift != 0);
/*
* dnode_reallocate() can result in an object with indirect
* blocks having an odd data block size. In this case,
* just check the single block.
*/
if (dn->dn_datablkshift == 0)
start = end = 0;
zio_t *zio = zio_root(tx->tx_pool->dp_spa,
NULL, NULL, ZIO_FLAG_CANFAIL);
for (uint64_t i = start; i <= end; i++) {
uint64_t ibyte = i << shift;
err = dnode_next_offset(dn, 0, &ibyte, 2, 1, 0);
i = ibyte >> shift;
if (err == ESRCH || i > end)
break;
if (err != 0) {
tx->tx_err = err;
(void) zio_wait(zio);
return;
}
(void) refcount_add_many(&txh->txh_memory_tohold,
1 << dn->dn_indblkshift, FTAG);
err = dmu_tx_check_ioerr(zio, dn, 1, i);
if (err != 0) {
tx->tx_err = err;
(void) zio_wait(zio);
return;
}
}
err = zio_wait(zio);
if (err != 0) {
tx->tx_err = err;
return;
}
}
}
void
dmu_tx_hold_free(dmu_tx_t *tx, uint64_t object, uint64_t off, uint64_t len)
{
dmu_tx_hold_t *txh;
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_FREE, off, len);
if (txh != NULL)
(void) dmu_tx_hold_free_impl(txh, off, len);
}
void
dmu_tx_hold_free_by_dnode(dmu_tx_t *tx, dnode_t *dn, uint64_t off, uint64_t len)
{
dmu_tx_hold_t *txh;
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_FREE, off, len);
if (txh != NULL)
(void) dmu_tx_hold_free_impl(txh, off, len);
}
static void
dmu_tx_hold_zap_impl(dmu_tx_hold_t *txh, const char *name)
{
dmu_tx_t *tx = txh->txh_tx;
dnode_t *dn = txh->txh_dnode;
int err;
ASSERT(tx->tx_txg == 0);
dmu_tx_count_dnode(txh);
/*
 * Modifying an almost-full microzap is around the worst case (128KB).
*
* If it is a fat zap, the worst case would be 7*16KB=112KB:
* - 3 blocks overwritten: target leaf, ptrtbl block, header block
* - 4 new blocks written if adding:
* - 2 blocks for possibly split leaves,
* - 2 grown ptrtbl blocks
*/
(void) refcount_add_many(&txh->txh_space_towrite,
MZAP_MAX_BLKSZ, FTAG);
if (dn == NULL)
return;
ASSERT3U(DMU_OT_BYTESWAP(dn->dn_type), ==, DMU_BSWAP_ZAP);
if (dn->dn_maxblkid == 0 || name == NULL) {
/*
* This is a microzap (only one block), or we don't know
* the name. Check the first block for i/o errors.
*/
err = dmu_tx_check_ioerr(NULL, dn, 0, 0);
if (err != 0) {
tx->tx_err = err;
}
} else {
/*
* Access the name so that we'll check for i/o errors to
* the leaf blocks, etc. We ignore ENOENT, as this name
* may not yet exist.
*/
err = zap_lookup_by_dnode(dn, name, 8, 0, NULL);
if (err == EIO || err == ECKSUM || err == ENXIO) {
tx->tx_err = err;
}
}
}
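/*
 * To make the charge in dmu_tx_hold_zap_impl() concrete: the fat-zap worst
 * case worked out above is 7 blocks of 16KB = 112KB, which is less than
 * the 128KB almost-full-microzap case, so the single charge of
 * MZAP_MAX_BLKSZ is a conservative bound that covers both.
 */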
void
dmu_tx_hold_zap(dmu_tx_t *tx, uint64_t object, int add, const char *name)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_ZAP, add, (uintptr_t)name);
if (txh != NULL)
dmu_tx_hold_zap_impl(txh, name);
}
void
dmu_tx_hold_zap_by_dnode(dmu_tx_t *tx, dnode_t *dn, int add, const char *name)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
ASSERT(dn != NULL);
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_ZAP, add, (uintptr_t)name);
if (txh != NULL)
dmu_tx_hold_zap_impl(txh, name);
}
void
dmu_tx_hold_bonus(dmu_tx_t *tx, uint64_t object)
{
dmu_tx_hold_t *txh;
ASSERT(tx->tx_txg == 0);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
object, THT_BONUS, 0, 0);
if (txh)
dmu_tx_count_dnode(txh);
}
void
dmu_tx_hold_bonus_by_dnode(dmu_tx_t *tx, dnode_t *dn)
{
dmu_tx_hold_t *txh;
ASSERT0(tx->tx_txg);
txh = dmu_tx_hold_dnode_impl(tx, dn, THT_BONUS, 0, 0);
if (txh)
dmu_tx_count_dnode(txh);
}
void
dmu_tx_hold_space(dmu_tx_t *tx, uint64_t space)
{
dmu_tx_hold_t *txh;
ASSERT(tx->tx_txg == 0);
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset,
DMU_NEW_OBJECT, THT_SPACE, space, 0);
if (txh)
(void) refcount_add_many(&txh->txh_space_towrite, space, FTAG);
}
#ifdef ZFS_DEBUG
void
dmu_tx_dirty_buf(dmu_tx_t *tx, dmu_buf_impl_t *db)
{
boolean_t match_object = B_FALSE;
boolean_t match_offset = B_FALSE;
DB_DNODE_ENTER(db);
dnode_t *dn = DB_DNODE(db);
ASSERT(tx->tx_txg != 0);
ASSERT(tx->tx_objset == NULL || dn->dn_objset == tx->tx_objset);
ASSERT3U(dn->dn_object, ==, db->db.db_object);
if (tx->tx_anyobj) {
DB_DNODE_EXIT(db);
return;
}
/* XXX No checking on the meta dnode for now */
if (db->db.db_object == DMU_META_DNODE_OBJECT) {
DB_DNODE_EXIT(db);
return;
}
for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
txh = list_next(&tx->tx_holds, txh)) {
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
if (txh->txh_dnode == dn && txh->txh_type != THT_NEWOBJECT)
match_object = TRUE;
if (txh->txh_dnode == NULL || txh->txh_dnode == dn) {
int datablkshift = dn->dn_datablkshift ?
dn->dn_datablkshift : SPA_MAXBLOCKSHIFT;
int epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
int shift = datablkshift + epbs * db->db_level;
uint64_t beginblk = shift >= 64 ? 0 :
(txh->txh_arg1 >> shift);
uint64_t endblk = shift >= 64 ? 0 :
((txh->txh_arg1 + txh->txh_arg2 - 1) >> shift);
uint64_t blkid = db->db_blkid;
/* XXX txh_arg2 better not be zero... */
dprintf("found txh type %x beginblk=%llx endblk=%llx\n",
txh->txh_type, beginblk, endblk);
switch (txh->txh_type) {
case THT_WRITE:
if (blkid >= beginblk && blkid <= endblk)
match_offset = TRUE;
/*
* We will let this hold work for the bonus
* or spill buffer so that we don't need to
* hold it when creating a new object.
*/
if (blkid == DMU_BONUS_BLKID ||
blkid == DMU_SPILL_BLKID)
match_offset = TRUE;
/*
* They might have to increase nlevels,
* thus dirtying the new TLIBs. Or they
* might have to change the block size,
* thus dirtying the new lvl=0 blk=0.
*/
if (blkid == 0)
match_offset = TRUE;
break;
case THT_FREE:
/*
* We will dirty all the level 1 blocks in
* the free range and perhaps the first and
* last level 0 block.
*/
if (blkid >= beginblk && (blkid <= endblk ||
txh->txh_arg2 == DMU_OBJECT_END))
match_offset = TRUE;
break;
case THT_SPILL:
if (blkid == DMU_SPILL_BLKID)
match_offset = TRUE;
break;
case THT_BONUS:
if (blkid == DMU_BONUS_BLKID)
match_offset = TRUE;
break;
case THT_ZAP:
match_offset = TRUE;
break;
case THT_NEWOBJECT:
match_object = TRUE;
break;
default:
cmn_err(CE_PANIC, "bad txh_type %d",
txh->txh_type);
}
}
if (match_object && match_offset) {
DB_DNODE_EXIT(db);
return;
}
}
DB_DNODE_EXIT(db);
panic("dirtying dbuf obj=%llx lvl=%u blkid=%llx but not tx_held\n",
(u_longlong_t)db->db.db_object, db->db_level,
(u_longlong_t)db->db_blkid);
}
#endif
/*
* If we can't do 10 iops, something is wrong. Let us go ahead
* and hit zfs_dirty_data_max.
*/
hrtime_t zfs_delay_max_ns = 100 * MICROSEC; /* 100 milliseconds */
int zfs_delay_resolution_ns = 100 * 1000; /* 100 microseconds */
/*
* We delay transactions when we've determined that the backend storage
* isn't able to accommodate the rate of incoming writes.
*
* If there is already a transaction waiting, we delay relative to when
* that transaction finishes waiting. This way the calculated min_time
* is independent of the number of threads concurrently executing
* transactions.
*
* If we are the only waiter, wait relative to when the transaction
* started, rather than the current time. This credits the transaction for
* "time already served", e.g. reading indirect blocks.
*
* The minimum time for a transaction to take is calculated as:
* min_time = scale * (dirty - min) / (max - dirty)
* min_time is then capped at zfs_delay_max_ns.
*
* The delay has two degrees of freedom that can be adjusted via tunables.
* The percentage of dirty data at which we start to delay is defined by
* zfs_delay_min_dirty_percent. This should typically be at or above
* zfs_vdev_async_write_active_max_dirty_percent so that we only start to
* delay after writing at full speed has failed to keep up with the incoming
* write rate. The scale of the curve is defined by zfs_delay_scale. Roughly
* speaking, this variable determines the amount of delay at the midpoint of
* the curve.
*
* delay
* 10ms +-------------------------------------------------------------*+
* | *|
* 9ms + *+
* | *|
* 8ms + *+
* | * |
* 7ms + * +
* | * |
* 6ms + * +
* | * |
* 5ms + * +
* | * |
* 4ms + * +
* | * |
* 3ms + * +
* | * |
* 2ms + (midpoint) * +
* | | ** |
* 1ms + v *** +
* | zfs_delay_scale ----------> ******** |
* 0 +-------------------------------------*********----------------+
* 0% <- zfs_dirty_data_max -> 100%
*
* Note that since the delay is added to the outstanding time remaining on the
* most recent transaction, the delay is effectively the inverse of IOPS.
* Here the midpoint of 500us translates to 2000 IOPS. The shape of the curve
* was chosen such that small changes in the amount of accumulated dirty data
* in the first 3/4 of the curve yield relatively small differences in the
* amount of delay.
*
* The effects can be easier to understand when the amount of delay is
* represented on a log scale:
*
* delay
* 100ms +-------------------------------------------------------------++
* + +
* | |
* + *+
* 10ms + *+
* + ** +
* | (midpoint) ** |
* + | ** +
* 1ms + v **** +
* + zfs_delay_scale ----------> ***** +
* | **** |
* + **** +
* 100us + ** +
* + * +
* | * |
* + * +
* 10us + * +
* + +
* | |
* + +
* +--------------------------------------------------------------+
* 0% <- zfs_dirty_data_max -> 100%
*
* Note here that only as the amount of dirty data approaches its limit does
* the delay start to increase rapidly. The goal of a properly tuned system
* should be to keep the amount of dirty data out of that range by first
* ensuring that the appropriate limits are set for the I/O scheduler to reach
* optimal throughput on the backend storage, and then by changing the value
* of zfs_delay_scale to increase the steepness of the curve.
*/
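/*
 * A worked example of the min_time formula above (a sketch assuming the
 * default zfs_delay_scale of 500,000ns and zfs_delay_min_dirty_percent of
 * 60; zfs_dirty_data_max cancels out of the ratio):
 *
 *	at 80% dirty (the midpoint of the 60%-100% delay range):
 *	    min_time = 500,000 * (80 - 60) / (100 - 80) = 500,000ns = 500us
 *
 *	at 90% dirty:
 *	    min_time = 500,000 * (90 - 60) / (100 - 90) = 1,500,000ns = 1.5ms
 *
 * i.e. the delay stays modest until the amount of dirty data approaches
 * zfs_dirty_data_max, then climbs quickly toward zfs_delay_max_ns.
 */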
static void
dmu_tx_delay(dmu_tx_t *tx, uint64_t dirty)
{
dsl_pool_t *dp = tx->tx_pool;
uint64_t delay_min_bytes =
zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
hrtime_t wakeup, min_tx_time, now;
if (dirty <= delay_min_bytes)
return;
/*
* The caller has already waited until we are under the max.
* We make them pass us the amount of dirty data so we don't
* have to handle the case of it being >= the max, which could
* cause a divide-by-zero if it's == the max.
*/
ASSERT3U(dirty, <, zfs_dirty_data_max);
now = gethrtime();
min_tx_time = zfs_delay_scale *
(dirty - delay_min_bytes) / (zfs_dirty_data_max - dirty);
min_tx_time = MIN(min_tx_time, zfs_delay_max_ns);
if (now > tx->tx_start + min_tx_time)
return;
DTRACE_PROBE3(delay__mintime, dmu_tx_t *, tx, uint64_t, dirty,
uint64_t, min_tx_time);
mutex_enter(&dp->dp_lock);
wakeup = MAX(tx->tx_start + min_tx_time,
dp->dp_last_wakeup + min_tx_time);
dp->dp_last_wakeup = wakeup;
mutex_exit(&dp->dp_lock);
zfs_sleep_until(wakeup);
}
/*
* This routine attempts to assign the transaction to a transaction group.
* To do so, we must determine if there is sufficient free space on disk.
*
* If this is a "netfree" transaction (i.e. we called dmu_tx_mark_netfree()
* on it), then it is assumed that there is sufficient free space,
* unless there's insufficient slop space in the pool (see the comment
* above spa_slop_shift in spa_misc.c).
*
* If it is not a "netfree" transaction, then if the data already on disk
* is over the allowed usage (e.g. quota), this will fail with EDQUOT or
* ENOSPC. Otherwise, if the current rough estimate of pending changes,
* plus the rough estimate of this transaction's changes, may exceed the
* allowed usage, then this will fail with ERESTART, which will cause the
* caller to wait for the pending changes to be written to disk (by waiting
* for the next TXG to open), and then check the space usage again.
*
* The rough estimate of pending changes consists of the sum of:
*
* - this transaction's holds' txh_space_towrite
*
* - dd_tempreserved[], which is the sum of in-flight transactions'
* holds' txh_space_towrite (i.e. those transactions that have called
* dmu_tx_assign() but not yet called dmu_tx_commit()).
*
* - dd_space_towrite[], which is the amount of dirtied dbufs.
*
* Note that all of these values are inflated by spa_get_worst_case_asize(),
* which means that we may get ERESTART well before we are actually in danger
* of running out of space, but this also mitigates any small inaccuracies
* in the rough estimate (e.g. txh_space_towrite doesn't take into account
* indirect blocks, and dd_space_towrite[] doesn't take into account changes
* to the MOS).
*
* Note that due to this algorithm, it is possible to exceed the allowed
* usage by one transaction. Also, as we approach the allowed usage,
* we will allow a very limited amount of changes into each TXG, thus
* decreasing performance.
*/
static int
dmu_tx_try_assign(dmu_tx_t *tx, uint64_t txg_how)
{
spa_t *spa = tx->tx_pool->dp_spa;
ASSERT0(tx->tx_txg);
if (tx->tx_err) {
DMU_TX_STAT_BUMP(dmu_tx_error);
return (tx->tx_err);
}
if (spa_suspended(spa)) {
DMU_TX_STAT_BUMP(dmu_tx_suspended);
/*
* If the user has indicated a blocking failure mode
* then return ERESTART which will block in dmu_tx_wait().
* Otherwise, return EIO so that an error can get
* propagated back to the VOP calls.
*
* Note that we always honor the txg_how flag regardless
* of the failuremode setting.
*/
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_CONTINUE &&
!(txg_how & TXG_WAIT))
return (SET_ERROR(EIO));
return (SET_ERROR(ERESTART));
}
if (!tx->tx_dirty_delayed &&
dsl_pool_need_dirty_delay(tx->tx_pool)) {
tx->tx_wait_dirty = B_TRUE;
DMU_TX_STAT_BUMP(dmu_tx_dirty_delay);
return (SET_ERROR(ERESTART));
}
tx->tx_txg = txg_hold_open(tx->tx_pool, &tx->tx_txgh);
tx->tx_needassign_txh = NULL;
/*
* NB: No error returns are allowed after txg_hold_open, but
* before processing the dnode holds, due to the
* dmu_tx_unassign() logic.
*/
uint64_t towrite = 0;
uint64_t tohold = 0;
for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
txh = list_next(&tx->tx_holds, txh)) {
dnode_t *dn = txh->txh_dnode;
if (dn != NULL) {
mutex_enter(&dn->dn_mtx);
if (dn->dn_assigned_txg == tx->tx_txg - 1) {
mutex_exit(&dn->dn_mtx);
tx->tx_needassign_txh = txh;
DMU_TX_STAT_BUMP(dmu_tx_group);
return (SET_ERROR(ERESTART));
}
if (dn->dn_assigned_txg == 0)
dn->dn_assigned_txg = tx->tx_txg;
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
(void) refcount_add(&dn->dn_tx_holds, tx);
mutex_exit(&dn->dn_mtx);
}
towrite += refcount_count(&txh->txh_space_towrite);
tohold += refcount_count(&txh->txh_memory_tohold);
}
/* needed allocation: worst-case estimate of write space */
uint64_t asize = spa_get_worst_case_asize(tx->tx_pool->dp_spa, towrite);
/* calculate memory footprint estimate */
uint64_t memory = towrite + tohold;
if (tx->tx_dir != NULL && asize != 0) {
int err = dsl_dir_tempreserve_space(tx->tx_dir, memory,
asize, tx->tx_netfree, &tx->tx_tempreserve_cookie, tx);
if (err != 0)
return (err);
}
DMU_TX_STAT_BUMP(dmu_tx_assigned);
return (0);
}
static void
dmu_tx_unassign(dmu_tx_t *tx)
{
if (tx->tx_txg == 0)
return;
txg_rele_to_quiesce(&tx->tx_txgh);
/*
* Walk the transaction's hold list, removing the hold on the
* associated dnode, and notifying waiters if the refcount drops to 0.
*/
for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds);
txh && txh != tx->tx_needassign_txh;
txh = list_next(&tx->tx_holds, txh)) {
dnode_t *dn = txh->txh_dnode;
if (dn == NULL)
continue;
mutex_enter(&dn->dn_mtx);
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
dn->dn_assigned_txg = 0;
cv_broadcast(&dn->dn_notxholds);
}
mutex_exit(&dn->dn_mtx);
}
txg_rele_to_sync(&tx->tx_txgh);
tx->tx_lasttried_txg = tx->tx_txg;
tx->tx_txg = 0;
}
/*
* Assign tx to a transaction group; txg_how is a bitmask:
*
* If TXG_WAIT is set and the currently open txg is full, this function
* will wait until there's a new txg. This should be used when no locks
* are being held. With this bit set, this function will only fail if
* we're truly out of space (or over quota).
*
* If TXG_WAIT is *not* set and we can't assign into the currently open
* txg without blocking, this function will return immediately with
* ERESTART. This should be used whenever locks are being held. On an
* ERESTART error, the caller should drop all locks, call dmu_tx_wait(),
* and try again.
*
* If TXG_NOTHROTTLE is set, this indicates that this tx should not be
* delayed due to the ZFS Write Throttle (see comments in dsl_pool.c for
* details on the throttle). This is used by the VFS operations, after
* they have already called dmu_tx_wait() (though most likely on a
* different tx).
*/
int
dmu_tx_assign(dmu_tx_t *tx, uint64_t txg_how)
{
int err;
ASSERT(tx->tx_txg == 0);
ASSERT0(txg_how & ~(TXG_WAIT | TXG_NOTHROTTLE));
ASSERT(!dsl_pool_sync_context(tx->tx_pool));
/* If we might wait, we must not hold the config lock. */
IMPLY((txg_how & TXG_WAIT), !dsl_pool_config_held(tx->tx_pool));
if ((txg_how & TXG_NOTHROTTLE))
tx->tx_dirty_delayed = B_TRUE;
while ((err = dmu_tx_try_assign(tx, txg_how)) != 0) {
dmu_tx_unassign(tx);
if (err != ERESTART || !(txg_how & TXG_WAIT))
return (err);
dmu_tx_wait(tx);
}
txg_rele_to_quiesce(&tx->tx_txgh);
return (0);
}
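/*
 * A minimal sketch of the typical caller pattern for the functions above
 * ("os", "object", "off" and "len" are hypothetical placeholders):
 *
 *	dmu_tx_t *tx = dmu_tx_create(os);
 *	dmu_tx_hold_write(tx, object, off, len);
 *	int err = dmu_tx_assign(tx, TXG_WAIT);
 *	if (err != 0) {
 *		dmu_tx_abort(tx);
 *		return (err);
 *	}
 *	... dirty buffers in the context of this tx ...
 *	dmu_tx_commit(tx);
 *
 * Callers that hold locks instead assign without TXG_WAIT and, on
 * ERESTART, drop their locks, call dmu_tx_wait(), abort the tx, and retry
 * with a fresh transaction (optionally adding TXG_NOTHROTTLE on the retry,
 * as described above).
 */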
void
dmu_tx_wait(dmu_tx_t *tx)
{
spa_t *spa = tx->tx_pool->dp_spa;
dsl_pool_t *dp = tx->tx_pool;
hrtime_t before;
ASSERT(tx->tx_txg == 0);
ASSERT(!dsl_pool_config_held(tx->tx_pool));
before = gethrtime();
if (tx->tx_wait_dirty) {
uint64_t dirty;
/*
* dmu_tx_try_assign() has determined that we need to wait
* because we've consumed much or all of the dirty buffer
* space.
*/
mutex_enter(&dp->dp_lock);
if (dp->dp_dirty_total >= zfs_dirty_data_max)
DMU_TX_STAT_BUMP(dmu_tx_dirty_over_max);
while (dp->dp_dirty_total >= zfs_dirty_data_max)
cv_wait(&dp->dp_spaceavail_cv, &dp->dp_lock);
dirty = dp->dp_dirty_total;
mutex_exit(&dp->dp_lock);
dmu_tx_delay(tx, dirty);
tx->tx_wait_dirty = B_FALSE;
/*
* Note: setting tx_dirty_delayed only has effect if the
* caller used TXG_WAIT. Otherwise they are going to
* destroy this tx and try again. The common case,
* zfs_write(), uses TXG_WAIT.
*/
tx->tx_dirty_delayed = B_TRUE;
} else if (spa_suspended(spa) || tx->tx_lasttried_txg == 0) {
/*
* If the pool is suspended we need to wait until it
* is resumed. Note that it's possible that the pool
* has become active after this thread has tried to
* obtain a tx. If that's the case then tx_lasttried_txg
* would not have been set.
*/
txg_wait_synced(dp, spa_last_synced_txg(spa) + 1);
} else if (tx->tx_needassign_txh) {
dnode_t *dn = tx->tx_needassign_txh->txh_dnode;
mutex_enter(&dn->dn_mtx);
while (dn->dn_assigned_txg == tx->tx_lasttried_txg - 1)
cv_wait(&dn->dn_notxholds, &dn->dn_mtx);
mutex_exit(&dn->dn_mtx);
tx->tx_needassign_txh = NULL;
} else {
/*
* A dnode is assigned to the quiescing txg. Wait for its
* transaction to complete.
*/
txg_wait_open(tx->tx_pool, tx->tx_lasttried_txg + 1);
}
spa_tx_assign_add_nsecs(spa, gethrtime() - before);
}
static void
dmu_tx_destroy(dmu_tx_t *tx)
{
dmu_tx_hold_t *txh;
while ((txh = list_head(&tx->tx_holds)) != NULL) {
dnode_t *dn = txh->txh_dnode;
list_remove(&tx->tx_holds, txh);
refcount_destroy_many(&txh->txh_space_towrite,
refcount_count(&txh->txh_space_towrite));
refcount_destroy_many(&txh->txh_memory_tohold,
refcount_count(&txh->txh_memory_tohold));
kmem_free(txh, sizeof (dmu_tx_hold_t));
if (dn != NULL)
dnode_rele(dn, tx);
}
list_destroy(&tx->tx_callbacks);
list_destroy(&tx->tx_holds);
kmem_free(tx, sizeof (dmu_tx_t));
}
void
dmu_tx_commit(dmu_tx_t *tx)
{
ASSERT(tx->tx_txg != 0);
/*
* Go through the transaction's hold list and remove holds on
* associated dnodes, notifying waiters if no holds remain.
*/
for (dmu_tx_hold_t *txh = list_head(&tx->tx_holds); txh != NULL;
txh = list_next(&tx->tx_holds, txh)) {
dnode_t *dn = txh->txh_dnode;
if (dn == NULL)
continue;
mutex_enter(&dn->dn_mtx);
ASSERT3U(dn->dn_assigned_txg, ==, tx->tx_txg);
if (refcount_remove(&dn->dn_tx_holds, tx) == 0) {
dn->dn_assigned_txg = 0;
cv_broadcast(&dn->dn_notxholds);
}
mutex_exit(&dn->dn_mtx);
}
if (tx->tx_tempreserve_cookie)
dsl_dir_tempreserve_clear(tx->tx_tempreserve_cookie, tx);
if (!list_is_empty(&tx->tx_callbacks))
txg_register_callbacks(&tx->tx_txgh, &tx->tx_callbacks);
if (tx->tx_anyobj == FALSE)
txg_rele_to_sync(&tx->tx_txgh);
dmu_tx_destroy(tx);
}
void
dmu_tx_abort(dmu_tx_t *tx)
{
ASSERT(tx->tx_txg == 0);
/*
* Call any registered callbacks with an error code.
*/
if (!list_is_empty(&tx->tx_callbacks))
dmu_tx_do_callbacks(&tx->tx_callbacks, ECANCELED);
dmu_tx_destroy(tx);
}
uint64_t
dmu_tx_get_txg(dmu_tx_t *tx)
{
ASSERT(tx->tx_txg != 0);
return (tx->tx_txg);
}
dsl_pool_t *
dmu_tx_pool(dmu_tx_t *tx)
{
ASSERT(tx->tx_pool != NULL);
return (tx->tx_pool);
}
void
dmu_tx_callback_register(dmu_tx_t *tx, dmu_tx_callback_func_t *func, void *data)
{
dmu_tx_callback_t *dcb;
dcb = kmem_alloc(sizeof (dmu_tx_callback_t), KM_SLEEP);
dcb->dcb_func = func;
dcb->dcb_data = data;
list_insert_tail(&tx->tx_callbacks, dcb);
}
/*
* Call all the commit callbacks on a list, with a given error code.
*/
void
dmu_tx_do_callbacks(list_t *cb_list, int error)
{
dmu_tx_callback_t *dcb;
while ((dcb = list_tail(cb_list)) != NULL) {
list_remove(cb_list, dcb);
dcb->dcb_func(dcb->dcb_data, error);
kmem_free(dcb, sizeof (dmu_tx_callback_t));
}
}
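/*
 * A small usage sketch of the callback interface above ("my_commit_cb" and
 * "my_data" are hypothetical; the callback signature mirrors how
 * dmu_tx_do_callbacks() invokes dcb_func):
 *
 *	static void
 *	my_commit_cb(void *data, int error)
 *	{
 *		... error is typically 0 once the txg has synced, or an
 *		... error such as ECANCELED if the tx was aborted (see
 *		... dmu_tx_abort() above)
 *	}
 *
 *	dmu_tx_callback_register(tx, my_commit_cb, my_data);
 */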
/*
* Interface to hold a bunch of attributes, used when creating new files.
* attrsize is the total size of all attributes to be added during
* object creation.
*
* For updating/adding a single attribute dmu_tx_hold_sa() should be used.
*/
/*
* Hold the necessary attribute name for attribute registration.
* It should be a very rare case where this is needed; if it does
* happen it would only happen on the first write to the file system.
*/
static void
dmu_tx_sa_registration_hold(sa_os_t *sa, dmu_tx_t *tx)
{
if (!sa->sa_need_attr_registration)
return;
for (int i = 0; i != sa->sa_num_attrs; i++) {
if (!sa->sa_attr_table[i].sa_registered) {
if (sa->sa_reg_attr_obj)
dmu_tx_hold_zap(tx, sa->sa_reg_attr_obj,
B_TRUE, sa->sa_attr_table[i].sa_name);
else
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT,
B_TRUE, sa->sa_attr_table[i].sa_name);
}
}
}
void
dmu_tx_hold_spill(dmu_tx_t *tx, uint64_t object)
{
dmu_tx_hold_t *txh;
txh = dmu_tx_hold_object_impl(tx, tx->tx_objset, object,
THT_SPILL, 0, 0);
if (txh != NULL)
(void) refcount_add_many(&txh->txh_space_towrite,
SPA_OLD_MAXBLOCKSIZE, FTAG);
}
void
dmu_tx_hold_sa_create(dmu_tx_t *tx, int attrsize)
{
sa_os_t *sa = tx->tx_objset->os_sa;
dmu_tx_hold_bonus(tx, DMU_NEW_OBJECT);
if (tx->tx_objset->os_sa->sa_master_obj == 0)
return;
if (tx->tx_objset->os_sa->sa_layout_attr_obj) {
dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
} else {
dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
}
dmu_tx_sa_registration_hold(sa, tx);
if (attrsize <= DN_OLD_MAX_BONUSLEN && !sa->sa_force_spill)
return;
(void) dmu_tx_hold_object_impl(tx, tx->tx_objset, DMU_NEW_OBJECT,
THT_SPILL, 0, 0);
}
/*
* Hold SA attribute
*
* dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
*
* may_grow indicates whether this change may add attributes or grow
* variable-sized ones; if so, the layout ZAP and the spill block are
* held as well (see the code below).
*/
void
dmu_tx_hold_sa(dmu_tx_t *tx, sa_handle_t *hdl, boolean_t may_grow)
{
uint64_t object;
sa_os_t *sa = tx->tx_objset->os_sa;
ASSERT(hdl != NULL);
object = sa_handle_object(hdl);
dmu_tx_hold_bonus(tx, object);
if (tx->tx_objset->os_sa->sa_master_obj == 0)
return;
if (tx->tx_objset->os_sa->sa_reg_attr_obj == 0 ||
tx->tx_objset->os_sa->sa_layout_attr_obj == 0) {
dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_LAYOUTS);
dmu_tx_hold_zap(tx, sa->sa_master_obj, B_TRUE, SA_REGISTRY);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, B_TRUE, NULL);
}
dmu_tx_sa_registration_hold(sa, tx);
if (may_grow && tx->tx_objset->os_sa->sa_layout_attr_obj)
dmu_tx_hold_zap(tx, sa->sa_layout_attr_obj, B_TRUE, NULL);
if (sa->sa_force_spill || may_grow || hdl->sa_spill) {
ASSERT(tx->tx_txg == 0);
dmu_tx_hold_spill(tx, object);
} else {
dmu_buf_impl_t *db = (dmu_buf_impl_t *)hdl->sa_bonus;
dnode_t *dn;
DB_DNODE_ENTER(db);
dn = DB_DNODE(db);
if (dn->dn_have_spill) {
ASSERT(tx->tx_txg == 0);
dmu_tx_hold_spill(tx, object);
}
DB_DNODE_EXIT(db);
}
}
void
dmu_tx_init(void)
{
dmu_tx_ksp = kstat_create("zfs", 0, "dmu_tx", "misc",
KSTAT_TYPE_NAMED, sizeof (dmu_tx_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dmu_tx_ksp != NULL) {
dmu_tx_ksp->ks_data = &dmu_tx_stats;
kstat_install(dmu_tx_ksp);
}
}
void
dmu_tx_fini(void)
{
if (dmu_tx_ksp != NULL) {
kstat_delete(dmu_tx_ksp);
dmu_tx_ksp = NULL;
}
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dmu_tx_create);
EXPORT_SYMBOL(dmu_tx_hold_write);
EXPORT_SYMBOL(dmu_tx_hold_write_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_free);
EXPORT_SYMBOL(dmu_tx_hold_free_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_zap);
EXPORT_SYMBOL(dmu_tx_hold_zap_by_dnode);
EXPORT_SYMBOL(dmu_tx_hold_bonus);
EXPORT_SYMBOL(dmu_tx_hold_bonus_by_dnode);
EXPORT_SYMBOL(dmu_tx_abort);
EXPORT_SYMBOL(dmu_tx_assign);
EXPORT_SYMBOL(dmu_tx_wait);
EXPORT_SYMBOL(dmu_tx_commit);
EXPORT_SYMBOL(dmu_tx_mark_netfree);
EXPORT_SYMBOL(dmu_tx_get_txg);
EXPORT_SYMBOL(dmu_tx_callback_register);
EXPORT_SYMBOL(dmu_tx_do_callbacks);
EXPORT_SYMBOL(dmu_tx_hold_spill);
EXPORT_SYMBOL(dmu_tx_hold_sa_create);
EXPORT_SYMBOL(dmu_tx_hold_sa);
#endif
diff --git a/module/zfs/dmu_zfetch.c b/module/zfs/dmu_zfetch.c
index a2bf80993f9e..e22560ed1d43 100644
--- a/module/zfs/dmu_zfetch.c
+++ b/module/zfs/dmu_zfetch.c
@@ -1,368 +1,378 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2015 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dnode.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_zfetch.h>
#include <sys/dmu.h>
#include <sys/dbuf.h>
#include <sys/kstat.h>
/*
* This tunable disables predictive prefetch. Note that it leaves "prescient"
* prefetch (e.g. prefetch for zfs send) intact. Unlike predictive prefetch,
* prescient prefetch never issues i/os that end up not being needed,
* so it can't hurt performance.
*/
int zfs_prefetch_disable = B_FALSE;
/* max # of streams per zfetch */
unsigned int zfetch_max_streams = 8;
/* min time before stream reclaim */
unsigned int zfetch_min_sec_reap = 2;
/* max bytes to prefetch per stream (default 8MB) */
unsigned int zfetch_max_distance = 8 * 1024 * 1024;
/* max bytes to prefetch indirects for per stream (default 64MB) */
unsigned int zfetch_max_idistance = 64 * 1024 * 1024;
/* max number of bytes in an array_read in which we allow prefetching (1MB) */
unsigned long zfetch_array_rd_sz = 1024 * 1024;
typedef struct zfetch_stats {
kstat_named_t zfetchstat_hits;
kstat_named_t zfetchstat_misses;
kstat_named_t zfetchstat_max_streams;
} zfetch_stats_t;
static zfetch_stats_t zfetch_stats = {
{ "hits", KSTAT_DATA_UINT64 },
{ "misses", KSTAT_DATA_UINT64 },
{ "max_streams", KSTAT_DATA_UINT64 },
};
#define ZFETCHSTAT_BUMP(stat) \
atomic_inc_64(&zfetch_stats.stat.value.ui64);
kstat_t *zfetch_ksp;
void
zfetch_init(void)
{
zfetch_ksp = kstat_create("zfs", 0, "zfetchstats", "misc",
KSTAT_TYPE_NAMED, sizeof (zfetch_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zfetch_ksp != NULL) {
zfetch_ksp->ks_data = &zfetch_stats;
kstat_install(zfetch_ksp);
}
}
void
zfetch_fini(void)
{
if (zfetch_ksp != NULL) {
kstat_delete(zfetch_ksp);
zfetch_ksp = NULL;
}
}
/*
* This takes a pointer to a zfetch structure and a dnode. It performs the
* necessary setup for the zfetch structure, grokking data from the
* associated dnode.
*/
void
dmu_zfetch_init(zfetch_t *zf, dnode_t *dno)
{
if (zf == NULL)
return;
zf->zf_dnode = dno;
list_create(&zf->zf_stream, sizeof (zstream_t),
offsetof(zstream_t, zs_node));
rw_init(&zf->zf_rwlock, NULL, RW_DEFAULT, NULL);
}
static void
dmu_zfetch_stream_remove(zfetch_t *zf, zstream_t *zs)
{
ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
list_remove(&zf->zf_stream, zs);
mutex_destroy(&zs->zs_lock);
kmem_free(zs, sizeof (*zs));
}
/*
* Clean-up state associated with a zfetch structure (e.g. destroy the
* streams). This doesn't free the zfetch_t itself; that's left to the caller.
*/
void
dmu_zfetch_fini(zfetch_t *zf)
{
zstream_t *zs;
ASSERT(!RW_LOCK_HELD(&zf->zf_rwlock));
rw_enter(&zf->zf_rwlock, RW_WRITER);
while ((zs = list_head(&zf->zf_stream)) != NULL)
dmu_zfetch_stream_remove(zf, zs);
rw_exit(&zf->zf_rwlock);
list_destroy(&zf->zf_stream);
rw_destroy(&zf->zf_rwlock);
zf->zf_dnode = NULL;
}
/*
* If there aren't too many streams already, create a new stream.
* The "blkid" argument is the next block that we expect this stream to access.
* While we're here, clean up old streams (which haven't been
* accessed for at least zfetch_min_sec_reap seconds).
*/
static void
dmu_zfetch_stream_create(zfetch_t *zf, uint64_t blkid)
{
zstream_t *zs_next;
int numstreams = 0;
ASSERT(RW_WRITE_HELD(&zf->zf_rwlock));
/*
* Clean up old streams.
*/
for (zstream_t *zs = list_head(&zf->zf_stream);
zs != NULL; zs = zs_next) {
zs_next = list_next(&zf->zf_stream, zs);
if (((gethrtime() - zs->zs_atime) / NANOSEC) >
zfetch_min_sec_reap)
dmu_zfetch_stream_remove(zf, zs);
else
numstreams++;
}
/*
* The maximum number of streams is normally zfetch_max_streams,
* but for small files we lower it such that it's at least possible
* for all the streams to be non-overlapping.
*
* If we are already at the maximum number of streams for this file,
* even after removing old streams, then don't create this stream.
*/
uint32_t max_streams = MAX(1, MIN(zfetch_max_streams,
zf->zf_dnode->dn_maxblkid * zf->zf_dnode->dn_datablksz /
zfetch_max_distance));
if (numstreams >= max_streams) {
ZFETCHSTAT_BUMP(zfetchstat_max_streams);
return;
}
zstream_t *zs = kmem_zalloc(sizeof (*zs), KM_SLEEP);
zs->zs_blkid = blkid;
zs->zs_pf_blkid = blkid;
zs->zs_ipf_blkid = blkid;
zs->zs_atime = gethrtime();
mutex_init(&zs->zs_lock, NULL, MUTEX_DEFAULT, NULL);
list_insert_head(&zf->zf_stream, zs);
}
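/*
 * For example, with the defaults above (zfetch_max_streams = 8,
 * zfetch_max_distance = 8MB): a file spanning roughly 4MB gets
 * MAX(1, MIN(8, 4MB / 8MB)) = 1 stream, a ~32MB file gets 4 streams, and
 * anything 64MB or larger is capped at the full 8 streams.
 */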
/*
* This is the predictive prefetch entry point. It associates the dnode
* access specified by the blkid and nblks arguments with a prefetch stream,
* predicts further accesses based on that stream's stats, and initiates
* speculative prefetch.
* fetch_data argument specifies whether actual data blocks should be fetched:
* FALSE -- prefetch only indirect blocks for predicted data blocks;
* TRUE -- prefetch predicted data blocks plus following indirect blocks.
*/
void
dmu_zfetch(zfetch_t *zf, uint64_t blkid, uint64_t nblks, boolean_t fetch_data)
{
zstream_t *zs;
int64_t pf_start, ipf_start, ipf_istart, ipf_iend;
int64_t pf_ahead_blks, max_blks;
int epbs, max_dist_blks, pf_nblks, ipf_nblks;
uint64_t end_of_access_blkid;
end_of_access_blkid = blkid + nblks;
+ spa_t *spa = zf->zf_dnode->dn_objset->os_spa;
if (zfs_prefetch_disable)
return;
+ /*
+ * If we haven't yet loaded the indirect vdevs' mappings, we
+ * can only read from blocks that we carefully ensure are on
+ * concrete vdevs (or previously-loaded indirect vdevs). So we
+ * can't allow the predictive prefetcher to attempt reads of other
+ * blocks (e.g. of the MOS's dnode object).
+ */
+ if (!spa_indirect_vdevs_loaded(spa))
+ return;
/*
* As a fast path for small (single-block) files, ignore access
* to the first block.
*/
if (blkid == 0)
return;
rw_enter(&zf->zf_rwlock, RW_READER);
/*
* Find matching prefetch stream. Depending on whether the accesses
* are block-aligned, first block of the new access may either follow
* the last block of the previous access, or be equal to it.
*/
for (zs = list_head(&zf->zf_stream); zs != NULL;
zs = list_next(&zf->zf_stream, zs)) {
if (blkid == zs->zs_blkid || blkid + 1 == zs->zs_blkid) {
mutex_enter(&zs->zs_lock);
/*
* zs_blkid could have changed before we
* acquired zs_lock; re-check it here.
*/
if (blkid == zs->zs_blkid) {
break;
} else if (blkid + 1 == zs->zs_blkid) {
blkid++;
nblks--;
if (nblks == 0) {
/* Already prefetched this before. */
mutex_exit(&zs->zs_lock);
rw_exit(&zf->zf_rwlock);
return;
}
break;
}
mutex_exit(&zs->zs_lock);
}
}
if (zs == NULL) {
/*
* This access is not part of any existing stream. Create
* a new stream for it.
*/
ZFETCHSTAT_BUMP(zfetchstat_misses);
if (rw_tryupgrade(&zf->zf_rwlock))
dmu_zfetch_stream_create(zf, end_of_access_blkid);
rw_exit(&zf->zf_rwlock);
return;
}
/*
* This access was to a block that we issued a prefetch for on
* behalf of this stream. Issue further prefetches for this stream.
*
* Normally, we start prefetching where we stopped
* prefetching last (zs_pf_blkid). But when we get our first
* hit on this stream, zs_pf_blkid == zs_blkid, we don't
* want to prefetch the block we just accessed. In this case,
* start just after the block we just accessed.
*/
pf_start = MAX(zs->zs_pf_blkid, end_of_access_blkid);
/*
* Double our amount of prefetched data, but don't let the
* prefetch get further ahead than zfetch_max_distance.
*/
if (fetch_data) {
max_dist_blks =
zfetch_max_distance >> zf->zf_dnode->dn_datablkshift;
/*
* Previously, we were (zs_pf_blkid - blkid) ahead. We
* want to now be double that, so read that amount again,
* plus the amount we are catching up by (i.e. the amount
* read just now).
*/
pf_ahead_blks = zs->zs_pf_blkid - blkid + nblks;
max_blks = max_dist_blks - (pf_start - end_of_access_blkid);
pf_nblks = MIN(pf_ahead_blks, max_blks);
} else {
pf_nblks = 0;
}
zs->zs_pf_blkid = pf_start + pf_nblks;
/*
* Do the same for indirects, starting from where we stopped last,
* or where we will stop reading data blocks (and the indirects
* that point to them).
*/
ipf_start = MAX(zs->zs_ipf_blkid, zs->zs_pf_blkid);
max_dist_blks = zfetch_max_idistance >> zf->zf_dnode->dn_datablkshift;
/*
* We want to double our distance ahead of the data prefetch
* (or reader, if we are not prefetching data). Previously, we
* were (zs_ipf_blkid - blkid) ahead. To double that, we read
* that amount again, plus the amount we are catching up by
* (i.e. the amount read now + the amount of data prefetched now).
*/
pf_ahead_blks = zs->zs_ipf_blkid - blkid + nblks + pf_nblks;
max_blks = max_dist_blks - (ipf_start - end_of_access_blkid);
ipf_nblks = MIN(pf_ahead_blks, max_blks);
zs->zs_ipf_blkid = ipf_start + ipf_nblks;
epbs = zf->zf_dnode->dn_indblkshift - SPA_BLKPTRSHIFT;
ipf_istart = P2ROUNDUP(ipf_start, 1 << epbs) >> epbs;
ipf_iend = P2ROUNDUP(zs->zs_ipf_blkid, 1 << epbs) >> epbs;
zs->zs_atime = gethrtime();
zs->zs_blkid = end_of_access_blkid;
mutex_exit(&zs->zs_lock);
rw_exit(&zf->zf_rwlock);
/*
* dbuf_prefetch() is asynchronous (even when it needs to read
* indirect blocks), but we still prefer to drop our locks before
* calling it to reduce the time we hold them.
*/
for (int i = 0; i < pf_nblks; i++) {
dbuf_prefetch(zf->zf_dnode, 0, pf_start + i,
ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
}
for (int64_t iblk = ipf_istart; iblk < ipf_iend; iblk++) {
dbuf_prefetch(zf->zf_dnode, 1, iblk,
ZIO_PRIORITY_ASYNC_READ, ARC_FLAG_PREDICTIVE_PREFETCH);
}
ZFETCHSTAT_BUMP(zfetchstat_hits);
}
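/*
 * An illustrative walk-through of the doubling logic above, with made-up
 * block numbers and 128K data blocks (so max_dist_blks = 8MB / 128K = 64):
 * suppose a stream last prefetched up to zs_pf_blkid = 20 and the reader
 * now accesses blkid = 10, nblks = 2 (end_of_access_blkid = 12). Then
 * pf_start = MAX(20, 12) = 20, pf_ahead_blks = 20 - 10 + 2 = 12,
 * max_blks = 64 - (20 - 12) = 56, and pf_nblks = MIN(12, 56) = 12, so
 * zs_pf_blkid becomes 32. The stream was 10 blocks ahead of the access and
 * is now 20 blocks ahead of it -- the prefetch distance roughly doubles on
 * each hit until it is clamped by zfetch_max_distance.
 */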
#if defined(_KERNEL) && defined(HAVE_SPL)
/* BEGIN CSTYLED */
module_param(zfs_prefetch_disable, int, 0644);
MODULE_PARM_DESC(zfs_prefetch_disable, "Disable all ZFS prefetching");
module_param(zfetch_max_streams, uint, 0644);
MODULE_PARM_DESC(zfetch_max_streams, "Max number of streams per zfetch");
module_param(zfetch_min_sec_reap, uint, 0644);
MODULE_PARM_DESC(zfetch_min_sec_reap, "Min time before stream reclaim");
module_param(zfetch_max_distance, uint, 0644);
MODULE_PARM_DESC(zfetch_max_distance,
"Max bytes to prefetch per stream (default 8MB)");
module_param(zfetch_array_rd_sz, ulong, 0644);
MODULE_PARM_DESC(zfetch_array_rd_sz, "Number of bytes in an array_read");
/* END CSTYLED */
#endif
diff --git a/module/zfs/dnode.c b/module/zfs/dnode.c
index a379527a0662..80d5f33d98be 100644
--- a/module/zfs/dnode.c
+++ b/module/zfs/dnode.c
@@ -1,2396 +1,2395 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dbuf.h>
#include <sys/dnode.h>
#include <sys/dmu.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/spa.h>
#include <sys/zio.h>
#include <sys/dmu_zfetch.h>
#include <sys/range_tree.h>
#include <sys/trace_dnode.h>
#include <sys/zfs_project.h>
dnode_stats_t dnode_stats = {
{ "dnode_hold_dbuf_hold", KSTAT_DATA_UINT64 },
{ "dnode_hold_dbuf_read", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_interior", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_alloc_type_none", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_hits", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_misses", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_overflow", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_refcount", KSTAT_DATA_UINT64 },
{ "dnode_hold_free_txg", KSTAT_DATA_UINT64 },
{ "dnode_free_interior_lock_retry", KSTAT_DATA_UINT64 },
{ "dnode_allocate", KSTAT_DATA_UINT64 },
{ "dnode_reallocate", KSTAT_DATA_UINT64 },
{ "dnode_buf_evict", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_chunk", KSTAT_DATA_UINT64 },
{ "dnode_alloc_race", KSTAT_DATA_UINT64 },
{ "dnode_alloc_next_block", KSTAT_DATA_UINT64 },
{ "dnode_move_invalid", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck1", KSTAT_DATA_UINT64 },
{ "dnode_move_recheck2", KSTAT_DATA_UINT64 },
{ "dnode_move_special", KSTAT_DATA_UINT64 },
{ "dnode_move_handle", KSTAT_DATA_UINT64 },
{ "dnode_move_rwlock", KSTAT_DATA_UINT64 },
{ "dnode_move_active", KSTAT_DATA_UINT64 },
};
static kstat_t *dnode_ksp;
static kmem_cache_t *dnode_cache;
ASSERTV(static dnode_phys_t dnode_phys_zero);
int zfs_default_bs = SPA_MINBLOCKSHIFT;
int zfs_default_ibs = DN_MAX_INDBLKSHIFT;
#ifdef _KERNEL
static kmem_cbrc_t dnode_move(void *, void *, size_t, void *);
#endif /* _KERNEL */
static int
dbuf_compare(const void *x1, const void *x2)
{
const dmu_buf_impl_t *d1 = x1;
const dmu_buf_impl_t *d2 = x2;
int cmp = AVL_CMP(d1->db_level, d2->db_level);
if (likely(cmp))
return (cmp);
cmp = AVL_CMP(d1->db_blkid, d2->db_blkid);
if (likely(cmp))
return (cmp);
if (d1->db_state == DB_SEARCH) {
ASSERT3S(d2->db_state, !=, DB_SEARCH);
return (-1);
} else if (d2->db_state == DB_SEARCH) {
ASSERT3S(d1->db_state, !=, DB_SEARCH);
return (1);
}
return (AVL_PCMP(d1, d2));
}
/* ARGSUSED */
static int
dnode_cons(void *arg, void *unused, int kmflag)
{
dnode_t *dn = arg;
int i;
rw_init(&dn->dn_struct_rwlock, NULL, RW_NOLOCKDEP, NULL);
mutex_init(&dn->dn_mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&dn->dn_dbufs_mtx, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dn->dn_notxholds, NULL, CV_DEFAULT, NULL);
/*
* Every dbuf has a reference, and dropping a tracked reference is
* O(number of references), so don't track dn_holds.
*/
refcount_create_untracked(&dn->dn_holds);
refcount_create(&dn->dn_tx_holds);
list_link_init(&dn->dn_link);
bzero(&dn->dn_next_nblkptr[0], sizeof (dn->dn_next_nblkptr));
bzero(&dn->dn_next_nlevels[0], sizeof (dn->dn_next_nlevels));
bzero(&dn->dn_next_indblkshift[0], sizeof (dn->dn_next_indblkshift));
bzero(&dn->dn_next_bonustype[0], sizeof (dn->dn_next_bonustype));
bzero(&dn->dn_rm_spillblk[0], sizeof (dn->dn_rm_spillblk));
bzero(&dn->dn_next_bonuslen[0], sizeof (dn->dn_next_bonuslen));
bzero(&dn->dn_next_blksz[0], sizeof (dn->dn_next_blksz));
bzero(&dn->dn_next_maxblkid[0], sizeof (dn->dn_next_maxblkid));
for (i = 0; i < TXG_SIZE; i++) {
multilist_link_init(&dn->dn_dirty_link[i]);
dn->dn_free_ranges[i] = NULL;
list_create(&dn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
}
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
dn->dn_dirtyctx_firstset = NULL;
dn->dn_bonus = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_zio = NULL;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dn->dn_dbufs_count = 0;
avl_create(&dn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
dn->dn_moved = 0;
return (0);
}
/* ARGSUSED */
static void
dnode_dest(void *arg, void *unused)
{
int i;
dnode_t *dn = arg;
rw_destroy(&dn->dn_struct_rwlock);
mutex_destroy(&dn->dn_mtx);
mutex_destroy(&dn->dn_dbufs_mtx);
cv_destroy(&dn->dn_notxholds);
refcount_destroy(&dn->dn_holds);
refcount_destroy(&dn->dn_tx_holds);
ASSERT(!list_link_active(&dn->dn_link));
for (i = 0; i < TXG_SIZE; i++) {
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
list_destroy(&dn->dn_dirty_records[i]);
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
}
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_free_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT0(dn->dn_dirtyctx);
ASSERT3P(dn->dn_dirtyctx_firstset, ==, NULL);
ASSERT3P(dn->dn_bonus, ==, NULL);
ASSERT(!dn->dn_have_spill);
ASSERT3P(dn->dn_zio, ==, NULL);
ASSERT0(dn->dn_oldused);
ASSERT0(dn->dn_oldflags);
ASSERT0(dn->dn_olduid);
ASSERT0(dn->dn_oldgid);
ASSERT0(dn->dn_oldprojid);
ASSERT0(dn->dn_newuid);
ASSERT0(dn->dn_newgid);
ASSERT0(dn->dn_newprojid);
ASSERT0(dn->dn_id_flags);
ASSERT0(dn->dn_dbufs_count);
avl_destroy(&dn->dn_dbufs);
}
void
dnode_init(void)
{
ASSERT(dnode_cache == NULL);
dnode_cache = kmem_cache_create("dnode_t", sizeof (dnode_t),
0, dnode_cons, dnode_dest, NULL, NULL, NULL, 0);
kmem_cache_set_move(dnode_cache, dnode_move);
dnode_ksp = kstat_create("zfs", 0, "dnodestats", "misc",
KSTAT_TYPE_NAMED, sizeof (dnode_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (dnode_ksp != NULL) {
dnode_ksp->ks_data = &dnode_stats;
kstat_install(dnode_ksp);
}
}
void
dnode_fini(void)
{
if (dnode_ksp != NULL) {
kstat_delete(dnode_ksp);
dnode_ksp = NULL;
}
kmem_cache_destroy(dnode_cache);
dnode_cache = NULL;
}
#ifdef ZFS_DEBUG
void
dnode_verify(dnode_t *dn)
{
int drop_struct_lock = FALSE;
ASSERT(dn->dn_phys);
ASSERT(dn->dn_objset);
ASSERT(dn->dn_handle->dnh_dnode == dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
if (!(zfs_flags & ZFS_DEBUG_DNODE_VERIFY))
return;
if (!RW_WRITE_HELD(&dn->dn_struct_rwlock)) {
rw_enter(&dn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
if (dn->dn_phys->dn_type != DMU_OT_NONE || dn->dn_allocated_txg != 0) {
int i;
int max_bonuslen = DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots);
ASSERT3U(dn->dn_indblkshift, <=, SPA_MAXBLOCKSHIFT);
if (dn->dn_datablkshift) {
ASSERT3U(dn->dn_datablkshift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(dn->dn_datablkshift, <=, SPA_MAXBLOCKSHIFT);
ASSERT3U(1<<dn->dn_datablkshift, ==, dn->dn_datablksz);
}
ASSERT3U(dn->dn_nlevels, <=, 30);
ASSERT(DMU_OT_IS_VALID(dn->dn_type));
ASSERT3U(dn->dn_nblkptr, >=, 1);
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
ASSERT3U(dn->dn_bonuslen, <=, max_bonuslen);
ASSERT3U(dn->dn_datablksz, ==,
dn->dn_datablkszsec << SPA_MINBLOCKSHIFT);
ASSERT3U(ISP2(dn->dn_datablksz), ==, dn->dn_datablkshift != 0);
ASSERT3U((dn->dn_nblkptr - 1) * sizeof (blkptr_t) +
dn->dn_bonuslen, <=, max_bonuslen);
for (i = 0; i < TXG_SIZE; i++) {
ASSERT3U(dn->dn_next_nlevels[i], <=, dn->dn_nlevels);
}
}
if (dn->dn_phys->dn_type != DMU_OT_NONE)
ASSERT3U(dn->dn_phys->dn_nlevels, <=, dn->dn_nlevels);
ASSERT(DMU_OBJECT_IS_SPECIAL(dn->dn_object) || dn->dn_dbuf != NULL);
if (dn->dn_dbuf != NULL) {
ASSERT3P(dn->dn_phys, ==,
(dnode_phys_t *)dn->dn_dbuf->db.db_data +
(dn->dn_object % (dn->dn_dbuf->db.db_size >> DNODE_SHIFT)));
}
if (drop_struct_lock)
rw_exit(&dn->dn_struct_rwlock);
}
#endif
void
dnode_byteswap(dnode_phys_t *dnp)
{
uint64_t *buf64 = (void*)&dnp->dn_blkptr;
int i;
if (dnp->dn_type == DMU_OT_NONE) {
bzero(dnp, sizeof (dnode_phys_t));
return;
}
dnp->dn_datablkszsec = BSWAP_16(dnp->dn_datablkszsec);
dnp->dn_bonuslen = BSWAP_16(dnp->dn_bonuslen);
dnp->dn_extra_slots = BSWAP_8(dnp->dn_extra_slots);
dnp->dn_maxblkid = BSWAP_64(dnp->dn_maxblkid);
dnp->dn_used = BSWAP_64(dnp->dn_used);
/*
* dn_nblkptr is only one byte, so it's OK to read it in either
* byte order. We can't read dn_bonuslen.
*/
ASSERT(dnp->dn_indblkshift <= SPA_MAXBLOCKSHIFT);
ASSERT(dnp->dn_nblkptr <= DN_MAX_NBLKPTR);
for (i = 0; i < dnp->dn_nblkptr * sizeof (blkptr_t)/8; i++)
buf64[i] = BSWAP_64(buf64[i]);
/*
* OK to check dn_bonuslen for zero, because it won't matter if
* we have the wrong byte order. This is necessary because the
* dnode dnode is smaller than a regular dnode.
*/
if (dnp->dn_bonuslen != 0) {
/*
* Note that the bonus length calculated here may be
* longer than the actual bonus buffer. This is because
* we always put the bonus buffer after the last block
* pointer (instead of packing it against the end of the
* dnode buffer).
*/
int off = (dnp->dn_nblkptr-1) * sizeof (blkptr_t);
int slots = dnp->dn_extra_slots + 1;
size_t len = DN_SLOTS_TO_BONUSLEN(slots) - off;
dmu_object_byteswap_t byteswap;
ASSERT(DMU_OT_IS_VALID(dnp->dn_bonustype));
byteswap = DMU_OT_BYTESWAP(dnp->dn_bonustype);
dmu_ot_byteswap[byteswap].ob_func(dnp->dn_bonus + off, len);
}
/* Swap SPILL block if we have one */
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR)
byteswap_uint64_array(DN_SPILL_BLKPTR(dnp), sizeof (blkptr_t));
}
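/*
 * To illustrate the arithmetic above for a classic 512-byte (single-slot)
 * dnode, assuming DN_SLOTS_TO_BONUSLEN(1) is 320 bytes and a block pointer
 * is 128 bytes: with dn_nblkptr = 3, off = 2 * 128 = 256 and
 * len = 320 - 256 = 64; with dn_nblkptr = 1 (e.g. an SA dnode), off = 0
 * and len = 320, which may be larger than the dnode's actual dn_bonuslen,
 * as noted above.
 */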
void
dnode_buf_byteswap(void *vbuf, size_t size)
{
int i = 0;
ASSERT3U(sizeof (dnode_phys_t), ==, (1<<DNODE_SHIFT));
ASSERT((size & (sizeof (dnode_phys_t)-1)) == 0);
while (i < size) {
dnode_phys_t *dnp = (void *)(((char *)vbuf) + i);
dnode_byteswap(dnp);
i += DNODE_MIN_SIZE;
if (dnp->dn_type != DMU_OT_NONE)
i += dnp->dn_extra_slots * DNODE_MIN_SIZE;
}
}
void
dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx)
{
ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
ASSERT3U(newsize, <=, DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t));
dn->dn_bonuslen = newsize;
if (newsize == 0)
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = DN_ZERO_BONUSLEN;
else
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_setbonus_type(dnode_t *dn, dmu_object_type_t newtype, dmu_tx_t *tx)
{
ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
dnode_setdirty(dn, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dn->dn_bonustype = newtype;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
rw_exit(&dn->dn_struct_rwlock);
}
void
dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx)
{
ASSERT3U(refcount_count(&dn->dn_holds), >=, 1);
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
dnode_setdirty(dn, tx);
dn->dn_rm_spillblk[tx->tx_txg&TXG_MASK] = DN_KILL_SPILLBLK;
dn->dn_have_spill = B_FALSE;
}
static void
dnode_setdblksz(dnode_t *dn, int size)
{
ASSERT0(P2PHASE(size, SPA_MINBLOCKSIZE));
ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
ASSERT3U(size, >=, SPA_MINBLOCKSIZE);
ASSERT3U(size >> SPA_MINBLOCKSHIFT, <,
1<<(sizeof (dn->dn_phys->dn_datablkszsec) * 8));
dn->dn_datablksz = size;
dn->dn_datablkszsec = size >> SPA_MINBLOCKSHIFT;
dn->dn_datablkshift = ISP2(size) ? highbit64(size - 1) : 0;
}
static dnode_t *
dnode_create(objset_t *os, dnode_phys_t *dnp, dmu_buf_impl_t *db,
uint64_t object, dnode_handle_t *dnh)
{
dnode_t *dn;
dn = kmem_cache_alloc(dnode_cache, KM_SLEEP);
ASSERT(!POINTER_IS_VALID(dn->dn_objset));
dn->dn_moved = 0;
/*
* Defer setting dn_objset until the dnode is ready to be a candidate
* for the dnode_move() callback.
*/
dn->dn_object = object;
dn->dn_dbuf = db;
dn->dn_handle = dnh;
dn->dn_phys = dnp;
if (dnp->dn_datablkszsec) {
dnode_setdblksz(dn, dnp->dn_datablkszsec << SPA_MINBLOCKSHIFT);
} else {
dn->dn_datablksz = 0;
dn->dn_datablkszsec = 0;
dn->dn_datablkshift = 0;
}
dn->dn_indblkshift = dnp->dn_indblkshift;
dn->dn_nlevels = dnp->dn_nlevels;
dn->dn_type = dnp->dn_type;
dn->dn_nblkptr = dnp->dn_nblkptr;
dn->dn_checksum = dnp->dn_checksum;
dn->dn_compress = dnp->dn_compress;
dn->dn_bonustype = dnp->dn_bonustype;
dn->dn_bonuslen = dnp->dn_bonuslen;
dn->dn_num_slots = dnp->dn_extra_slots + 1;
dn->dn_maxblkid = dnp->dn_maxblkid;
dn->dn_have_spill = ((dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) != 0);
dn->dn_id_flags = 0;
dmu_zfetch_init(&dn->dn_zfetch, dn);
ASSERT(DMU_OT_IS_VALID(dn->dn_phys->dn_type));
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
ASSERT(!DN_SLOT_IS_PTR(dnh->dnh_dnode));
mutex_enter(&os->os_lock);
/*
* Exclude special dnodes from os_dnodes so an empty os_dnodes
* signifies that the special dnodes have no references from
* their children (the entries in os_dnodes). This allows
* dnode_destroy() to easily determine if the last child has
* been removed and then complete eviction of the objset.
*/
if (!DMU_OBJECT_IS_SPECIAL(object))
list_insert_head(&os->os_dnodes, dn);
membar_producer();
/*
* Everything else must be valid before assigning dn_objset
* makes the dnode eligible for dnode_move().
*/
dn->dn_objset = os;
dnh->dnh_dnode = dn;
mutex_exit(&os->os_lock);
arc_space_consume(sizeof (dnode_t), ARC_SPACE_DNODE);
return (dn);
}
/*
* Caller must be holding the dnode handle, which is released upon return.
*/
static void
dnode_destroy(dnode_t *dn)
{
objset_t *os = dn->dn_objset;
boolean_t complete_os_eviction = B_FALSE;
ASSERT((dn->dn_id_flags & DN_ID_NEW_EXIST) == 0);
mutex_enter(&os->os_lock);
POINTER_INVALIDATE(&dn->dn_objset);
if (!DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
list_remove(&os->os_dnodes, dn);
complete_os_eviction =
list_is_empty(&os->os_dnodes) &&
list_link_active(&os->os_evicting_node);
}
mutex_exit(&os->os_lock);
/* the dnode can no longer move, so we can release the handle */
if (!zrl_is_locked(&dn->dn_handle->dnh_zrlock))
zrl_remove(&dn->dn_handle->dnh_zrlock);
dn->dn_allocated_txg = 0;
dn->dn_free_txg = 0;
dn->dn_assigned_txg = 0;
dn->dn_dirty_txg = 0;
dn->dn_dirtyctx = 0;
if (dn->dn_dirtyctx_firstset != NULL) {
kmem_free(dn->dn_dirtyctx_firstset, 1);
dn->dn_dirtyctx_firstset = NULL;
}
if (dn->dn_bonus != NULL) {
mutex_enter(&dn->dn_bonus->db_mtx);
dbuf_destroy(dn->dn_bonus);
dn->dn_bonus = NULL;
}
dn->dn_zio = NULL;
dn->dn_have_spill = B_FALSE;
dn->dn_oldused = 0;
dn->dn_oldflags = 0;
dn->dn_olduid = 0;
dn->dn_oldgid = 0;
dn->dn_oldprojid = ZFS_DEFAULT_PROJID;
dn->dn_newuid = 0;
dn->dn_newgid = 0;
dn->dn_newprojid = ZFS_DEFAULT_PROJID;
dn->dn_id_flags = 0;
dmu_zfetch_fini(&dn->dn_zfetch);
kmem_cache_free(dnode_cache, dn);
arc_space_return(sizeof (dnode_t), ARC_SPACE_DNODE);
if (complete_os_eviction)
dmu_objset_evict_done(os);
}
void
dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
{
int i;
ASSERT3U(dn_slots, >, 0);
ASSERT3U(dn_slots << DNODE_SHIFT, <=,
spa_maxdnodesize(dmu_objset_spa(dn->dn_objset)));
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (blocksize == 0)
blocksize = 1 << zfs_default_bs;
else
blocksize = P2ROUNDUP(blocksize, SPA_MINBLOCKSIZE);
if (ibs == 0)
ibs = zfs_default_ibs;
ibs = MIN(MAX(ibs, DN_MIN_INDBLKSHIFT), DN_MAX_INDBLKSHIFT);
dprintf("os=%p obj=%llu txg=%llu blocksize=%d ibs=%d dn_slots=%d\n",
dn->dn_objset, dn->dn_object, tx->tx_txg, blocksize, ibs, dn_slots);
DNODE_STAT_BUMP(dnode_allocate);
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT(bcmp(dn->dn_phys, &dnode_phys_zero, sizeof (dnode_phys_t)) == 0);
ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE);
ASSERT(ot != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(ot));
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=, DN_SLOTS_TO_BONUSLEN(dn_slots));
ASSERT(dn->dn_type == DMU_OT_NONE);
ASSERT0(dn->dn_maxblkid);
ASSERT0(dn->dn_allocated_txg);
ASSERT0(dn->dn_assigned_txg);
ASSERT0(dn->dn_dirty_txg);
ASSERT(refcount_is_zero(&dn->dn_tx_holds));
ASSERT3U(refcount_count(&dn->dn_holds), <=, 1);
ASSERT(avl_is_empty(&dn->dn_dbufs));
for (i = 0; i < TXG_SIZE; i++) {
ASSERT0(dn->dn_next_nblkptr[i]);
ASSERT0(dn->dn_next_nlevels[i]);
ASSERT0(dn->dn_next_indblkshift[i]);
ASSERT0(dn->dn_next_bonuslen[i]);
ASSERT0(dn->dn_next_bonustype[i]);
ASSERT0(dn->dn_rm_spillblk[i]);
ASSERT0(dn->dn_next_blksz[i]);
ASSERT0(dn->dn_next_maxblkid[i]);
ASSERT(!multilist_link_active(&dn->dn_dirty_link[i]));
ASSERT3P(list_head(&dn->dn_dirty_records[i]), ==, NULL);
ASSERT3P(dn->dn_free_ranges[i], ==, NULL);
}
dn->dn_type = ot;
dnode_setdblksz(dn, blocksize);
dn->dn_indblkshift = ibs;
dn->dn_nlevels = 1;
dn->dn_num_slots = dn_slots;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
dn->dn_nblkptr = 1;
else {
dn->dn_nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
}
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
dn->dn_dirtyctx = 0;
dn->dn_free_txg = 0;
if (dn->dn_dirtyctx_firstset) {
kmem_free(dn->dn_dirtyctx_firstset, 1);
dn->dn_dirtyctx_firstset = NULL;
}
dn->dn_allocated_txg = tx->tx_txg;
dn->dn_id_flags = 0;
dnode_setdirty(dn, tx);
dn->dn_next_indblkshift[tx->tx_txg & TXG_MASK] = ibs;
dn->dn_next_bonuslen[tx->tx_txg & TXG_MASK] = dn->dn_bonuslen;
dn->dn_next_bonustype[tx->tx_txg & TXG_MASK] = dn->dn_bonustype;
dn->dn_next_blksz[tx->tx_txg & TXG_MASK] = dn->dn_datablksz;
}
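/*
 * A quick illustration of the dn_nblkptr calculation above, assuming a
 * single-slot (512-byte) dnode where DN_SLOTS_TO_BONUSLEN(1) = 320 and
 * block pointers are 128 bytes: with bonuslen = 0 the formula gives
 * 1 + (320 >> 7) = 3, which is also the DN_MAX_NBLKPTR cap; with
 * bonuslen = 320 it gives exactly 1; and for bonustype == DMU_OT_SA the
 * code forces dn_nblkptr = 1 regardless, to leave maximum room for the
 * bonus buffer.
 */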
void
dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize,
dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx)
{
int nblkptr;
ASSERT3U(blocksize, >=, SPA_MINBLOCKSIZE);
ASSERT3U(blocksize, <=,
spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
ASSERT0(blocksize % SPA_MINBLOCKSIZE);
ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT || dmu_tx_private_ok(tx));
ASSERT(tx->tx_txg != 0);
ASSERT((bonustype == DMU_OT_NONE && bonuslen == 0) ||
(bonustype != DMU_OT_NONE && bonuslen != 0) ||
(bonustype == DMU_OT_SA && bonuslen == 0));
ASSERT(DMU_OT_IS_VALID(bonustype));
ASSERT3U(bonuslen, <=,
DN_BONUS_SIZE(spa_maxdnodesize(dmu_objset_spa(dn->dn_objset))));
dn_slots = dn_slots > 0 ? dn_slots : DNODE_MIN_SLOTS;
dnode_free_interior_slots(dn);
DNODE_STAT_BUMP(dnode_reallocate);
/* clean up any unreferenced dbufs */
dnode_evict_dbufs(dn);
dn->dn_id_flags = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
dnode_setdirty(dn, tx);
if (dn->dn_datablksz != blocksize) {
/* change blocksize */
ASSERT(dn->dn_maxblkid == 0 &&
(BP_IS_HOLE(&dn->dn_phys->dn_blkptr[0]) ||
dnode_block_freed(dn, 0)));
dnode_setdblksz(dn, blocksize);
dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = blocksize;
}
if (dn->dn_bonuslen != bonuslen)
dn->dn_next_bonuslen[tx->tx_txg&TXG_MASK] = bonuslen;
if (bonustype == DMU_OT_SA) /* Maximize bonus space for SA */
nblkptr = 1;
else
nblkptr = MIN(DN_MAX_NBLKPTR,
1 + ((DN_SLOTS_TO_BONUSLEN(dn_slots) - bonuslen) >>
SPA_BLKPTRSHIFT));
if (dn->dn_bonustype != bonustype)
dn->dn_next_bonustype[tx->tx_txg&TXG_MASK] = bonustype;
if (dn->dn_nblkptr != nblkptr)
dn->dn_next_nblkptr[tx->tx_txg&TXG_MASK] = nblkptr;
if (dn->dn_phys->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
dbuf_rm_spill(dn, tx);
dnode_rm_spill(dn, tx);
}
rw_exit(&dn->dn_struct_rwlock);
/* change type */
dn->dn_type = ot;
/* change bonus size and type */
mutex_enter(&dn->dn_mtx);
dn->dn_bonustype = bonustype;
dn->dn_bonuslen = bonuslen;
dn->dn_num_slots = dn_slots;
dn->dn_nblkptr = nblkptr;
dn->dn_checksum = ZIO_CHECKSUM_INHERIT;
dn->dn_compress = ZIO_COMPRESS_INHERIT;
ASSERT3U(dn->dn_nblkptr, <=, DN_MAX_NBLKPTR);
/* fix up the bonus db_size */
if (dn->dn_bonus) {
dn->dn_bonus->db.db_size =
DN_SLOTS_TO_BONUSLEN(dn->dn_num_slots) -
(dn->dn_nblkptr-1) * sizeof (blkptr_t);
ASSERT(dn->dn_bonuslen <= dn->dn_bonus->db.db_size);
}
dn->dn_allocated_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
}
#ifdef _KERNEL
static void
dnode_move_impl(dnode_t *odn, dnode_t *ndn)
{
int i;
ASSERT(!RW_LOCK_HELD(&odn->dn_struct_rwlock));
ASSERT(MUTEX_NOT_HELD(&odn->dn_mtx));
ASSERT(MUTEX_NOT_HELD(&odn->dn_dbufs_mtx));
ASSERT(!RW_LOCK_HELD(&odn->dn_zfetch.zf_rwlock));
/* Copy fields. */
ndn->dn_objset = odn->dn_objset;
ndn->dn_object = odn->dn_object;
ndn->dn_dbuf = odn->dn_dbuf;
ndn->dn_handle = odn->dn_handle;
ndn->dn_phys = odn->dn_phys;
ndn->dn_type = odn->dn_type;
ndn->dn_bonuslen = odn->dn_bonuslen;
ndn->dn_bonustype = odn->dn_bonustype;
ndn->dn_nblkptr = odn->dn_nblkptr;
ndn->dn_checksum = odn->dn_checksum;
ndn->dn_compress = odn->dn_compress;
ndn->dn_nlevels = odn->dn_nlevels;
ndn->dn_indblkshift = odn->dn_indblkshift;
ndn->dn_datablkshift = odn->dn_datablkshift;
ndn->dn_datablkszsec = odn->dn_datablkszsec;
ndn->dn_datablksz = odn->dn_datablksz;
ndn->dn_maxblkid = odn->dn_maxblkid;
ndn->dn_num_slots = odn->dn_num_slots;
bcopy(&odn->dn_next_nblkptr[0], &ndn->dn_next_nblkptr[0],
sizeof (odn->dn_next_nblkptr));
bcopy(&odn->dn_next_nlevels[0], &ndn->dn_next_nlevels[0],
sizeof (odn->dn_next_nlevels));
bcopy(&odn->dn_next_indblkshift[0], &ndn->dn_next_indblkshift[0],
sizeof (odn->dn_next_indblkshift));
bcopy(&odn->dn_next_bonustype[0], &ndn->dn_next_bonustype[0],
sizeof (odn->dn_next_bonustype));
bcopy(&odn->dn_rm_spillblk[0], &ndn->dn_rm_spillblk[0],
sizeof (odn->dn_rm_spillblk));
bcopy(&odn->dn_next_bonuslen[0], &ndn->dn_next_bonuslen[0],
sizeof (odn->dn_next_bonuslen));
bcopy(&odn->dn_next_blksz[0], &ndn->dn_next_blksz[0],
sizeof (odn->dn_next_blksz));
bcopy(&odn->dn_next_maxblkid[0], &ndn->dn_next_maxblkid[0],
sizeof (odn->dn_next_maxblkid));
for (i = 0; i < TXG_SIZE; i++) {
list_move_tail(&ndn->dn_dirty_records[i],
&odn->dn_dirty_records[i]);
}
bcopy(&odn->dn_free_ranges[0], &ndn->dn_free_ranges[0],
sizeof (odn->dn_free_ranges));
ndn->dn_allocated_txg = odn->dn_allocated_txg;
ndn->dn_free_txg = odn->dn_free_txg;
ndn->dn_assigned_txg = odn->dn_assigned_txg;
ndn->dn_dirty_txg = odn->dn_dirty_txg;
ndn->dn_dirtyctx = odn->dn_dirtyctx;
ndn->dn_dirtyctx_firstset = odn->dn_dirtyctx_firstset;
ASSERT(refcount_count(&odn->dn_tx_holds) == 0);
refcount_transfer(&ndn->dn_holds, &odn->dn_holds);
ASSERT(avl_is_empty(&ndn->dn_dbufs));
avl_swap(&ndn->dn_dbufs, &odn->dn_dbufs);
ndn->dn_dbufs_count = odn->dn_dbufs_count;
ndn->dn_bonus = odn->dn_bonus;
ndn->dn_have_spill = odn->dn_have_spill;
ndn->dn_zio = odn->dn_zio;
ndn->dn_oldused = odn->dn_oldused;
ndn->dn_oldflags = odn->dn_oldflags;
ndn->dn_olduid = odn->dn_olduid;
ndn->dn_oldgid = odn->dn_oldgid;
ndn->dn_oldprojid = odn->dn_oldprojid;
ndn->dn_newuid = odn->dn_newuid;
ndn->dn_newgid = odn->dn_newgid;
ndn->dn_newprojid = odn->dn_newprojid;
ndn->dn_id_flags = odn->dn_id_flags;
dmu_zfetch_init(&ndn->dn_zfetch, NULL);
list_move_tail(&ndn->dn_zfetch.zf_stream, &odn->dn_zfetch.zf_stream);
ndn->dn_zfetch.zf_dnode = odn->dn_zfetch.zf_dnode;
/*
* Update back pointers. Updating the handle fixes the back pointer of
* every descendant dbuf as well as the bonus dbuf.
*/
ASSERT(ndn->dn_handle->dnh_dnode == odn);
ndn->dn_handle->dnh_dnode = ndn;
if (ndn->dn_zfetch.zf_dnode == odn) {
ndn->dn_zfetch.zf_dnode = ndn;
}
/*
* Invalidate the original dnode by clearing all of its back pointers.
*/
odn->dn_dbuf = NULL;
odn->dn_handle = NULL;
avl_create(&odn->dn_dbufs, dbuf_compare, sizeof (dmu_buf_impl_t),
offsetof(dmu_buf_impl_t, db_link));
odn->dn_dbufs_count = 0;
odn->dn_bonus = NULL;
odn->dn_zfetch.zf_dnode = NULL;
/*
* Set the low bit of the objset pointer to ensure that dnode_move()
* recognizes the dnode as invalid in any subsequent callback.
*/
POINTER_INVALIDATE(&odn->dn_objset);
/*
* Satisfy the destructor.
*/
for (i = 0; i < TXG_SIZE; i++) {
list_create(&odn->dn_dirty_records[i],
sizeof (dbuf_dirty_record_t),
offsetof(dbuf_dirty_record_t, dr_dirty_node));
odn->dn_free_ranges[i] = NULL;
odn->dn_next_nlevels[i] = 0;
odn->dn_next_indblkshift[i] = 0;
odn->dn_next_bonustype[i] = 0;
odn->dn_rm_spillblk[i] = 0;
odn->dn_next_bonuslen[i] = 0;
odn->dn_next_blksz[i] = 0;
}
odn->dn_allocated_txg = 0;
odn->dn_free_txg = 0;
odn->dn_assigned_txg = 0;
odn->dn_dirty_txg = 0;
odn->dn_dirtyctx = 0;
odn->dn_dirtyctx_firstset = NULL;
odn->dn_have_spill = B_FALSE;
odn->dn_zio = NULL;
odn->dn_oldused = 0;
odn->dn_oldflags = 0;
odn->dn_olduid = 0;
odn->dn_oldgid = 0;
odn->dn_oldprojid = ZFS_DEFAULT_PROJID;
odn->dn_newuid = 0;
odn->dn_newgid = 0;
odn->dn_newprojid = ZFS_DEFAULT_PROJID;
odn->dn_id_flags = 0;
/*
* Mark the dnode.
*/
ndn->dn_moved = 1;
odn->dn_moved = (uint8_t)-1;
}
/*ARGSUSED*/
static kmem_cbrc_t
dnode_move(void *buf, void *newbuf, size_t size, void *arg)
{
dnode_t *odn = buf, *ndn = newbuf;
objset_t *os;
int64_t refcount;
uint32_t dbufs;
/*
* The dnode is on the objset's list of known dnodes if the objset
* pointer is valid. We set the low bit of the objset pointer when
* freeing the dnode to invalidate it, and the memory patterns written
* by kmem (baddcafe and deadbeef) set at least one of the two low bits.
* A newly created dnode sets the objset pointer last of all to indicate
* that the dnode is known and in a valid state to be moved by this
* function.
*/
os = odn->dn_objset;
if (!POINTER_IS_VALID(os)) {
DNODE_STAT_BUMP(dnode_move_invalid);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* Ensure that the objset does not go away during the move.
*/
rw_enter(&os_lock, RW_WRITER);
if (os != odn->dn_objset) {
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck1);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* If the dnode is still valid, then so is the objset. We know that no
* valid objset can be freed while we hold os_lock, so we can safely
* ensure that the objset remains in use.
*/
mutex_enter(&os->os_lock);
/*
* Recheck the objset pointer in case the dnode was removed just before
* acquiring the lock.
*/
if (os != odn->dn_objset) {
mutex_exit(&os->os_lock);
rw_exit(&os_lock);
DNODE_STAT_BUMP(dnode_move_recheck2);
return (KMEM_CBRC_DONT_KNOW);
}
/*
* At this point we know that as long as we hold os->os_lock, the dnode
* cannot be freed and fields within the dnode can be safely accessed.
* The objset listing this dnode cannot go away as long as this dnode is
* on its list.
*/
rw_exit(&os_lock);
if (DMU_OBJECT_IS_SPECIAL(odn->dn_object)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_special);
return (KMEM_CBRC_NO);
}
ASSERT(odn->dn_dbuf != NULL); /* only "special" dnodes have no parent */
/*
* Lock the dnode handle to prevent the dnode from obtaining any new
* holds. This also prevents the descendant dbufs and the bonus dbuf
* from accessing the dnode, so that we can discount their holds. The
* handle is safe to access because we know that while the dnode cannot
* go away, neither can its handle. Once we hold dnh_zrlock, we can
* safely move any dnode referenced only by dbufs.
*/
if (!zrl_tryenter(&odn->dn_handle->dnh_zrlock)) {
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_handle);
return (KMEM_CBRC_LATER);
}
/*
* Ensure a consistent view of the dnode's holds and the dnode's dbufs.
* We need to guarantee that there is a hold for every dbuf in order to
* determine whether the dnode is actively referenced. Falsely matching
* a dbuf to an active hold would lead to an unsafe move. It's possible
* that a thread already having an active dnode hold is about to add a
* dbuf, and we can't compare hold and dbuf counts while the add is in
* progress.
*/
if (!rw_tryenter(&odn->dn_struct_rwlock, RW_WRITER)) {
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_rwlock);
return (KMEM_CBRC_LATER);
}
/*
* A dbuf may be removed (evicted) without an active dnode hold. In that
* case, the dbuf count is decremented under the handle lock before the
* dbuf's hold is released. This order ensures that if we count the hold
* after the dbuf is removed but before its hold is released, we will
* treat the unmatched hold as active and exit safely. If we count the
* hold before the dbuf is removed, the hold is discounted, and the
* removal is blocked until the move completes.
*/
refcount = refcount_count(&odn->dn_holds);
ASSERT(refcount >= 0);
dbufs = odn->dn_dbufs_count;
/* We can't have more dbufs than dnode holds. */
ASSERT3U(dbufs, <=, refcount);
DTRACE_PROBE3(dnode__move, dnode_t *, odn, int64_t, refcount,
uint32_t, dbufs);
if (refcount > dbufs) {
rw_exit(&odn->dn_struct_rwlock);
zrl_exit(&odn->dn_handle->dnh_zrlock);
mutex_exit(&os->os_lock);
DNODE_STAT_BUMP(dnode_move_active);
return (KMEM_CBRC_LATER);
}
rw_exit(&odn->dn_struct_rwlock);
/*
* At this point we know that anyone with a hold on the dnode is not
* actively referencing it. The dnode is known and in a valid state to
* move. We're holding the locks needed to execute the critical section.
*/
dnode_move_impl(odn, ndn);
list_link_replace(&odn->dn_link, &ndn->dn_link);
/* If the dnode was safe to move, the refcount cannot have changed. */
ASSERT(refcount == refcount_count(&ndn->dn_holds));
ASSERT(dbufs == ndn->dn_dbufs_count);
zrl_exit(&ndn->dn_handle->dnh_zrlock); /* handle has moved */
mutex_exit(&os->os_lock);
return (KMEM_CBRC_YES);
}
#endif /* _KERNEL */
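/*
* Take a hold (zrl_add) on each dnode handle in the range [idx, idx + slots)
* so the slot contents cannot be locked for modification while they are
* being examined.
*/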
static void
dnode_slots_hold(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
zrl_add(&dnh->dnh_zrlock);
}
}
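/*
* Release the dnode handles in the range, whether they were held with
* dnode_slots_hold() or locked with dnode_slots_tryenter().
*/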
static void
dnode_slots_rele(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (zrl_is_locked(&dnh->dnh_zrlock))
zrl_exit(&dnh->dnh_zrlock);
else
zrl_remove(&dnh->dnh_zrlock);
}
}
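/*
* Try to lock every dnode handle in the range. Returns 1 on success; on
* failure any locks already taken are dropped and 0 is returned.
*/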
static int
dnode_slots_tryenter(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
if (!zrl_tryenter(&dnh->dnh_zrlock)) {
for (int j = idx; j < i; j++) {
dnh = &children->dnc_children[j];
zrl_exit(&dnh->dnh_zrlock);
}
return (0);
}
}
return (1);
}
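/*
* Set every handle in the range to the given slot marker (e.g.
* DN_SLOT_FREE or DN_SLOT_INTERIOR).
*/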
static void
dnode_set_slots(dnode_children_t *children, int idx, int slots, void *ptr)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnh->dnh_dnode = ptr;
}
}
static boolean_t
dnode_check_slots_free(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
/*
* If all dnode slots are either already free or
* evictable, return B_TRUE.
*/
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
dnode_t *dn = dnh->dnh_dnode;
if (dn == DN_SLOT_FREE) {
continue;
} else if (DN_SLOT_IS_PTR(dn)) {
mutex_enter(&dn->dn_mtx);
boolean_t can_free = (dn->dn_type == DMU_OT_NONE &&
!DNODE_IS_DIRTY(dn));
mutex_exit(&dn->dn_mtx);
if (!can_free)
return (B_FALSE);
else
continue;
} else {
return (B_FALSE);
}
}
return (B_TRUE);
}
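/*
* Destroy any instantiated but unallocated dnodes in the range so their
* slots can be reused. The caller must hold the slot locks.
*/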
static void
dnode_reclaim_slots(dnode_children_t *children, int idx, int slots)
{
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
for (int i = idx; i < idx + slots; i++) {
dnode_handle_t *dnh = &children->dnc_children[i];
ASSERT(zrl_is_locked(&dnh->dnh_zrlock));
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
ASSERT3S(dnh->dnh_dnode->dn_type, ==, DMU_OT_NONE);
dnode_destroy(dnh->dnh_dnode);
dnh->dnh_dnode = DN_SLOT_FREE;
}
}
}
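/*
* Mark the interior slots consumed by a multi-slot dnode as free again.
* The dnode's own (first) slot is not affected.
*/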
void
dnode_free_interior_slots(dnode_t *dn)
{
dnode_children_t *children = dmu_buf_get_user(&dn->dn_dbuf->db);
int epb = dn->dn_dbuf->db.db_size >> DNODE_SHIFT;
int idx = (dn->dn_object & (epb - 1)) + 1;
int slots = dn->dn_num_slots - 1;
if (slots == 0)
return;
ASSERT3S(idx + slots, <=, DNODES_PER_BLOCK);
while (!dnode_slots_tryenter(children, idx, slots))
DNODE_STAT_BUMP(dnode_free_interior_lock_retry);
dnode_set_slots(children, idx, slots, DN_SLOT_FREE);
dnode_slots_rele(children, idx, slots);
}
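/*
* Close a dnode opened with dnode_special_open(), waiting out any
* transient holds before destroying it.
*/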
void
dnode_special_close(dnode_handle_t *dnh)
{
dnode_t *dn = dnh->dnh_dnode;
/*
* Wait for final references to the dnode to clear. This can
* only happen if the arc is asynchronously evicting state that
* has a hold on this dnode while we are trying to evict this
* dnode.
*/
while (refcount_count(&dn->dn_holds) > 0)
delay(1);
ASSERT(dn->dn_dbuf == NULL ||
dmu_buf_get_user(&dn->dn_dbuf->db) == NULL);
zrl_add(&dnh->dnh_zrlock);
dnode_destroy(dn); /* implicit zrl_remove() */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = NULL;
}
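/*
* Instantiate a dnode_t for an objset-embedded dnode_phys_t (such as the
* meta dnode) under the given handle.
*/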
void
dnode_special_open(objset_t *os, dnode_phys_t *dnp, uint64_t object,
dnode_handle_t *dnh)
{
dnode_t *dn;
zrl_init(&dnh->dnh_zrlock);
zrl_tryenter(&dnh->dnh_zrlock);
dn = dnode_create(os, dnp, NULL, object, dnh);
DNODE_VERIFY(dn);
zrl_exit(&dnh->dnh_zrlock);
}
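/*
* Eviction callback for a dbuf containing dnodes: destroy any remaining
* dnode_t structures and free the dnode_children_t itself.
*/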
static void
dnode_buf_evict_async(void *dbu)
{
dnode_children_t *dnc = dbu;
DNODE_STAT_BUMP(dnode_buf_evict);
for (int i = 0; i < dnc->dnc_count; i++) {
dnode_handle_t *dnh = &dnc->dnc_children[i];
dnode_t *dn;
/*
* The dnode handle lock guards against the dnode moving to
* another valid address, so there is no need here to guard
* against changes to or from NULL.
*/
if (!DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
continue;
}
zrl_add(&dnh->dnh_zrlock);
dn = dnh->dnh_dnode;
/*
* If there are holds on this dnode, then there should
* be holds on the dnode's containing dbuf as well; thus
* it wouldn't be eligible for eviction and this function
* would not have been called.
*/
ASSERT(refcount_is_zero(&dn->dn_holds));
ASSERT(refcount_is_zero(&dn->dn_tx_holds));
dnode_destroy(dn); /* implicit zrl_remove() for first slot */
zrl_destroy(&dnh->dnh_zrlock);
dnh->dnh_dnode = DN_SLOT_UNINIT;
}
kmem_free(dnc, sizeof (dnode_children_t) +
dnc->dnc_count * sizeof (dnode_handle_t));
}
/*
* When the DNODE_MUST_BE_FREE flag is set, the "slots" parameter is used
* to ensure the hole at the specified object offset is large enough to
* hold the dnode being created. The slots parameter is also used to ensure
* a dnode does not span multiple dnode blocks. In both of these cases, if
* a failure occurs, ENOSPC is returned. Keep in mind, these failure cases
* are only possible when using DNODE_MUST_BE_FREE.
*
* If the DNODE_MUST_BE_ALLOCATED flag is set, "slots" must be 0.
* dnode_hold_impl() will check if the requested dnode is already consumed
* as an extra dnode slot by an large dnode, in which case it returns
* ENOENT.
*
* errors:
* EINVAL - Invalid object number or flags.
* ENOSPC - Hole too small to fulfill "slots" request (DNODE_MUST_BE_FREE)
* EEXIST - Refers to an allocated dnode (DNODE_MUST_BE_FREE)
* - Refers to an interior dnode slot (DNODE_MUST_BE_ALLOCATED)
* ENOENT - The requested dnode is not allocated (DNODE_MUST_BE_ALLOCATED)
* EIO - I/O error when reading the meta dnode dbuf.
*
* Note that this function succeeds even for free dnodes.
*/
int
dnode_hold_impl(objset_t *os, uint64_t object, int flag, int slots,
void *tag, dnode_t **dnp)
{
int epb, idx, err;
int drop_struct_lock = FALSE;
int type;
uint64_t blk;
dnode_t *mdn, *dn;
dmu_buf_impl_t *db;
dnode_children_t *dnc;
dnode_phys_t *dn_block;
dnode_handle_t *dnh;
ASSERT(!(flag & DNODE_MUST_BE_ALLOCATED) || (slots == 0));
ASSERT(!(flag & DNODE_MUST_BE_FREE) || (slots > 0));
/*
* If you are holding the spa config lock as writer, you shouldn't
* be asking the DMU to do *anything* unless it's the root pool
* which may require us to read from the root filesystem while
* holding some (not all) of the locks as writer.
*/
ASSERT(spa_config_held(os->os_spa, SCL_ALL, RW_WRITER) == 0 ||
(spa_is_root(os->os_spa) &&
spa_config_held(os->os_spa, SCL_STATE, RW_WRITER)));
if (object == DMU_USERUSED_OBJECT || object == DMU_GROUPUSED_OBJECT ||
object == DMU_PROJECTUSED_OBJECT) {
if (object == DMU_USERUSED_OBJECT)
dn = DMU_USERUSED_DNODE(os);
else if (object == DMU_GROUPUSED_OBJECT)
dn = DMU_GROUPUSED_DNODE(os);
else
dn = DMU_PROJECTUSED_DNODE(os);
if (dn == NULL)
return (SET_ERROR(ENOENT));
type = dn->dn_type;
if ((flag & DNODE_MUST_BE_ALLOCATED) && type == DMU_OT_NONE)
return (SET_ERROR(ENOENT));
if ((flag & DNODE_MUST_BE_FREE) && type != DMU_OT_NONE)
return (SET_ERROR(EEXIST));
DNODE_VERIFY(dn);
(void) refcount_add(&dn->dn_holds, tag);
*dnp = dn;
return (0);
}
if (object == 0 || object >= DN_MAX_OBJECT)
return (SET_ERROR(EINVAL));
mdn = DMU_META_DNODE(os);
ASSERT(mdn->dn_object == DMU_META_DNODE_OBJECT);
DNODE_VERIFY(mdn);
if (!RW_WRITE_HELD(&mdn->dn_struct_rwlock)) {
rw_enter(&mdn->dn_struct_rwlock, RW_READER);
drop_struct_lock = TRUE;
}
blk = dbuf_whichblock(mdn, 0, object * sizeof (dnode_phys_t));
db = dbuf_hold(mdn, blk, FTAG);
if (drop_struct_lock)
rw_exit(&mdn->dn_struct_rwlock);
if (db == NULL) {
DNODE_STAT_BUMP(dnode_hold_dbuf_hold);
return (SET_ERROR(EIO));
}
/*
* We do not need to decrypt to read the dnode, so it doesn't matter
* if we get the encrypted or decrypted version.
*/
err = dbuf_read(db, NULL, DB_RF_CANFAIL | DB_RF_NO_DECRYPT);
if (err) {
DNODE_STAT_BUMP(dnode_hold_dbuf_read);
dbuf_rele(db, FTAG);
return (err);
}
ASSERT3U(db->db.db_size, >=, 1<<DNODE_SHIFT);
epb = db->db.db_size >> DNODE_SHIFT;
idx = object & (epb - 1);
dn_block = (dnode_phys_t *)db->db.db_data;
ASSERT(DB_DNODE(db)->dn_type == DMU_OT_DNODE);
dnc = dmu_buf_get_user(&db->db);
dnh = NULL;
if (dnc == NULL) {
dnode_children_t *winner;
int skip = 0;
dnc = kmem_zalloc(sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t), KM_SLEEP);
dnc->dnc_count = epb;
dnh = &dnc->dnc_children[0];
/* Initialize dnode slot status from dnode_phys_t */
for (int i = 0; i < epb; i++) {
zrl_init(&dnh[i].dnh_zrlock);
if (skip) {
skip--;
continue;
}
if (dn_block[i].dn_type != DMU_OT_NONE) {
int interior = dn_block[i].dn_extra_slots;
dnode_set_slots(dnc, i, 1, DN_SLOT_ALLOCATED);
dnode_set_slots(dnc, i + 1, interior,
DN_SLOT_INTERIOR);
skip = interior;
} else {
dnh[i].dnh_dnode = DN_SLOT_FREE;
skip = 0;
}
}
dmu_buf_init_user(&dnc->dnc_dbu, NULL,
dnode_buf_evict_async, NULL);
winner = dmu_buf_set_user(&db->db, &dnc->dnc_dbu);
if (winner != NULL) {
for (int i = 0; i < epb; i++)
zrl_destroy(&dnh[i].dnh_zrlock);
kmem_free(dnc, sizeof (dnode_children_t) +
epb * sizeof (dnode_handle_t));
dnc = winner;
}
}
ASSERT(dnc->dnc_count == epb);
dn = DN_SLOT_UNINIT;
if (flag & DNODE_MUST_BE_ALLOCATED) {
slots = 1;
while (dn == DN_SLOT_UNINIT) {
dnode_slots_hold(dnc, idx, slots);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
break;
} else if (dnh->dnh_dnode == DN_SLOT_INTERIOR) {
DNODE_STAT_BUMP(dnode_hold_alloc_interior);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
} else if (dnh->dnh_dnode != DN_SLOT_ALLOCATED) {
DNODE_STAT_BUMP(dnode_hold_alloc_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
}
dnode_slots_rele(dnc, idx, slots);
if (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_retry);
continue;
}
/*
* Someone else won the race and called dnode_create()
* after we checked DN_SLOT_IS_PTR() above but before
* we acquired the lock.
*/
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
DNODE_STAT_BUMP(dnode_hold_alloc_lock_misses);
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
}
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE) {
DNODE_STAT_BUMP(dnode_hold_alloc_type_none);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOENT));
}
DNODE_STAT_BUMP(dnode_hold_alloc_hits);
} else if (flag & DNODE_MUST_BE_FREE) {
if (idx + slots - 1 >= DNODES_PER_BLOCK) {
DNODE_STAT_BUMP(dnode_hold_free_overflow);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
while (dn == DN_SLOT_UNINIT) {
dnode_slots_hold(dnc, idx, slots);
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
dnode_slots_rele(dnc, idx, slots);
if (!dnode_slots_tryenter(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_retry);
continue;
}
if (!dnode_check_slots_free(dnc, idx, slots)) {
DNODE_STAT_BUMP(dnode_hold_free_lock_misses);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(ENOSPC));
}
/*
* Allocated but otherwise free dnodes which would
* be in the interior of a multi-slot dnode need
* to be freed. Single slot dnodes can be safely
* re-purposed as a performance optimization.
*/
if (slots > 1)
dnode_reclaim_slots(dnc, idx + 1, slots - 1);
dnh = &dnc->dnc_children[idx];
if (DN_SLOT_IS_PTR(dnh->dnh_dnode)) {
dn = dnh->dnh_dnode;
} else {
dn = dnode_create(os, dn_block + idx, db,
object, dnh);
}
}
mutex_enter(&dn->dn_mtx);
if (!refcount_is_zero(&dn->dn_holds)) {
DNODE_STAT_BUMP(dnode_hold_free_refcount);
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(EEXIST));
}
dnode_set_slots(dnc, idx + 1, slots - 1, DN_SLOT_INTERIOR);
DNODE_STAT_BUMP(dnode_hold_free_hits);
} else {
dbuf_rele(db, FTAG);
return (SET_ERROR(EINVAL));
}
if (dn->dn_free_txg) {
DNODE_STAT_BUMP(dnode_hold_free_txg);
type = dn->dn_type;
mutex_exit(&dn->dn_mtx);
dnode_slots_rele(dnc, idx, slots);
dbuf_rele(db, FTAG);
return (SET_ERROR(type == DMU_OT_NONE ? ENOENT : EEXIST));
}
if (refcount_add(&dn->dn_holds, tag) == 1)
dbuf_add_ref(db, dnh);
mutex_exit(&dn->dn_mtx);
/* Now we can rely on the hold to prevent the dnode from moving. */
dnode_slots_rele(dnc, idx, slots);
DNODE_VERIFY(dn);
ASSERT3P(dn->dn_dbuf, ==, db);
ASSERT3U(dn->dn_object, ==, object);
dbuf_rele(db, FTAG);
*dnp = dn;
return (0);
}
/*
* Return a held dnode in *dnp if the object is allocated, or an error if not.
*/
int
dnode_hold(objset_t *os, uint64_t object, void *tag, dnode_t **dnp)
{
return (dnode_hold_impl(os, object, DNODE_MUST_BE_ALLOCATED, 0, tag,
dnp));
}
/*
* Can only add a reference if there is already at least one
* reference on the dnode. Returns FALSE if unable to add a
* new reference.
*/
boolean_t
dnode_add_ref(dnode_t *dn, void *tag)
{
mutex_enter(&dn->dn_mtx);
if (refcount_is_zero(&dn->dn_holds)) {
mutex_exit(&dn->dn_mtx);
return (FALSE);
}
VERIFY(1 < refcount_add(&dn->dn_holds, tag));
mutex_exit(&dn->dn_mtx);
return (TRUE);
}
void
dnode_rele(dnode_t *dn, void *tag)
{
mutex_enter(&dn->dn_mtx);
dnode_rele_and_unlock(dn, tag);
}
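/*
* Drop a hold on the dnode. The caller must hold dn_mtx, which is
* released before returning.
*/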
void
dnode_rele_and_unlock(dnode_t *dn, void *tag)
{
uint64_t refs;
/* Get while the hold prevents the dnode from moving. */
dmu_buf_impl_t *db = dn->dn_dbuf;
dnode_handle_t *dnh = dn->dn_handle;
refs = refcount_remove(&dn->dn_holds, tag);
mutex_exit(&dn->dn_mtx);
/*
* It's unsafe to release the last hold on a dnode by dnode_rele() or
* indirectly by dbuf_rele() while relying on the dnode handle to
* prevent the dnode from moving, since releasing the last hold could
* result in the dnode's parent dbuf evicting its dnode handles. For
* that reason anyone calling dnode_rele() or dbuf_rele() without some
* other direct or indirect hold on the dnode must first drop the dnode
* handle.
*/
ASSERT(refs > 0 || dnh->dnh_zrlock.zr_owner != curthread);
/* NOTE: the DNODE_DNODE does not have a dn_dbuf */
if (refs == 0 && db != NULL) {
/*
* Another thread could add a hold to the dnode handle in
* dnode_hold_impl() while holding the parent dbuf. Since the
* hold on the parent dbuf prevents the handle from being
* destroyed, the hold on the handle is OK. We can't yet assert
* that the handle has zero references, but that will be
* asserted anyway when the handle gets destroyed.
*/
dbuf_rele(db, dnh);
}
}
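/*
* Mark the dnode dirty in the given transaction: place it on the objset's
* per-txg dirty list if it is not already there, and take a "dirty hold"
* so the dnode persists until syncing context has processed it.
*/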
void
dnode_setdirty(dnode_t *dn, dmu_tx_t *tx)
{
objset_t *os = dn->dn_objset;
uint64_t txg = tx->tx_txg;
if (DMU_OBJECT_IS_SPECIAL(dn->dn_object)) {
dsl_dataset_dirty(os->os_dsl_dataset, tx);
return;
}
DNODE_VERIFY(dn);
#ifdef ZFS_DEBUG
mutex_enter(&dn->dn_mtx);
ASSERT(dn->dn_phys->dn_type || dn->dn_allocated_txg);
ASSERT(dn->dn_free_txg == 0 || dn->dn_free_txg >= txg);
mutex_exit(&dn->dn_mtx);
#endif
/*
* Determine old uid/gid when necessary
*/
dmu_objset_userquota_get_ids(dn, B_TRUE, tx);
multilist_t *dirtylist = os->os_dirty_dnodes[txg & TXG_MASK];
multilist_sublist_t *mls = multilist_sublist_lock_obj(dirtylist, dn);
/*
* If we are already marked dirty, we're done.
*/
if (multilist_link_active(&dn->dn_dirty_link[txg & TXG_MASK])) {
multilist_sublist_unlock(mls);
return;
}
ASSERT(!refcount_is_zero(&dn->dn_holds) ||
!avl_is_empty(&dn->dn_dbufs));
ASSERT(dn->dn_datablksz != 0);
ASSERT0(dn->dn_next_bonuslen[txg&TXG_MASK]);
ASSERT0(dn->dn_next_blksz[txg&TXG_MASK]);
ASSERT0(dn->dn_next_bonustype[txg&TXG_MASK]);
dprintf_ds(os->os_dsl_dataset, "obj=%llu txg=%llu\n",
dn->dn_object, txg);
multilist_sublist_insert_head(mls, dn);
multilist_sublist_unlock(mls);
/*
* The dnode maintains a hold on its containing dbuf as
* long as there are holds on it. Each instantiated child
* dbuf maintains a hold on the dnode. When the last child
* drops its hold, the dnode will drop its hold on the
* containing dbuf. We add a "dirty hold" here so that the
* dnode will hang around after we finish processing its
* children.
*/
VERIFY(dnode_add_ref(dn, (void *)(uintptr_t)tx->tx_txg));
(void) dbuf_dirty(dn->dn_dbuf, tx);
dsl_dataset_dirty(os->os_dsl_dataset, tx);
}
void
dnode_free(dnode_t *dn, dmu_tx_t *tx)
{
mutex_enter(&dn->dn_mtx);
if (dn->dn_type == DMU_OT_NONE || dn->dn_free_txg) {
mutex_exit(&dn->dn_mtx);
return;
}
dn->dn_free_txg = tx->tx_txg;
mutex_exit(&dn->dn_mtx);
dnode_setdirty(dn, tx);
}
/*
* Try to change the block size for the indicated dnode. This can only
* succeed if there are no blocks allocated or dirty beyond the first block.
*/
int
dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
int err;
ASSERT3U(size, <=, spa_maxblocksize(dmu_objset_spa(dn->dn_objset)));
if (size == 0)
size = SPA_MINBLOCKSIZE;
else
size = P2ROUNDUP(size, SPA_MINBLOCKSIZE);
if (ibs == dn->dn_indblkshift)
ibs = 0;
if (size >> SPA_MINBLOCKSHIFT == dn->dn_datablkszsec && ibs == 0)
return (0);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
/* Check for any allocated blocks beyond the first */
if (dn->dn_maxblkid != 0)
goto fail;
mutex_enter(&dn->dn_dbufs_mtx);
for (db = avl_first(&dn->dn_dbufs); db != NULL;
db = AVL_NEXT(&dn->dn_dbufs, db)) {
if (db->db_blkid != 0 && db->db_blkid != DMU_BONUS_BLKID &&
db->db_blkid != DMU_SPILL_BLKID) {
mutex_exit(&dn->dn_dbufs_mtx);
goto fail;
}
}
mutex_exit(&dn->dn_dbufs_mtx);
if (ibs && dn->dn_nlevels != 1)
goto fail;
/* resize the old block */
err = dbuf_hold_impl(dn, 0, 0, TRUE, FALSE, FTAG, &db);
if (err == 0)
dbuf_new_size(db, size, tx);
else if (err != ENOENT)
goto fail;
dnode_setdblksz(dn, size);
dnode_setdirty(dn, tx);
dn->dn_next_blksz[tx->tx_txg&TXG_MASK] = size;
if (ibs) {
dn->dn_indblkshift = ibs;
dn->dn_next_indblkshift[tx->tx_txg&TXG_MASK] = ibs;
}
/* rele after we have fixed the blocksize in the dnode */
if (db)
dbuf_rele(db, FTAG);
rw_exit(&dn->dn_struct_rwlock);
return (0);
fail:
rw_exit(&dn->dn_struct_rwlock);
return (SET_ERROR(ENOTSUP));
}
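/*
* Raise the indirection depth of the dnode to new_nlevels, dirty the new
* top-level indirect block, and reparent the existing dirty records
* beneath it.
*/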
static void
dnode_set_nlevels_impl(dnode_t *dn, int new_nlevels, dmu_tx_t *tx)
{
uint64_t txgoff = tx->tx_txg & TXG_MASK;
int old_nlevels = dn->dn_nlevels;
dmu_buf_impl_t *db;
list_t *list;
dbuf_dirty_record_t *new, *dr, *dr_next;
ASSERT(RW_WRITE_HELD(&dn->dn_struct_rwlock));
dn->dn_nlevels = new_nlevels;
ASSERT3U(new_nlevels, >, dn->dn_next_nlevels[txgoff]);
dn->dn_next_nlevels[txgoff] = new_nlevels;
/* dirty the left indirects */
db = dbuf_hold_level(dn, old_nlevels, 0, FTAG);
ASSERT(db != NULL);
new = dbuf_dirty(db, tx);
dbuf_rele(db, FTAG);
/* transfer the dirty records to the new indirect */
mutex_enter(&dn->dn_mtx);
mutex_enter(&new->dt.di.dr_mtx);
list = &dn->dn_dirty_records[txgoff];
for (dr = list_head(list); dr; dr = dr_next) {
dr_next = list_next(&dn->dn_dirty_records[txgoff], dr);
if (dr->dr_dbuf->db_level != new_nlevels-1 &&
dr->dr_dbuf->db_blkid != DMU_BONUS_BLKID &&
dr->dr_dbuf->db_blkid != DMU_SPILL_BLKID) {
ASSERT(dr->dr_dbuf->db_level == old_nlevels-1);
list_remove(&dn->dn_dirty_records[txgoff], dr);
list_insert_tail(&new->dt.di.dr_children, dr);
dr->dr_parent = new;
}
}
mutex_exit(&new->dt.di.dr_mtx);
mutex_exit(&dn->dn_mtx);
}
int
dnode_set_nlevels(dnode_t *dn, int nlevels, dmu_tx_t *tx)
{
int ret = 0;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
if (dn->dn_nlevels == nlevels) {
ret = 0;
goto out;
} else if (nlevels < dn->dn_nlevels) {
ret = SET_ERROR(EINVAL);
goto out;
}
dnode_set_nlevels_impl(dn, nlevels, tx);
out:
rw_exit(&dn->dn_struct_rwlock);
return (ret);
}
/* read-holding callers must not rely on the lock being continuously held */
void
dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t have_read)
{
int epbs, new_nlevels;
uint64_t sz;
ASSERT(blkid != DMU_BONUS_BLKID);
ASSERT(have_read ?
RW_READ_HELD(&dn->dn_struct_rwlock) :
RW_WRITE_HELD(&dn->dn_struct_rwlock));
/*
* if we have a read-lock, check to see if we need to do any work
* before upgrading to a write-lock.
*/
if (have_read) {
if (blkid <= dn->dn_maxblkid)
return;
if (!rw_tryupgrade(&dn->dn_struct_rwlock)) {
rw_exit(&dn->dn_struct_rwlock);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
}
}
if (blkid <= dn->dn_maxblkid)
goto out;
dn->dn_maxblkid = blkid;
dn->dn_next_maxblkid[tx->tx_txg & TXG_MASK] = blkid;
/*
* Compute the number of levels necessary to support the new maxblkid.
*/
new_nlevels = 1;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
for (sz = dn->dn_nblkptr;
sz <= blkid && sz >= dn->dn_nblkptr; sz <<= epbs)
new_nlevels++;
ASSERT3U(new_nlevels, <=, DN_MAX_LEVELS);
if (new_nlevels > dn->dn_nlevels)
dnode_set_nlevels_impl(dn, new_nlevels, tx);
out:
if (have_read)
rw_downgrade(&dn->dn_struct_rwlock);
}
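/*
* Dirty the level-1 indirect block containing l1blkid, if it exists.
*/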
static void
dnode_dirty_l1(dnode_t *dn, uint64_t l1blkid, dmu_tx_t *tx)
{
dmu_buf_impl_t *db = dbuf_hold_level(dn, 1, l1blkid, FTAG);
if (db != NULL) {
dmu_buf_will_dirty(&db->db, tx);
dbuf_rele(db, FTAG);
}
}
void
dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx)
{
dmu_buf_impl_t *db;
uint64_t blkoff, blkid, nblks;
int blksz, blkshift, head, tail;
int trunc = FALSE;
int epbs;
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
blksz = dn->dn_datablksz;
blkshift = dn->dn_datablkshift;
epbs = dn->dn_indblkshift - SPA_BLKPTRSHIFT;
if (len == DMU_OBJECT_END) {
len = UINT64_MAX - off;
trunc = TRUE;
}
/*
* First, block align the region to free:
*/
if (ISP2(blksz)) {
head = P2NPHASE(off, blksz);
blkoff = P2PHASE(off, blksz);
if ((off >> blkshift) > dn->dn_maxblkid)
goto out;
} else {
ASSERT(dn->dn_maxblkid == 0);
if (off == 0 && len >= blksz) {
/*
* Freeing the whole block; fast-track this request.
* Note that we won't dirty any indirect blocks,
* which is fine because we will be freeing the entire
* file and thus all indirect blocks will be freed
* by free_children().
*/
blkid = 0;
nblks = 1;
goto done;
} else if (off >= blksz) {
/* Freeing past end-of-data */
goto out;
} else {
/* Freeing part of the block. */
head = blksz - off;
ASSERT3U(head, >, 0);
}
blkoff = off;
}
/* zero out any partial block data at the start of the range */
if (head) {
ASSERT3U(blkoff + head, ==, blksz);
if (len < head)
head = len;
if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off),
TRUE, FALSE, FTAG, &db) == 0) {
caddr_t data;
/* don't dirty if it isn't on disk and isn't dirty */
if (db->db_last_dirty ||
(db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
rw_exit(&dn->dn_struct_rwlock);
dmu_buf_will_dirty(&db->db, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
data = db->db.db_data;
bzero(data + blkoff, head);
}
dbuf_rele(db, FTAG);
}
off += head;
len -= head;
}
/* If the range was less than one block, we're done */
if (len == 0)
goto out;
/* If the remaining range is past end of file, we're done */
if ((off >> blkshift) > dn->dn_maxblkid)
goto out;
ASSERT(ISP2(blksz));
if (trunc)
tail = 0;
else
tail = P2PHASE(len, blksz);
ASSERT0(P2PHASE(off, blksz));
/* zero out any partial block data at the end of the range */
if (tail) {
if (len < tail)
tail = len;
if (dbuf_hold_impl(dn, 0, dbuf_whichblock(dn, 0, off+len),
TRUE, FALSE, FTAG, &db) == 0) {
/* don't dirty if not on disk and not dirty */
if (db->db_last_dirty ||
(db->db_blkptr && !BP_IS_HOLE(db->db_blkptr))) {
rw_exit(&dn->dn_struct_rwlock);
dmu_buf_will_dirty(&db->db, tx);
rw_enter(&dn->dn_struct_rwlock, RW_WRITER);
bzero(db->db.db_data, tail);
}
dbuf_rele(db, FTAG);
}
len -= tail;
}
/* If the range did not include a full block, we are done */
if (len == 0)
goto out;
ASSERT(IS_P2ALIGNED(off, blksz));
ASSERT(trunc || IS_P2ALIGNED(len, blksz));
blkid = off >> blkshift;
nblks = len >> blkshift;
if (trunc)
nblks += 1;
/*
* Dirty all the indirect blocks in this range. Note that only
* the first and last indirect blocks can actually be written
* (if they were partially freed) -- they must be dirtied, even if
* they do not exist on disk yet. The interior blocks will
* be freed by free_children(), so they will not actually be written.
* Even though these interior blocks will not be written, we
* dirty them for two reasons:
*
* - It ensures that the indirect blocks remain in memory until
* syncing context. (They have already been prefetched by
* dmu_tx_hold_free(), so we don't have to worry about reading
* them serially here.)
*
* - The dirty space accounting will put pressure on the txg sync
* mechanism to begin syncing, and to delay transactions if there
* is a large amount of freeing. Even though these indirect
* blocks will not be written, we could need to write the same
* amount of space if we copy the freed BPs into deadlists.
*/
if (dn->dn_nlevels > 1) {
uint64_t first, last;
first = blkid >> epbs;
dnode_dirty_l1(dn, first, tx);
if (trunc)
last = dn->dn_maxblkid >> epbs;
else
last = (blkid + nblks - 1) >> epbs;
if (last != first)
dnode_dirty_l1(dn, last, tx);
int shift = dn->dn_datablkshift + dn->dn_indblkshift -
SPA_BLKPTRSHIFT;
for (uint64_t i = first + 1; i < last; i++) {
/*
* Set i to the blockid of the next non-hole
* level-1 indirect block at or after i. Note
* that dnode_next_offset() operates in terms of
* level-0-equivalent bytes.
*/
uint64_t ibyte = i << shift;
int err = dnode_next_offset(dn, DNODE_FIND_HAVELOCK,
&ibyte, 2, 1, 0);
i = ibyte >> shift;
if (i >= last)
break;
/*
* Normally we should not see an error, either
* from dnode_next_offset() or dbuf_hold_level()
* (except for ESRCH from dnode_next_offset).
* If there is an i/o error, then when we read
* this block in syncing context, it will use
* ZIO_FLAG_MUSTSUCCEED, and thus hang/panic according
* to the "failmode" property. dnode_next_offset()
* doesn't have a flag to indicate MUSTSUCCEED.
*/
if (err != 0)
break;
dnode_dirty_l1(dn, i, tx);
}
}
done:
/*
* Add this range to the dnode range list.
* We will finish up this free operation in the syncing phase.
*/
mutex_enter(&dn->dn_mtx);
{
int txgoff = tx->tx_txg & TXG_MASK;
if (dn->dn_free_ranges[txgoff] == NULL) {
- dn->dn_free_ranges[txgoff] =
- range_tree_create(NULL, NULL, &dn->dn_mtx);
+ dn->dn_free_ranges[txgoff] = range_tree_create(NULL, NULL);
}
range_tree_clear(dn->dn_free_ranges[txgoff], blkid, nblks);
range_tree_add(dn->dn_free_ranges[txgoff], blkid, nblks);
}
dprintf_dnode(dn, "blkid=%llu nblks=%llu txg=%llu\n",
blkid, nblks, tx->tx_txg);
mutex_exit(&dn->dn_mtx);
dbuf_free_range(dn, blkid, blkid + nblks - 1, tx);
dnode_setdirty(dn, tx);
out:
rw_exit(&dn->dn_struct_rwlock);
}
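/*
* Return B_TRUE if the dnode's spill block is pending removal in any
* open txg.
*/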
static boolean_t
dnode_spill_freed(dnode_t *dn)
{
int i;
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_rm_spillblk[i] == DN_KILL_SPILLBLK)
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* return TRUE if this blkid was freed in a recent txg, or FALSE if it wasn't */
uint64_t
dnode_block_freed(dnode_t *dn, uint64_t blkid)
{
void *dp = spa_get_dsl(dn->dn_objset->os_spa);
int i;
if (blkid == DMU_BONUS_BLKID)
return (FALSE);
/*
* If we're in the process of opening the pool, dp will not be
* set yet, but there shouldn't be anything dirty.
*/
if (dp == NULL)
return (FALSE);
if (dn->dn_free_txg)
return (TRUE);
if (blkid == DMU_SPILL_BLKID)
return (dnode_spill_freed(dn));
mutex_enter(&dn->dn_mtx);
for (i = 0; i < TXG_SIZE; i++) {
if (dn->dn_free_ranges[i] != NULL &&
range_tree_contains(dn->dn_free_ranges[i], blkid, 1))
break;
}
mutex_exit(&dn->dn_mtx);
return (i < TXG_SIZE);
}
/* call from syncing context when we actually write/free space for this dnode */
void
dnode_diduse_space(dnode_t *dn, int64_t delta)
{
uint64_t space;
dprintf_dnode(dn, "dn=%p dnp=%p used=%llu delta=%lld\n",
dn, dn->dn_phys,
(u_longlong_t)dn->dn_phys->dn_used,
(longlong_t)delta);
mutex_enter(&dn->dn_mtx);
space = DN_USED_BYTES(dn->dn_phys);
if (delta > 0) {
ASSERT3U(space + delta, >=, space); /* no overflow */
} else {
ASSERT3U(space, >=, -delta); /* no underflow */
}
space += delta;
if (spa_version(dn->dn_objset->os_spa) < SPA_VERSION_DNODE_BYTES) {
ASSERT((dn->dn_phys->dn_flags & DNODE_FLAG_USED_BYTES) == 0);
ASSERT0(P2PHASE(space, 1<<DEV_BSHIFT));
dn->dn_phys->dn_used = space >> DEV_BSHIFT;
} else {
dn->dn_phys->dn_used = space;
dn->dn_phys->dn_flags |= DNODE_FLAG_USED_BYTES;
}
mutex_exit(&dn->dn_mtx);
}
/*
* Scans a block at the indicated "level" looking for a hole or data,
* depending on 'flags'.
*
* If level > 0, then we are scanning an indirect block looking at its
* pointers. If level == 0, then we are looking at a block of dnodes.
*
* If we don't find what we are looking for in the block, we return ESRCH.
* Otherwise, return with *offset pointing to the beginning (if searching
* forwards) or end (if searching backwards) of the range covered by the
* block pointer we matched on (or dnode).
*
* The basic search algorithm used below by dnode_next_offset() is to
* use this function to search up the block tree (widen the search) until
* we find something (i.e., we don't return ESRCH) and then search back
* down the tree (narrow the search) until we reach our original search
* level.
*/
static int
dnode_next_offset_level(dnode_t *dn, int flags, uint64_t *offset,
int lvl, uint64_t blkfill, uint64_t txg)
{
dmu_buf_impl_t *db = NULL;
void *data = NULL;
uint64_t epbs = dn->dn_phys->dn_indblkshift - SPA_BLKPTRSHIFT;
uint64_t epb = 1ULL << epbs;
uint64_t minfill, maxfill;
boolean_t hole;
int i, inc, error, span;
hole = ((flags & DNODE_FIND_HOLE) != 0);
inc = (flags & DNODE_FIND_BACKWARDS) ? -1 : 1;
ASSERT(txg == 0 || !hole);
if (lvl == dn->dn_phys->dn_nlevels) {
error = 0;
epb = dn->dn_phys->dn_nblkptr;
data = dn->dn_phys->dn_blkptr;
} else {
uint64_t blkid = dbuf_whichblock(dn, lvl, *offset);
error = dbuf_hold_impl(dn, lvl, blkid, TRUE, FALSE, FTAG, &db);
if (error) {
if (error != ENOENT)
return (error);
if (hole)
return (0);
/*
* This can only happen when we are searching up
* the block tree for data. We don't really need to
* adjust the offset, as we will just end up looking
* at the pointer to this block in its parent, and it's
* going to be unallocated, so we will skip over it.
*/
return (SET_ERROR(ESRCH));
}
error = dbuf_read(db, NULL,
DB_RF_CANFAIL | DB_RF_HAVESTRUCT | DB_RF_NO_DECRYPT);
if (error) {
dbuf_rele(db, FTAG);
return (error);
}
data = db->db.db_data;
}
if (db != NULL && txg != 0 && (db->db_blkptr == NULL ||
db->db_blkptr->blk_birth <= txg ||
BP_IS_HOLE(db->db_blkptr))) {
/*
* This can only happen when we are searching up the tree
* and these conditions mean that we need to keep climbing.
*/
error = SET_ERROR(ESRCH);
} else if (lvl == 0) {
dnode_phys_t *dnp = data;
ASSERT(dn->dn_type == DMU_OT_DNODE);
ASSERT(!(flags & DNODE_FIND_BACKWARDS));
for (i = (*offset >> DNODE_SHIFT) & (blkfill - 1);
i < blkfill; i += dnp[i].dn_extra_slots + 1) {
if ((dnp[i].dn_type == DMU_OT_NONE) == hole)
break;
}
if (i == blkfill)
error = SET_ERROR(ESRCH);
*offset = (*offset & ~(DNODE_BLOCK_SIZE - 1)) +
(i << DNODE_SHIFT);
} else {
blkptr_t *bp = data;
uint64_t start = *offset;
span = (lvl - 1) * epbs + dn->dn_datablkshift;
minfill = 0;
maxfill = blkfill << ((lvl - 1) * epbs);
if (hole)
maxfill--;
else
minfill++;
if (span >= 8 * sizeof (*offset)) {
/* This only happens on the highest indirection level */
ASSERT3U((lvl - 1), ==, dn->dn_phys->dn_nlevels - 1);
*offset = 0;
} else {
*offset = *offset >> span;
}
for (i = BF64_GET(*offset, 0, epbs);
i >= 0 && i < epb; i += inc) {
if (BP_GET_FILL(&bp[i]) >= minfill &&
BP_GET_FILL(&bp[i]) <= maxfill &&
(hole || bp[i].blk_birth > txg))
break;
if (inc > 0 || *offset > 0)
*offset += inc;
}
if (span >= 8 * sizeof (*offset)) {
*offset = start;
} else {
*offset = *offset << span;
}
if (inc < 0) {
/* traversing backwards; position offset at the end */
ASSERT3U(*offset, <=, start);
*offset = MIN(*offset + (1ULL << span) - 1, start);
} else if (*offset < start) {
*offset = start;
}
if (i < 0 || i >= epb)
error = SET_ERROR(ESRCH);
}
if (db)
dbuf_rele(db, FTAG);
return (error);
}
/*
* Find the next hole, data, or sparse region at or after *offset.
* The value 'blkfill' tells us how many items we expect to find
* in an L0 data block; this value is 1 for normal objects,
* DNODES_PER_BLOCK for the meta dnode, and some fraction of
* DNODES_PER_BLOCK when searching for sparse regions thereof.
*
* Examples:
*
* dnode_next_offset(dn, flags, offset, 1, 1, 0);
* Finds the next/previous hole/data in a file.
* Used in dmu_offset_next().
*
* dnode_next_offset(mdn, flags, offset, 0, DNODES_PER_BLOCK, txg);
* Finds the next free/allocated dnode in an objset's meta-dnode.
* Only finds objects that have new contents since txg (i.e.,
* bonus buffer changes and content removal are ignored).
* Used in dmu_object_next().
*
* dnode_next_offset(mdn, DNODE_FIND_HOLE, offset, 2, DNODES_PER_BLOCK >> 2, 0);
* Finds the next L2 meta-dnode bp that's at most 1/4 full.
* Used in dmu_object_alloc().
*/
int
dnode_next_offset(dnode_t *dn, int flags, uint64_t *offset,
int minlvl, uint64_t blkfill, uint64_t txg)
{
uint64_t initial_offset = *offset;
int lvl, maxlvl;
int error = 0;
if (!(flags & DNODE_FIND_HAVELOCK))
rw_enter(&dn->dn_struct_rwlock, RW_READER);
if (dn->dn_phys->dn_nlevels == 0) {
error = SET_ERROR(ESRCH);
goto out;
}
if (dn->dn_datablkshift == 0) {
if (*offset < dn->dn_datablksz) {
if (flags & DNODE_FIND_HOLE)
*offset = dn->dn_datablksz;
} else {
error = SET_ERROR(ESRCH);
}
goto out;
}
maxlvl = dn->dn_phys->dn_nlevels;
for (lvl = minlvl; lvl <= maxlvl; lvl++) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
if (error != ESRCH)
break;
}
while (error == 0 && --lvl >= minlvl) {
error = dnode_next_offset_level(dn,
flags, offset, lvl, blkfill, txg);
}
/*
* There's always a "virtual hole" at the end of the object, even
* if all BP's which physically exist are non-holes.
*/
if ((flags & DNODE_FIND_HOLE) && error == ESRCH && txg == 0 &&
minlvl == 1 && blkfill == 1 && !(flags & DNODE_FIND_BACKWARDS)) {
error = 0;
}
if (error == 0 && (flags & DNODE_FIND_BACKWARDS ?
initial_offset < *offset : initial_offset > *offset))
error = SET_ERROR(ESRCH);
out:
if (!(flags & DNODE_FIND_HAVELOCK))
rw_exit(&dn->dn_struct_rwlock);
return (error);
}
diff --git a/module/zfs/dsl_dataset.c b/module/zfs/dsl_dataset.c
index 40c2df1e7e98..9823f3183d14 100644
--- a/module/zfs/dsl_dataset.c
+++ b/module/zfs/dsl_dataset.c
@@ -1,4091 +1,4286 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2014 RackTop Systems.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright 2016, OmniTI Computer Consulting, Inc. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/arc.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/unique.h>
#include <sys/zfs_context.h>
#include <sys/zfs_ioctl.h>
#include <sys/spa.h>
+#include <sys/vdev.h>
#include <sys/zfs_znode.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sys/dsl_deadlist.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_bookmark.h>
#include <sys/policy.h>
#include <sys/dmu_send.h>
#include <sys/zio_compress.h>
#include <zfs_fletcher.h>
#include <sys/zio_checksum.h>
/*
* The SPA supports block sizes up to 16MB. However, very large blocks
* can have an impact on i/o latency (e.g. tying up a spinning disk for
* ~300ms), and also potentially on the memory allocator. Therefore,
* we do not allow the recordsize to be set larger than zfs_max_recordsize
* (default 1MB). Larger blocks can be created by changing this tunable,
* and pools with larger blocks can always be imported and used, regardless
* of this setting.
*/
int zfs_max_recordsize = 1 * 1024 * 1024;
#define SWITCH64(x, y) \
{ \
uint64_t __tmp = (x); \
(x) = (y); \
(y) = __tmp; \
}
#define DS_REF_MAX (1ULL << 62)
extern inline dsl_dataset_phys_t *dsl_dataset_phys(dsl_dataset_t *ds);
+static void dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds,
+ uint64_t obj, dmu_tx_t *tx);
+static void dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds,
+ dmu_tx_t *tx);
+
extern int spa_asize_inflation;
static zil_header_t zero_zil;
/*
* Figure out how much of this delta should be propagated to the dsl_dir
* layer. If there's a refreservation, that space has already been
* partially accounted for in our ancestors.
*/
static int64_t
parent_delta(dsl_dataset_t *ds, int64_t delta)
{
dsl_dataset_phys_t *ds_phys;
uint64_t old_bytes, new_bytes;
if (ds->ds_reserved == 0)
return (delta);
ds_phys = dsl_dataset_phys(ds);
old_bytes = MAX(ds_phys->ds_unique_bytes, ds->ds_reserved);
new_bytes = MAX(ds_phys->ds_unique_bytes + delta, ds->ds_reserved);
ASSERT3U(ABS((int64_t)(new_bytes - old_bytes)), <=, ABS(delta));
return (new_bytes - old_bytes);
}
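/*
* Account for a newly written block: update the dataset's space statistics
* (or the MOS totals when ds is NULL) and propagate the delta to the
* dsl_dir layer.
*/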
void
dsl_dataset_block_born(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx)
{
int used, compressed, uncompressed;
int64_t delta;
used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
compressed = BP_GET_PSIZE(bp);
uncompressed = BP_GET_UCSIZE(bp);
dprintf_bp(bp, "ds=%p", ds);
ASSERT(dmu_tx_is_syncing(tx));
/* It could have been compressed away to nothing */
if (BP_IS_HOLE(bp))
return;
ASSERT(BP_GET_TYPE(bp) != DMU_OT_NONE);
ASSERT(DMU_OT_IS_VALID(BP_GET_TYPE(bp)));
if (ds == NULL) {
dsl_pool_mos_diduse_space(tx->tx_pool,
used, compressed, uncompressed);
return;
}
ASSERT3U(bp->blk_birth, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_lock);
delta = parent_delta(ds, used);
dsl_dataset_phys(ds)->ds_referenced_bytes += used;
dsl_dataset_phys(ds)->ds_compressed_bytes += compressed;
dsl_dataset_phys(ds)->ds_uncompressed_bytes += uncompressed;
dsl_dataset_phys(ds)->ds_unique_bytes += used;
if (BP_GET_LSIZE(bp) > SPA_OLD_MAXBLOCKSIZE) {
ds->ds_feature_activation_needed[SPA_FEATURE_LARGE_BLOCKS] =
B_TRUE;
}
spa_feature_t f = zio_checksum_to_feature(BP_GET_CHECKSUM(bp));
if (f != SPA_FEATURE_NONE)
ds->ds_feature_activation_needed[f] = B_TRUE;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD, delta,
compressed, uncompressed, tx);
dsl_dir_transfer_space(ds->ds_dir, used - delta,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
}
+/*
+ * Called when the specified segment has been remapped, and is thus no
+ * longer referenced in the head dataset. The vdev must be indirect.
+ *
+ * If the segment is referenced by a snapshot, put it on the remap deadlist.
+ * Otherwise, add this segment to the obsolete spacemap.
+ */
+void
+dsl_dataset_block_remapped(dsl_dataset_t *ds, uint64_t vdev, uint64_t offset,
+ uint64_t size, uint64_t birth, dmu_tx_t *tx)
+{
+ spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
+
+ ASSERT(dmu_tx_is_syncing(tx));
+ ASSERT(birth <= tx->tx_txg);
+ ASSERT(!ds->ds_is_snapshot);
+
+ if (birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
+ spa_vdev_indirect_mark_obsolete(spa, vdev, offset, size, tx);
+ } else {
+ blkptr_t fakebp;
+ dva_t *dva = &fakebp.blk_dva[0];
+
+ ASSERT(ds != NULL);
+
+ mutex_enter(&ds->ds_remap_deadlist_lock);
+ if (!dsl_dataset_remap_deadlist_exists(ds)) {
+ dsl_dataset_create_remap_deadlist(ds, tx);
+ }
+ mutex_exit(&ds->ds_remap_deadlist_lock);
+
+ BP_ZERO(&fakebp);
+ fakebp.blk_birth = birth;
+ DVA_SET_VDEV(dva, vdev);
+ DVA_SET_OFFSET(dva, offset);
+ DVA_SET_ASIZE(dva, size);
+
+ dsl_deadlist_insert(&ds->ds_remap_deadlist, &fakebp, tx);
+ }
+}
+
int
dsl_dataset_block_kill(dsl_dataset_t *ds, const blkptr_t *bp, dmu_tx_t *tx,
boolean_t async)
{
int used = bp_get_dsize_sync(tx->tx_pool->dp_spa, bp);
int compressed = BP_GET_PSIZE(bp);
int uncompressed = BP_GET_UCSIZE(bp);
if (BP_IS_HOLE(bp))
return (0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(bp->blk_birth <= tx->tx_txg);
if (ds == NULL) {
dsl_free(tx->tx_pool, tx->tx_txg, bp);
dsl_pool_mos_diduse_space(tx->tx_pool,
-used, -compressed, -uncompressed);
return (used);
}
ASSERT3P(tx->tx_pool, ==, ds->ds_dir->dd_pool);
ASSERT(!ds->ds_is_snapshot);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
if (bp->blk_birth > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
int64_t delta;
dprintf_bp(bp, "freeing ds=%llu", ds->ds_object);
dsl_free(tx->tx_pool, tx->tx_txg, bp);
mutex_enter(&ds->ds_lock);
ASSERT(dsl_dataset_phys(ds)->ds_unique_bytes >= used ||
!DS_UNIQUE_IS_ACCURATE(ds));
delta = parent_delta(ds, -used);
dsl_dataset_phys(ds)->ds_unique_bytes -= used;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
delta, -compressed, -uncompressed, tx);
dsl_dir_transfer_space(ds->ds_dir, -used - delta,
DD_USED_REFRSRV, DD_USED_HEAD, tx);
} else {
dprintf_bp(bp, "putting on dead list: %s", "");
if (async) {
/*
* We are here as part of zio's write done callback,
* which means we're a zio interrupt thread. We can't
* call dsl_deadlist_insert() now because it may block
* waiting for I/O. Instead, put bp on the deferred
* queue and let dsl_pool_sync() finish the job.
*/
bplist_append(&ds->ds_pending_deadlist, bp);
} else {
dsl_deadlist_insert(&ds->ds_deadlist, bp, tx);
}
ASSERT3U(ds->ds_prev->ds_object, ==,
dsl_dataset_phys(ds)->ds_prev_snap_obj);
ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_num_children > 0);
/* if (bp->blk_birth > prev prev snap txg) prev unique += bs */
if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object && bp->blk_birth >
dsl_dataset_phys(ds->ds_prev)->ds_prev_snap_txg) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
mutex_enter(&ds->ds_prev->ds_lock);
dsl_dataset_phys(ds->ds_prev)->ds_unique_bytes += used;
mutex_exit(&ds->ds_prev->ds_lock);
}
if (bp->blk_birth > ds->ds_dir->dd_origin_txg) {
dsl_dir_transfer_space(ds->ds_dir, used,
DD_USED_HEAD, DD_USED_SNAP, tx);
}
}
mutex_enter(&ds->ds_lock);
ASSERT3U(dsl_dataset_phys(ds)->ds_referenced_bytes, >=, used);
dsl_dataset_phys(ds)->ds_referenced_bytes -= used;
ASSERT3U(dsl_dataset_phys(ds)->ds_compressed_bytes, >=, compressed);
dsl_dataset_phys(ds)->ds_compressed_bytes -= compressed;
ASSERT3U(dsl_dataset_phys(ds)->ds_uncompressed_bytes, >=, uncompressed);
dsl_dataset_phys(ds)->ds_uncompressed_bytes -= uncompressed;
mutex_exit(&ds->ds_lock);
return (used);
}
/*
* We have to release the fsid synchronously or we risk that a subsequent
* mount of the same dataset will fail to unique_insert the fsid. This
* failure would manifest itself as the fsid of this dataset changing
* between mounts, which makes NFS clients quite unhappy.
*/
static void
dsl_dataset_evict_sync(void *dbu)
{
dsl_dataset_t *ds = dbu;
ASSERT(ds->ds_owner == NULL);
unique_remove(ds->ds_fsid_guid);
}
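/*
* Asynchronous part of dataset eviction: release in-memory state (objset,
* deadlists, dsl_dir hold) and free the dsl_dataset_t.
*/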
static void
dsl_dataset_evict_async(void *dbu)
{
dsl_dataset_t *ds = dbu;
ASSERT(ds->ds_owner == NULL);
ds->ds_dbuf = NULL;
if (ds->ds_objset != NULL)
dmu_objset_evict(ds->ds_objset);
if (ds->ds_prev) {
dsl_dataset_rele(ds->ds_prev, ds);
ds->ds_prev = NULL;
}
bplist_destroy(&ds->ds_pending_deadlist);
- if (ds->ds_deadlist.dl_os != NULL)
+ if (dsl_deadlist_is_open(&ds->ds_deadlist))
dsl_deadlist_close(&ds->ds_deadlist);
+ if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
+ dsl_deadlist_close(&ds->ds_remap_deadlist);
if (ds->ds_dir)
dsl_dir_async_rele(ds->ds_dir, ds);
ASSERT(!list_link_active(&ds->ds_synced_link));
list_destroy(&ds->ds_prop_cbs);
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
+ mutex_destroy(&ds->ds_remap_deadlist_lock);
refcount_destroy(&ds->ds_longholds);
rrw_destroy(&ds->ds_bp_rwlock);
kmem_free(ds, sizeof (dsl_dataset_t));
}
int
dsl_dataset_get_snapname(dsl_dataset_t *ds)
{
dsl_dataset_phys_t *headphys;
int err;
dmu_buf_t *headdbuf;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
if (ds->ds_snapname[0])
return (0);
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0)
return (0);
err = dmu_bonus_hold(mos, dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj,
FTAG, &headdbuf);
if (err != 0)
return (err);
headphys = headdbuf->db_data;
err = zap_value_search(dp->dp_meta_objset,
headphys->ds_snapnames_zapobj, ds->ds_object, 0, ds->ds_snapname);
if (err != 0 && zfs_recover == B_TRUE) {
err = 0;
(void) snprintf(ds->ds_snapname, sizeof (ds->ds_snapname),
"SNAPOBJ=%llu-ERR=%d",
(unsigned long long)ds->ds_object, err);
}
dmu_buf_rele(headdbuf, FTAG);
return (err);
}
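/*
* Look up the named snapshot in the dataset's snapshot-name ZAP and
* return its object number in *value.
*/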
int
dsl_dataset_snap_lookup(dsl_dataset_t *ds, const char *name, uint64_t *value)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
matchtype_t mt = 0;
int err;
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
err = zap_lookup_norm(mos, snapobj, name, 8, 1,
value, mt, NULL, 0, NULL);
if (err == ENOTSUP && (mt & MT_NORMALIZE))
err = zap_lookup(mos, snapobj, name, 8, 1, value);
return (err);
}
int
dsl_dataset_snap_remove(dsl_dataset_t *ds, const char *name, dmu_tx_t *tx,
boolean_t adj_cnt)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
uint64_t snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj;
matchtype_t mt = 0;
int err;
dsl_dir_snap_cmtime_update(ds->ds_dir);
if (dsl_dataset_phys(ds)->ds_flags & DS_FLAG_CI_DATASET)
mt = MT_NORMALIZE;
err = zap_remove_norm(mos, snapobj, name, mt, tx);
if (err == ENOTSUP && (mt & MT_NORMALIZE))
err = zap_remove(mos, snapobj, name, tx);
if (err == 0 && adj_cnt)
dsl_fs_ss_count_adjust(ds->ds_dir, -1,
DD_FIELD_SNAPSHOT_COUNT, tx);
return (err);
}
boolean_t
dsl_dataset_try_add_ref(dsl_pool_t *dp, dsl_dataset_t *ds, void *tag)
{
dmu_buf_t *dbuf = ds->ds_dbuf;
boolean_t result = B_FALSE;
if (dbuf != NULL && dmu_buf_try_add_ref(dbuf, dp->dp_meta_objset,
ds->ds_object, DMU_BONUS_BLKID, tag)) {
if (ds == dmu_buf_get_user(dbuf))
result = B_TRUE;
else
dmu_buf_rele(dbuf, tag);
}
return (result);
}
int
dsl_dataset_hold_obj_flags(dsl_pool_t *dp, uint64_t dsobj,
ds_hold_flags_t flags, void *tag, dsl_dataset_t **dsp)
{
objset_t *mos = dp->dp_meta_objset;
dmu_buf_t *dbuf;
dsl_dataset_t *ds;
int err;
dmu_object_info_t doi;
ASSERT(dsl_pool_config_held(dp));
err = dmu_bonus_hold(mos, dsobj, tag, &dbuf);
if (err != 0)
return (err);
/* Make sure dsobj has the correct object type. */
dmu_object_info_from_db(dbuf, &doi);
if (doi.doi_bonus_type != DMU_OT_DSL_DATASET) {
dmu_buf_rele(dbuf, tag);
return (SET_ERROR(EINVAL));
}
ds = dmu_buf_get_user(dbuf);
if (ds == NULL) {
dsl_dataset_t *winner = NULL;
ds = kmem_zalloc(sizeof (dsl_dataset_t), KM_SLEEP);
ds->ds_dbuf = dbuf;
ds->ds_object = dsobj;
ds->ds_is_snapshot = dsl_dataset_phys(ds)->ds_num_children != 0;
list_link_init(&ds->ds_synced_link);
+ err = dsl_dir_hold_obj(dp, dsl_dataset_phys(ds)->ds_dir_obj,
+ NULL, ds, &ds->ds_dir);
+ if (err != 0) {
+ kmem_free(ds, sizeof (dsl_dataset_t));
+ dmu_buf_rele(dbuf, tag);
+ return (err);
+ }
+
mutex_init(&ds->ds_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_opening_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&ds->ds_sendstream_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&ds->ds_remap_deadlist_lock,
+ NULL, MUTEX_DEFAULT, NULL);
rrw_init(&ds->ds_bp_rwlock, B_FALSE);
refcount_create(&ds->ds_longholds);
bplist_create(&ds->ds_pending_deadlist);
- dsl_deadlist_open(&ds->ds_deadlist,
- mos, dsl_dataset_phys(ds)->ds_deadlist_obj);
list_create(&ds->ds_sendstreams, sizeof (dmu_sendarg_t),
offsetof(dmu_sendarg_t, dsa_link));
list_create(&ds->ds_prop_cbs, sizeof (dsl_prop_cb_record_t),
offsetof(dsl_prop_cb_record_t, cbr_ds_node));
if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
spa_feature_t f;
for (f = 0; f < SPA_FEATURES; f++) {
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET))
continue;
err = zap_contains(mos, dsobj,
spa_feature_table[f].fi_guid);
if (err == 0) {
ds->ds_feature_inuse[f] = B_TRUE;
} else {
ASSERT3U(err, ==, ENOENT);
err = 0;
}
}
}
- err = dsl_dir_hold_obj(dp,
- dsl_dataset_phys(ds)->ds_dir_obj, NULL, ds, &ds->ds_dir);
- if (err != 0) {
- mutex_destroy(&ds->ds_lock);
- mutex_destroy(&ds->ds_opening_lock);
- mutex_destroy(&ds->ds_sendstream_lock);
- refcount_destroy(&ds->ds_longholds);
- bplist_destroy(&ds->ds_pending_deadlist);
- dsl_deadlist_close(&ds->ds_deadlist);
- kmem_free(ds, sizeof (dsl_dataset_t));
- dmu_buf_rele(dbuf, tag);
- return (err);
- }
-
if (!ds->ds_is_snapshot) {
ds->ds_snapname[0] = '\0';
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds, &ds->ds_prev);
}
if (doi.doi_type == DMU_OTN_ZAP_METADATA) {
int zaperr = zap_lookup(mos, ds->ds_object,
DS_FIELD_BOOKMARK_NAMES,
sizeof (ds->ds_bookmarks), 1,
&ds->ds_bookmarks);
if (zaperr != ENOENT)
VERIFY0(zaperr);
}
} else {
if (zfs_flags & ZFS_DEBUG_SNAPNAMES)
err = dsl_dataset_get_snapname(ds);
if (err == 0 &&
dsl_dataset_phys(ds)->ds_userrefs_obj != 0) {
err = zap_count(
ds->ds_dir->dd_pool->dp_meta_objset,
dsl_dataset_phys(ds)->ds_userrefs_obj,
&ds->ds_userrefs);
}
}
if (err == 0 && !ds->ds_is_snapshot) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
&ds->ds_reserved);
if (err == 0) {
err = dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
&ds->ds_quota);
}
} else {
ds->ds_reserved = ds->ds_quota = 0;
}
+ dsl_deadlist_open(&ds->ds_deadlist,
+ mos, dsl_dataset_phys(ds)->ds_deadlist_obj);
+ uint64_t remap_deadlist_obj =
+ dsl_dataset_get_remap_deadlist_object(ds);
+ if (remap_deadlist_obj != 0) {
+ dsl_deadlist_open(&ds->ds_remap_deadlist, mos,
+ remap_deadlist_obj);
+ }
+
dmu_buf_init_user(&ds->ds_dbu, dsl_dataset_evict_sync,
dsl_dataset_evict_async, &ds->ds_dbuf);
if (err == 0)
winner = dmu_buf_set_user_ie(dbuf, &ds->ds_dbu);
if (err != 0 || winner != NULL) {
bplist_destroy(&ds->ds_pending_deadlist);
dsl_deadlist_close(&ds->ds_deadlist);
+ if (dsl_deadlist_is_open(&ds->ds_remap_deadlist))
+ dsl_deadlist_close(&ds->ds_remap_deadlist);
if (ds->ds_prev)
dsl_dataset_rele(ds->ds_prev, ds);
dsl_dir_rele(ds->ds_dir, ds);
mutex_destroy(&ds->ds_lock);
mutex_destroy(&ds->ds_opening_lock);
mutex_destroy(&ds->ds_sendstream_lock);
refcount_destroy(&ds->ds_longholds);
kmem_free(ds, sizeof (dsl_dataset_t));
if (err != 0) {
dmu_buf_rele(dbuf, tag);
return (err);
}
ds = winner;
} else {
ds->ds_fsid_guid =
unique_insert(dsl_dataset_phys(ds)->ds_fsid_guid);
if (ds->ds_fsid_guid !=
dsl_dataset_phys(ds)->ds_fsid_guid) {
zfs_dbgmsg("ds_fsid_guid changed from "
"%llx to %llx for pool %s dataset id %llu",
(long long)
dsl_dataset_phys(ds)->ds_fsid_guid,
(long long)ds->ds_fsid_guid,
spa_name(dp->dp_spa),
dsobj);
}
}
}
ASSERT3P(ds->ds_dbuf, ==, dbuf);
ASSERT3P(dsl_dataset_phys(ds), ==, dbuf->db_data);
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0 ||
spa_version(dp->dp_spa) < SPA_VERSION_ORIGIN ||
dp->dp_origin_snap == NULL || ds == dp->dp_origin_snap);
*dsp = ds;
if ((flags & DS_HOLD_FLAG_DECRYPT) && ds->ds_dir->dd_crypto_obj != 0) {
err = spa_keystore_create_mapping(dp->dp_spa, ds, ds);
if (err != 0) {
dsl_dataset_rele(ds, tag);
return (SET_ERROR(EACCES));
}
}
return (0);
}
int
dsl_dataset_hold_obj(dsl_pool_t *dp, uint64_t dsobj, void *tag,
dsl_dataset_t **dsp)
{
return (dsl_dataset_hold_obj_flags(dp, dsobj, 0, tag, dsp));
}
int
dsl_dataset_hold_flags(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
void *tag, dsl_dataset_t **dsp)
{
dsl_dir_t *dd;
const char *snapname;
uint64_t obj;
int err = 0;
dsl_dataset_t *ds;
err = dsl_dir_hold(dp, name, FTAG, &dd, &snapname);
if (err != 0)
return (err);
ASSERT(dsl_pool_config_held(dp));
obj = dsl_dir_phys(dd)->dd_head_dataset_obj;
if (obj != 0)
err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag, &ds);
else
err = SET_ERROR(ENOENT);
/* we may be looking for a snapshot */
if (err == 0 && snapname != NULL) {
dsl_dataset_t *snap_ds;
if (*snapname++ != '@') {
dsl_dataset_rele_flags(ds, flags, tag);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(ENOENT));
}
dprintf("looking for snapshot '%s'\n", snapname);
err = dsl_dataset_snap_lookup(ds, snapname, &obj);
if (err == 0) {
err = dsl_dataset_hold_obj_flags(dp, obj, flags, tag,
&snap_ds);
}
dsl_dataset_rele_flags(ds, flags, tag);
if (err == 0) {
mutex_enter(&snap_ds->ds_lock);
if (snap_ds->ds_snapname[0] == 0)
(void) strlcpy(snap_ds->ds_snapname, snapname,
sizeof (snap_ds->ds_snapname));
mutex_exit(&snap_ds->ds_lock);
ds = snap_ds;
}
}
if (err == 0)
*dsp = ds;
dsl_dir_rele(dd, FTAG);
return (err);
}
int
dsl_dataset_hold(dsl_pool_t *dp, const char *name, void *tag,
dsl_dataset_t **dsp)
{
return (dsl_dataset_hold_flags(dp, name, 0, tag, dsp));
}
int
dsl_dataset_own_obj(dsl_pool_t *dp, uint64_t dsobj, ds_hold_flags_t flags,
void *tag, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_obj_flags(dp, dsobj, flags, tag, dsp);
if (err != 0)
return (err);
if (!dsl_dataset_tryown(*dsp, tag)) {
dsl_dataset_rele_flags(*dsp, flags, tag);
*dsp = NULL;
return (SET_ERROR(EBUSY));
}
return (0);
}
int
dsl_dataset_own(dsl_pool_t *dp, const char *name, ds_hold_flags_t flags,
void *tag, dsl_dataset_t **dsp)
{
int err = dsl_dataset_hold_flags(dp, name, flags, tag, dsp);
if (err != 0)
return (err);
if (!dsl_dataset_tryown(*dsp, tag)) {
dsl_dataset_rele_flags(*dsp, flags, tag);
return (SET_ERROR(EBUSY));
}
return (0);
}
/*
* See the comment above dsl_pool_hold() for details. In summary, a long
* hold is used to prevent destruction of a dataset while the pool hold
* is dropped, allowing other concurrent operations (e.g. spa_sync()).
*
* The dataset and pool must be held when this function is called. After it
* is called, the pool hold may be released while the dataset is still held
* and accessed.
*/
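/*
 * A minimal sketch of that pattern (illustrative only; error handling and
 * caller names are abbreviated, dsl_pool_hold()/dsl_pool_rele() as
 * described above dsl_pool_hold()):
 *
 * dsl_pool_hold(name, FTAG, &dp);
 * dsl_dataset_hold(dp, name, FTAG, &ds);
 * dsl_dataset_long_hold(ds, FTAG);
 * dsl_pool_rele(dp, FTAG);
 * ... long-running work; the dataset cannot be destroyed ...
 * dsl_pool_hold(name, FTAG, &dp);
 * dsl_dataset_long_rele(ds, FTAG);
 * dsl_dataset_rele(ds, FTAG);
 * dsl_pool_rele(dp, FTAG);
 */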
void
dsl_dataset_long_hold(dsl_dataset_t *ds, void *tag)
{
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
(void) refcount_add(&ds->ds_longholds, tag);
}
void
dsl_dataset_long_rele(dsl_dataset_t *ds, void *tag)
{
(void) refcount_remove(&ds->ds_longholds, tag);
}
/* Return B_TRUE if there are any long holds on this dataset. */
boolean_t
dsl_dataset_long_held(dsl_dataset_t *ds)
{
return (!refcount_is_zero(&ds->ds_longholds));
}
void
dsl_dataset_name(dsl_dataset_t *ds, char *name)
{
if (ds == NULL) {
(void) strcpy(name, "mos");
} else {
dsl_dir_name(ds->ds_dir, name);
VERIFY0(dsl_dataset_get_snapname(ds));
if (ds->ds_snapname[0]) {
VERIFY3U(strlcat(name, "@", ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
/*
* We use a "recursive" mutex so that we
* can call dprintf_ds() with ds_lock held.
*/
if (!MUTEX_HELD(&ds->ds_lock)) {
mutex_enter(&ds->ds_lock);
VERIFY3U(strlcat(name, ds->ds_snapname,
ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
mutex_exit(&ds->ds_lock);
} else {
VERIFY3U(strlcat(name, ds->ds_snapname,
ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
}
}
}
}
int
dsl_dataset_namelen(dsl_dataset_t *ds)
{
VERIFY0(dsl_dataset_get_snapname(ds));
mutex_enter(&ds->ds_lock);
int len = strlen(ds->ds_snapname);
/* add '@' if ds is a snap */
if (len > 0)
len++;
len += dsl_dir_namelen(ds->ds_dir);
mutex_exit(&ds->ds_lock);
return (len);
}
void
dsl_dataset_rele_flags(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
{
if (ds->ds_dir != NULL && ds->ds_dir->dd_crypto_obj != 0 &&
(flags & DS_HOLD_FLAG_DECRYPT)) {
(void) spa_keystore_remove_mapping(ds->ds_dir->dd_pool->dp_spa,
ds->ds_object, ds);
}
dmu_buf_rele(ds->ds_dbuf, tag);
}
void
dsl_dataset_rele(dsl_dataset_t *ds, void *tag)
{
dsl_dataset_rele_flags(ds, 0, tag);
}
void
dsl_dataset_disown(dsl_dataset_t *ds, ds_hold_flags_t flags, void *tag)
{
ASSERT3P(ds->ds_owner, ==, tag);
ASSERT(ds->ds_dbuf != NULL);
mutex_enter(&ds->ds_lock);
ds->ds_owner = NULL;
mutex_exit(&ds->ds_lock);
dsl_dataset_long_rele(ds, tag);
dsl_dataset_rele_flags(ds, flags, tag);
}
boolean_t
dsl_dataset_tryown(dsl_dataset_t *ds, void *tag)
{
boolean_t gotit = FALSE;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
mutex_enter(&ds->ds_lock);
if (ds->ds_owner == NULL && !DS_IS_INCONSISTENT(ds)) {
ds->ds_owner = tag;
dsl_dataset_long_hold(ds, tag);
gotit = TRUE;
}
mutex_exit(&ds->ds_lock);
return (gotit);
}
boolean_t
dsl_dataset_has_owner(dsl_dataset_t *ds)
{
boolean_t rv;
mutex_enter(&ds->ds_lock);
rv = (ds->ds_owner != NULL);
mutex_exit(&ds->ds_lock);
return (rv);
}
void
dsl_dataset_activate_feature(uint64_t dsobj, spa_feature_t f, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
uint64_t zero = 0;
VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
spa_feature_incr(spa, f, tx);
dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
VERIFY0(zap_add(mos, dsobj, spa_feature_table[f].fi_guid,
sizeof (zero), 1, &zero, tx));
}
void
dsl_dataset_deactivate_feature(uint64_t dsobj, spa_feature_t f, dmu_tx_t *tx)
{
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = dmu_tx_pool(tx)->dp_meta_objset;
VERIFY(spa_feature_table[f].fi_flags & ZFEATURE_FLAG_PER_DATASET);
VERIFY0(zap_remove(mos, dsobj, spa_feature_table[f].fi_guid, tx));
spa_feature_decr(spa, f, tx);
}
uint64_t
dsl_dataset_create_sync_dd(dsl_dir_t *dd, dsl_dataset_t *origin,
dsl_crypto_params_t *dcp, uint64_t flags, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
dmu_buf_t *dbuf;
dsl_dataset_phys_t *dsphys;
uint64_t dsobj;
objset_t *mos = dp->dp_meta_objset;
if (origin == NULL)
origin = dp->dp_origin_snap;
ASSERT(origin == NULL || origin->ds_dir->dd_pool == dp);
ASSERT(origin == NULL || dsl_dataset_phys(origin)->ds_num_children > 0);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
bzero(dsphys, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = dd->dd_object;
dsphys->ds_flags = flags;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_snapnames_zapobj =
zap_create_norm(mos, U8_TEXTPREP_TOUPPER, DMU_OT_DSL_DS_SNAP_MAP,
DMU_OT_NONE, 0, tx);
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = tx->tx_txg == TXG_INITIAL ? 1 : tx->tx_txg;
if (origin == NULL) {
dsphys->ds_deadlist_obj = dsl_deadlist_alloc(mos, tx);
} else {
dsl_dataset_t *ohds; /* head of the origin snapshot */
dsphys->ds_prev_snap_obj = origin->ds_object;
dsphys->ds_prev_snap_txg =
dsl_dataset_phys(origin)->ds_creation_txg;
dsphys->ds_referenced_bytes =
dsl_dataset_phys(origin)->ds_referenced_bytes;
dsphys->ds_compressed_bytes =
dsl_dataset_phys(origin)->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes =
dsl_dataset_phys(origin)->ds_uncompressed_bytes;
rrw_enter(&origin->ds_bp_rwlock, RW_READER, FTAG);
dsphys->ds_bp = dsl_dataset_phys(origin)->ds_bp;
rrw_exit(&origin->ds_bp_rwlock, FTAG);
/*
* Inherit flags that describe the dataset's contents
* (INCONSISTENT) or properties (Case Insensitive).
*/
dsphys->ds_flags |= dsl_dataset_phys(origin)->ds_flags &
(DS_FLAG_INCONSISTENT | DS_FLAG_CI_DATASET);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (origin->ds_feature_inuse[f])
dsl_dataset_activate_feature(dsobj, f, tx);
}
dmu_buf_will_dirty(origin->ds_dbuf, tx);
dsl_dataset_phys(origin)->ds_num_children++;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(origin->ds_dir)->dd_head_dataset_obj,
FTAG, &ohds));
dsphys->ds_deadlist_obj = dsl_deadlist_clone(&ohds->ds_deadlist,
dsphys->ds_prev_snap_txg, dsphys->ds_prev_snap_obj, tx);
dsl_dataset_rele(ohds, FTAG);
if (spa_version(dp->dp_spa) >= SPA_VERSION_NEXT_CLONES) {
if (dsl_dataset_phys(origin)->ds_next_clones_obj == 0) {
dsl_dataset_phys(origin)->ds_next_clones_obj =
zap_create(mos,
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(mos,
dsl_dataset_phys(origin)->ds_next_clones_obj,
dsobj, tx));
}
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_phys(dd)->dd_origin_obj = origin->ds_object;
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
dsl_dir_phys(origin->ds_dir)->dd_clones =
zap_create(mos,
DMU_OT_DSL_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(mos,
dsl_dir_phys(origin->ds_dir)->dd_clones,
dsobj, tx));
}
}
/* handle encryption */
dsl_dataset_create_crypt_sync(dsobj, dd, origin, dcp, tx);
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
dsphys->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
dmu_buf_rele(dbuf, FTAG);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
dsl_dir_phys(dd)->dd_head_dataset_obj = dsobj;
return (dsobj);
}
static void
dsl_dataset_zero_zil(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
if (bcmp(&os->os_zil_header, &zero_zil, sizeof (zero_zil)) != 0) {
dsl_pool_t *dp = ds->ds_dir->dd_pool;
zio_t *zio;
bzero(&os->os_zil_header, sizeof (os->os_zil_header));
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dsl_dataset_sync(ds, zio, tx);
VERIFY0(zio_wait(zio));
/* dsl_dataset_sync_done will drop this reference. */
dmu_buf_add_ref(ds->ds_dbuf, ds);
dsl_dataset_sync_done(ds, tx);
}
}
uint64_t
dsl_dataset_create_sync(dsl_dir_t *pdd, const char *lastname,
dsl_dataset_t *origin, uint64_t flags, cred_t *cr,
dsl_crypto_params_t *dcp, dmu_tx_t *tx)
{
dsl_pool_t *dp = pdd->dd_pool;
uint64_t dsobj, ddobj;
dsl_dir_t *dd;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(lastname[0] != '@');
ddobj = dsl_dir_create_sync(dp, pdd, lastname, tx);
VERIFY0(dsl_dir_hold_obj(dp, ddobj, lastname, FTAG, &dd));
dsobj = dsl_dataset_create_sync_dd(dd, origin, dcp,
flags & ~DS_CREATE_FLAG_NODIRTY, tx);
dsl_deleg_set_create_perms(dd, tx, cr);
/*
* Since we're creating a new node we know it's a leaf, so we can
* initialize the counts if the limit feature is active.
*/
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
uint64_t cnt = 0;
objset_t *os = dd->dd_pool->dp_meta_objset;
dsl_dir_zapify(dd, tx);
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (cnt), 1, &cnt, tx));
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (cnt), 1, &cnt, tx));
}
dsl_dir_rele(dd, FTAG);
/*
* If we are creating a clone, make sure we zero out any stale
* data from the origin snapshot's zil header.
*/
if (origin != NULL && !(flags & DS_CREATE_FLAG_NODIRTY)) {
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
dsl_dataset_zero_zil(ds, tx);
dsl_dataset_rele(ds, FTAG);
}
return (dsobj);
}
/*
* The unique space in the head dataset can be calculated by subtracting
* the space used in the most recent snapshot that is still being used
* in this file system from the space currently in use. To figure out
* the space in the most recent snapshot still in use, we need to take
* the total space used in the snapshot and subtract out the space that
* has been freed up since the snapshot was taken.
*/
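/*
 * Concretely (with mrs_used = referenced bytes of the most recent snapshot
 * and dlused = bytes on the head's deadlist, i.e. space freed since that
 * snapshot was taken), the calculation below works out to:
 *
 * ds_unique_bytes = ds_referenced_bytes - (mrs_used - dlused)
 */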
void
dsl_dataset_recalc_head_uniq(dsl_dataset_t *ds)
{
uint64_t mrs_used;
uint64_t dlused, dlcomp, dluncomp;
ASSERT(!ds->ds_is_snapshot);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0)
mrs_used = dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes;
else
mrs_used = 0;
dsl_deadlist_space(&ds->ds_deadlist, &dlused, &dlcomp, &dluncomp);
ASSERT3U(dlused, <=, mrs_used);
dsl_dataset_phys(ds)->ds_unique_bytes =
dsl_dataset_phys(ds)->ds_referenced_bytes - (mrs_used - dlused);
if (spa_version(ds->ds_dir->dd_pool->dp_spa) >=
SPA_VERSION_UNIQUE_ACCURATE)
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
}
void
dsl_dataset_remove_from_next_clones(dsl_dataset_t *ds, uint64_t obj,
dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
ASSERTV(uint64_t count);
int err;
ASSERT(dsl_dataset_phys(ds)->ds_num_children >= 2);
err = zap_remove_int(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
obj, tx);
/*
* The err should not be ENOENT, but a bug in a previous version
* of the code could cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a missing entry.
* If we knew that the pool was created after
* SPA_VERSION_NEXT_CLONES, we could assert that it isn't
* ENOENT. However, at least we can check that we don't have
* too many entries in the next_clones_obj even after failing to
* remove this one.
*/
if (err != ENOENT)
VERIFY0(err);
ASSERT0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
&count));
ASSERT3U(count, <=, dsl_dataset_phys(ds)->ds_num_children - 2);
}
blkptr_t *
dsl_dataset_get_blkptr(dsl_dataset_t *ds)
{
return (&dsl_dataset_phys(ds)->ds_bp);
}
spa_t *
dsl_dataset_get_spa(dsl_dataset_t *ds)
{
return (ds->ds_dir->dd_pool->dp_spa);
}
void
dsl_dataset_dirty(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp;
if (ds == NULL) /* this is the meta-objset */
return;
ASSERT(ds->ds_objset != NULL);
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0)
panic("dirtying snapshot!");
/* Must not dirty a dataset in the same txg where it got snapshotted. */
ASSERT3U(tx->tx_txg, >, dsl_dataset_phys(ds)->ds_prev_snap_txg);
dp = ds->ds_dir->dd_pool;
if (txg_list_add(&dp->dp_dirty_datasets, ds, tx->tx_txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, ds);
}
}
boolean_t
dsl_dataset_is_dirty(dsl_dataset_t *ds)
{
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
ds, t))
return (B_TRUE);
}
return (B_FALSE);
}
static int
dsl_dataset_snapshot_reserve_space(dsl_dataset_t *ds, dmu_tx_t *tx)
{
uint64_t asize;
if (!dmu_tx_is_syncing(tx))
return (0);
/*
* If there's an fs-only reservation, any blocks that might become
* owned by the snapshot dataset must be accommodated by space
* outside of the reservation.
*/
ASSERT(ds->ds_reserved == 0 || DS_UNIQUE_IS_ACCURATE(ds));
asize = MIN(dsl_dataset_phys(ds)->ds_unique_bytes, ds->ds_reserved);
if (asize > dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE))
return (SET_ERROR(ENOSPC));
/*
* Propagate any reserved space for this snapshot to other
* snapshot checks in this sync group.
*/
if (asize > 0)
dsl_dir_willuse_space(ds->ds_dir, asize, tx);
return (0);
}
int
dsl_dataset_snapshot_check_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx, boolean_t recv, uint64_t cnt, cred_t *cr)
{
int error;
uint64_t value;
ds->ds_trysnap_txg = tx->tx_txg;
if (!dmu_tx_is_syncing(tx))
return (0);
/*
* We don't allow multiple snapshots of the same txg. If there
* is already one, try again.
*/
if (dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg)
return (SET_ERROR(EAGAIN));
/*
* Check for conflicting snapshot name.
*/
error = dsl_dataset_snap_lookup(ds, snapname, &value);
if (error == 0)
return (SET_ERROR(EEXIST));
if (error != ENOENT)
return (error);
/*
* We don't allow taking snapshots of inconsistent datasets, such as
* those into which we are currently receiving. However, if we are
* creating this snapshot as part of a receive, this check will be
* executed atomically with respect to the completion of the receive
* itself but prior to the clearing of DS_FLAG_INCONSISTENT; in this
* case we ignore this, knowing it will be fixed up for us shortly in
* dmu_recv_end_sync().
*/
if (!recv && DS_IS_INCONSISTENT(ds))
return (SET_ERROR(EBUSY));
/*
* Skip the check for temporary snapshots or if we have already checked
* the counts in dsl_dataset_snapshot_check. This means we really only
* check the count here when we're receiving a stream.
*/
if (cnt != 0 && cr != NULL) {
error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
ZFS_PROP_SNAPSHOT_LIMIT, NULL, cr);
if (error != 0)
return (error);
}
error = dsl_dataset_snapshot_reserve_space(ds, tx);
if (error != 0)
return (error);
return (0);
}
int
dsl_dataset_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_arg_t *ddsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
int rv = 0;
/*
* Pre-compute how many total new snapshots will be created for each
* level in the tree and below. This is needed for validating the
* snapshot limit when either taking a recursive snapshot or when
* taking multiple snapshots.
*
* The problem is that the counts are not actually adjusted when
* we are checking, only when we finally sync. For a single snapshot,
* this is easy, the count will increase by 1 at each node up the tree,
* but it's more complicated for the recursive/multiple snapshot case.
*
* The dsl_fs_ss_limit_check function does recursively check the count
* at each level up the tree but since it is validating each snapshot
* independently we need to be sure that we are validating the complete
* count for the entire set of snapshots. We do this by rolling up the
* counts for each component of the name into an nvlist and then
* checking each of those cases with the aggregated count.
*
* This approach properly handles not only the recursive snapshot
* case (where we get all of those on the ddsa_snaps list) but also
* the sibling case (e.g. snapshot a/b and a/c so that we will also
* validate the limit on 'a' using a count of 2).
*
* We validate the snapshot names in the third loop and only report
* name errors once.
*/
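/*
 * For example (illustrative), snapshotting a/b and a/c in a single request
 * rolls up to cnt_track = { "a/b": 1, "a/c": 1, "a": 2 }, so the snapshot
 * limit on 'a' is checked against an aggregate count of 2.
 */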
if (dmu_tx_is_syncing(tx)) {
char *nm;
nvlist_t *cnt_track = NULL;
cnt_track = fnvlist_alloc();
nm = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/* Rollup aggregated counts into the cnt_track list */
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL;
pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
char *pdelim;
uint64_t val;
(void) strlcpy(nm, nvpair_name(pair), MAXPATHLEN);
pdelim = strchr(nm, '@');
if (pdelim == NULL)
continue;
*pdelim = '\0';
do {
if (nvlist_lookup_uint64(cnt_track, nm,
&val) == 0) {
/* update existing entry */
fnvlist_add_uint64(cnt_track, nm,
val + 1);
} else {
/* add to list */
fnvlist_add_uint64(cnt_track, nm, 1);
}
pdelim = strrchr(nm, '/');
if (pdelim != NULL)
*pdelim = '\0';
} while (pdelim != NULL);
}
kmem_free(nm, MAXPATHLEN);
/* Check aggregated counts at each level */
for (pair = nvlist_next_nvpair(cnt_track, NULL);
pair != NULL; pair = nvlist_next_nvpair(cnt_track, pair)) {
int error = 0;
char *name;
uint64_t cnt = 0;
dsl_dataset_t *ds;
name = nvpair_name(pair);
cnt = fnvpair_value_uint64(pair);
ASSERT(cnt > 0);
error = dsl_dataset_hold(dp, name, FTAG, &ds);
if (error == 0) {
error = dsl_fs_ss_limit_check(ds->ds_dir, cnt,
ZFS_PROP_SNAPSHOT_LIMIT, NULL,
ddsa->ddsa_cr);
dsl_dataset_rele(ds, FTAG);
}
if (error != 0) {
if (ddsa->ddsa_errors != NULL)
fnvlist_add_int32(ddsa->ddsa_errors,
name, error);
rv = error;
/* only report one error for this check */
break;
}
}
nvlist_free(cnt_track);
}
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
int error = 0;
dsl_dataset_t *ds;
char *name, *atp = NULL;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
name = nvpair_name(pair);
if (strlen(name) >= ZFS_MAX_DATASET_NAME_LEN)
error = SET_ERROR(ENAMETOOLONG);
if (error == 0) {
atp = strchr(name, '@');
if (atp == NULL)
error = SET_ERROR(EINVAL);
if (error == 0)
(void) strlcpy(dsname, name, atp - name + 1);
}
if (error == 0)
error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
if (error == 0) {
/* passing 0/NULL skips dsl_fs_ss_limit_check */
error = dsl_dataset_snapshot_check_impl(ds,
atp + 1, tx, B_FALSE, 0, NULL);
dsl_dataset_rele(ds, FTAG);
}
if (error != 0) {
if (ddsa->ddsa_errors != NULL) {
fnvlist_add_int32(ddsa->ddsa_errors,
name, error);
}
rv = error;
}
}
return (rv);
}
void
dsl_dataset_snapshot_sync_impl(dsl_dataset_t *ds, const char *snapname,
dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dmu_buf_t *dbuf;
dsl_dataset_phys_t *dsphys;
uint64_t dsobj, crtxg;
objset_t *mos = dp->dp_meta_objset;
ASSERTV(static zil_header_t zero_zil);
ASSERTV(objset_t *os);
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
/*
* If we are on an old pool, the zil must not be active, in which
* case it will be zeroed. Usually zil_suspend() accomplishes this.
*/
ASSERT(spa_version(dmu_tx_pool(tx)->dp_spa) >= SPA_VERSION_FAST_SNAP ||
dmu_objset_from_ds(ds, &os) != 0 ||
bcmp(&os->os_phys->os_zil_header, &zero_zil,
sizeof (zero_zil)) == 0);
/* Should not snapshot a dirty dataset. */
ASSERT(!txg_list_member(&ds->ds_dir->dd_pool->dp_dirty_datasets,
ds, tx->tx_txg));
dsl_fs_ss_count_adjust(ds->ds_dir, 1, DD_FIELD_SNAPSHOT_COUNT, tx);
/*
* The origin's ds_creation_txg has to be < TXG_INITIAL
*/
if (strcmp(snapname, ORIGIN_DIR_NAME) == 0)
crtxg = 1;
else
crtxg = tx->tx_txg;
dsobj = dmu_object_alloc(mos, DMU_OT_DSL_DATASET, 0,
DMU_OT_DSL_DATASET, sizeof (dsl_dataset_phys_t), tx);
VERIFY0(dmu_bonus_hold(mos, dsobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
dsphys = dbuf->db_data;
bzero(dsphys, sizeof (dsl_dataset_phys_t));
dsphys->ds_dir_obj = ds->ds_dir->dd_object;
dsphys->ds_fsid_guid = unique_create();
(void) random_get_pseudo_bytes((void*)&dsphys->ds_guid,
sizeof (dsphys->ds_guid));
dsphys->ds_prev_snap_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsphys->ds_prev_snap_txg = dsl_dataset_phys(ds)->ds_prev_snap_txg;
dsphys->ds_next_snap_obj = ds->ds_object;
dsphys->ds_num_children = 1;
dsphys->ds_creation_time = gethrestime_sec();
dsphys->ds_creation_txg = crtxg;
dsphys->ds_deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
dsphys->ds_referenced_bytes = dsl_dataset_phys(ds)->ds_referenced_bytes;
dsphys->ds_compressed_bytes = dsl_dataset_phys(ds)->ds_compressed_bytes;
dsphys->ds_uncompressed_bytes =
dsl_dataset_phys(ds)->ds_uncompressed_bytes;
dsphys->ds_flags = dsl_dataset_phys(ds)->ds_flags;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsphys->ds_bp = dsl_dataset_phys(ds)->ds_bp;
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dmu_buf_rele(dbuf, FTAG);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_inuse[f])
dsl_dataset_activate_feature(dsobj, f, tx);
}
ASSERT3U(ds->ds_prev != 0, ==,
dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
if (ds->ds_prev) {
uint64_t next_clones_obj =
dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj;
ASSERT(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object ||
dsl_dataset_phys(ds->ds_prev)->ds_num_children > 1);
if (dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj ==
ds->ds_object) {
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
dsl_dataset_phys(ds->ds_prev)->ds_creation_txg);
dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj = dsobj;
} else if (next_clones_obj != 0) {
dsl_dataset_remove_from_next_clones(ds->ds_prev,
dsphys->ds_next_snap_obj, tx);
VERIFY0(zap_add_int(mos,
next_clones_obj, dsobj, tx));
}
}
/*
* If we have a reference-reservation on this dataset, we will
* need to increase the amount of refreservation being charged
* since our unique space is going to zero.
*/
if (ds->ds_reserved) {
int64_t delta;
ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
delta = MIN(dsl_dataset_phys(ds)->ds_unique_bytes,
ds->ds_reserved);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV,
delta, 0, 0, tx);
}
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_deadlist_obj =
dsl_deadlist_clone(&ds->ds_deadlist, UINT64_MAX,
dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_open(&ds->ds_deadlist, mos,
dsl_dataset_phys(ds)->ds_deadlist_obj);
dsl_deadlist_add_key(&ds->ds_deadlist,
dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
+ if (dsl_dataset_remap_deadlist_exists(ds)) {
+ uint64_t remap_deadlist_obj =
+ dsl_dataset_get_remap_deadlist_object(ds);
+ /*
+ * Move the remap_deadlist to the snapshot. The head
+ * will create a new remap deadlist on demand, from
+ * dsl_dataset_block_remapped().
+ */
+ dsl_dataset_unset_remap_deadlist_object(ds, tx);
+ dsl_deadlist_close(&ds->ds_remap_deadlist);
+
+ dmu_object_zapify(mos, dsobj, DMU_OT_DSL_DATASET, tx);
+ VERIFY0(zap_add(mos, dsobj, DS_FIELD_REMAP_DEADLIST,
+ sizeof (remap_deadlist_obj), 1, &remap_deadlist_obj, tx));
+ }
+
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, <, tx->tx_txg);
dsl_dataset_phys(ds)->ds_prev_snap_obj = dsobj;
dsl_dataset_phys(ds)->ds_prev_snap_txg = crtxg;
dsl_dataset_phys(ds)->ds_unique_bytes = 0;
+
if (spa_version(dp->dp_spa) >= SPA_VERSION_UNIQUE_ACCURATE)
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_UNIQUE_ACCURATE;
VERIFY0(zap_add(mos, dsl_dataset_phys(ds)->ds_snapnames_zapobj,
snapname, 8, 1, &dsobj, tx));
if (ds->ds_prev)
dsl_dataset_rele(ds->ds_prev, ds);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, ds, &ds->ds_prev));
dsl_scan_ds_snapshotted(ds, tx);
dsl_dir_snap_cmtime_update(ds->ds_dir);
spa_history_log_internal_ds(ds->ds_prev, "snapshot", tx, "");
}
void
dsl_dataset_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_arg_t *ddsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
nvpair_t *pair;
for (pair = nvlist_next_nvpair(ddsa->ddsa_snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(ddsa->ddsa_snaps, pair)) {
dsl_dataset_t *ds;
char *name, *atp;
char dsname[ZFS_MAX_DATASET_NAME_LEN];
name = nvpair_name(pair);
atp = strchr(name, '@');
(void) strlcpy(dsname, name, atp - name + 1);
VERIFY0(dsl_dataset_hold(dp, dsname, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, atp + 1, tx);
if (ddsa->ddsa_props != NULL) {
dsl_props_set_sync_impl(ds->ds_prev,
ZPROP_SRC_LOCAL, ddsa->ddsa_props, tx);
}
zvol_create_minors(dp->dp_spa, nvpair_name(pair), B_TRUE);
dsl_dataset_rele(ds, FTAG);
}
}
/*
* The snapshots must all be in the same pool.
* All-or-nothing: if there are any failures, nothing will be modified.
*/
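/*
 * Minimal caller sketch (illustrative; the dataset name is hypothetical).
 * Only the nvpair names in 'snaps' are consumed, so any value type works:
 *
 * nvlist_t *snaps = fnvlist_alloc();
 * fnvlist_add_boolean(snaps, "pool/fs@today");
 * int error = dsl_dataset_snapshot(snaps, NULL, NULL);
 * fnvlist_free(snaps);
 */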
int
dsl_dataset_snapshot(nvlist_t *snaps, nvlist_t *props, nvlist_t *errors)
{
dsl_dataset_snapshot_arg_t ddsa;
nvpair_t *pair;
boolean_t needsuspend;
int error;
spa_t *spa;
char *firstname;
nvlist_t *suspended = NULL;
pair = nvlist_next_nvpair(snaps, NULL);
if (pair == NULL)
return (0);
firstname = nvpair_name(pair);
error = spa_open(firstname, &spa, FTAG);
if (error != 0)
return (error);
needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
spa_close(spa, FTAG);
if (needsuspend) {
suspended = fnvlist_alloc();
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
char fsname[ZFS_MAX_DATASET_NAME_LEN];
char *snapname = nvpair_name(pair);
char *atp;
void *cookie;
atp = strchr(snapname, '@');
if (atp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
(void) strlcpy(fsname, snapname, atp - snapname + 1);
error = zil_suspend(fsname, &cookie);
if (error != 0)
break;
fnvlist_add_uint64(suspended, fsname,
(uintptr_t)cookie);
}
}
ddsa.ddsa_snaps = snaps;
ddsa.ddsa_props = props;
ddsa.ddsa_errors = errors;
ddsa.ddsa_cr = CRED();
if (error == 0) {
error = dsl_sync_task(firstname, dsl_dataset_snapshot_check,
dsl_dataset_snapshot_sync, &ddsa,
fnvlist_num_pairs(snaps) * 3, ZFS_SPACE_CHECK_NORMAL);
}
if (suspended != NULL) {
for (pair = nvlist_next_nvpair(suspended, NULL); pair != NULL;
pair = nvlist_next_nvpair(suspended, pair)) {
zil_resume((void *)(uintptr_t)
fnvpair_value_uint64(pair));
}
fnvlist_free(suspended);
}
return (error);
}
typedef struct dsl_dataset_snapshot_tmp_arg {
const char *ddsta_fsname;
const char *ddsta_snapname;
minor_t ddsta_cleanup_minor;
const char *ddsta_htag;
} dsl_dataset_snapshot_tmp_arg_t;
static int
dsl_dataset_snapshot_tmp_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
error = dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds);
if (error != 0)
return (error);
/* NULL cred means no limit check for tmp snapshot */
error = dsl_dataset_snapshot_check_impl(ds, ddsta->ddsta_snapname,
tx, B_FALSE, 0, NULL);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (spa_version(dp->dp_spa) < SPA_VERSION_USERREFS) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
error = dsl_dataset_user_hold_check_one(NULL, ddsta->ddsta_htag,
B_TRUE, tx);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_snapshot_tmp_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_snapshot_tmp_arg_t *ddsta = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddsta->ddsta_fsname, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, ddsta->ddsta_snapname, tx);
dsl_dataset_user_hold_sync_one(ds->ds_prev, ddsta->ddsta_htag,
ddsta->ddsta_cleanup_minor, gethrestime_sec(), tx);
dsl_destroy_snapshot_sync_impl(ds->ds_prev, B_TRUE, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_snapshot_tmp(const char *fsname, const char *snapname,
minor_t cleanup_minor, const char *htag)
{
dsl_dataset_snapshot_tmp_arg_t ddsta;
int error;
spa_t *spa;
boolean_t needsuspend;
void *cookie;
ddsta.ddsta_fsname = fsname;
ddsta.ddsta_snapname = snapname;
ddsta.ddsta_cleanup_minor = cleanup_minor;
ddsta.ddsta_htag = htag;
error = spa_open(fsname, &spa, FTAG);
if (error != 0)
return (error);
needsuspend = (spa_version(spa) < SPA_VERSION_FAST_SNAP);
spa_close(spa, FTAG);
if (needsuspend) {
error = zil_suspend(fsname, &cookie);
if (error != 0)
return (error);
}
error = dsl_sync_task(fsname, dsl_dataset_snapshot_tmp_check,
dsl_dataset_snapshot_tmp_sync, &ddsta, 3, ZFS_SPACE_CHECK_RESERVED);
if (needsuspend)
zil_resume(cookie);
return (error);
}
void
dsl_dataset_sync(dsl_dataset_t *ds, zio_t *zio, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(ds->ds_objset != NULL);
ASSERT(dsl_dataset_phys(ds)->ds_next_snap_obj == 0);
/*
* in case we had to change ds_fsid_guid when we opened it,
* sync it out now.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_fsid_guid = ds->ds_fsid_guid;
if (ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] != 0) {
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_OBJECT, 8, 1,
&ds->ds_resume_object[tx->tx_txg & TXG_MASK], tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_OFFSET, 8, 1,
&ds->ds_resume_offset[tx->tx_txg & TXG_MASK], tx));
VERIFY0(zap_update(tx->tx_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_BYTES, 8, 1,
&ds->ds_resume_bytes[tx->tx_txg & TXG_MASK], tx));
ds->ds_resume_object[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_offset[tx->tx_txg & TXG_MASK] = 0;
ds->ds_resume_bytes[tx->tx_txg & TXG_MASK] = 0;
}
dmu_objset_sync(ds->ds_objset, zio, tx);
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_activation_needed[f]) {
if (ds->ds_feature_inuse[f])
continue;
dsl_dataset_activate_feature(ds->ds_object, f, tx);
ds->ds_feature_inuse[f] = B_TRUE;
}
}
}
static int
deadlist_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_deadlist_t *dl = arg;
dsl_deadlist_insert(dl, bp, tx);
return (0);
}
void
dsl_dataset_sync_done(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *os = ds->ds_objset;
bplist_iterate(&ds->ds_pending_deadlist,
deadlist_enqueue_cb, &ds->ds_deadlist, tx);
if (os->os_synced_dnodes != NULL) {
multilist_destroy(os->os_synced_dnodes);
os->os_synced_dnodes = NULL;
}
ASSERT(!dmu_objset_is_dirty(os, dmu_tx_get_txg(tx)));
dmu_buf_rele(ds->ds_dbuf, ds);
}
int
get_clones_stat_impl(dsl_dataset_t *ds, nvlist_t *val)
{
uint64_t count = 0;
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool));
/*
* There may be missing entries in ds_next_clones_obj
* due to a bug in a previous version of the code.
* Only trust it if it has the right number of entries.
*/
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
VERIFY0(zap_count(mos, dsl_dataset_phys(ds)->ds_next_clones_obj,
&count));
}
if (count != dsl_dataset_phys(ds)->ds_num_children - 1) {
return (ENOENT);
}
for (zap_cursor_init(&zc, mos,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_dataset_t *clone;
char buf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
za.za_first_integer, FTAG, &clone));
dsl_dir_name(clone->ds_dir, buf);
fnvlist_add_boolean(val, buf);
dsl_dataset_rele(clone, FTAG);
}
zap_cursor_fini(&zc);
return (0);
}
void
get_clones_stat(dsl_dataset_t *ds, nvlist_t *nv)
{
nvlist_t *propval = fnvlist_alloc();
nvlist_t *val;
/*
* We use nvlist_alloc() instead of fnvlist_alloc() because the
* latter would allocate the list with the NV_UNIQUE_NAME flag.
* As a result, every time a clone name is appended to the list
* it would be (linearly) searched for a duplicate name.
* We already know that all clone names must be unique and we
* want to avoid the quadratic complexity of double-checking that
* because we can have a large number of clones.
*/
VERIFY0(nvlist_alloc(&val, 0, KM_SLEEP));
if (get_clones_stat_impl(ds, val) == 0) {
fnvlist_add_nvlist(propval, ZPROP_VALUE, val);
fnvlist_add_nvlist(nv, zfs_prop_to_name(ZFS_PROP_CLONES),
propval);
}
nvlist_free(val);
nvlist_free(propval);
}
/*
* Returns a string that represents the receive resume stats token. It should
* be freed with strfree().
*/
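/*
 * Informally, the token built below has the form
 * "<ZFS_SEND_RESUME_TOKEN_VERSION>-<fletcher4 word 0 of the compressed
 * payload>-<packed nvlist size>-<hex of the gzip-compressed packed
 * nvlist>", where the nvlist carries fromguid/object/offset/bytes/toguid/
 * toname plus the largeblockok/embedok/compressok/rawok flags when present.
 */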
char *
get_receive_resume_stats_impl(dsl_dataset_t *ds)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
if (dsl_dataset_has_resume_receive_state(ds)) {
char *str;
void *packed;
uint8_t *compressed;
uint64_t val;
nvlist_t *token_nv = fnvlist_alloc();
size_t packed_size, compressed_size;
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_FROMGUID, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "fromguid", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OBJECT, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "object", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_OFFSET, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "offset", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_BYTES, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "bytes", val);
}
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TOGUID, sizeof (val), 1, &val) == 0) {
fnvlist_add_uint64(token_nv, "toguid", val);
}
char buf[MAXNAMELEN];
if (zap_lookup(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_TONAME, 1, sizeof (buf), buf) == 0) {
fnvlist_add_string(token_nv, "toname", buf);
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_LARGEBLOCK) == 0) {
fnvlist_add_boolean(token_nv, "largeblockok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_EMBEDOK) == 0) {
fnvlist_add_boolean(token_nv, "embedok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_COMPRESSOK) == 0) {
fnvlist_add_boolean(token_nv, "compressok");
}
if (zap_contains(dp->dp_meta_objset, ds->ds_object,
DS_FIELD_RESUME_RAWOK) == 0) {
fnvlist_add_boolean(token_nv, "rawok");
}
packed = fnvlist_pack(token_nv, &packed_size);
fnvlist_free(token_nv);
compressed = kmem_alloc(packed_size, KM_SLEEP);
compressed_size = gzip_compress(packed, compressed,
packed_size, packed_size, 6);
zio_cksum_t cksum;
fletcher_4_native_varsize(compressed, compressed_size, &cksum);
str = kmem_alloc(compressed_size * 2 + 1, KM_SLEEP);
for (int i = 0; i < compressed_size; i++) {
(void) sprintf(str + i * 2, "%02x", compressed[i]);
}
str[compressed_size * 2] = '\0';
char *propval = kmem_asprintf("%u-%llx-%llx-%s",
ZFS_SEND_RESUME_TOKEN_VERSION,
(longlong_t)cksum.zc_word[0],
(longlong_t)packed_size, str);
kmem_free(packed, packed_size);
kmem_free(str, compressed_size * 2 + 1);
kmem_free(compressed, packed_size);
return (propval);
}
return (strdup(""));
}
/*
* Returns a string that represents the receive resume stats token of the
* dataset's child. It should be freed with strfree().
*/
char *
get_child_receive_stats(dsl_dataset_t *ds)
{
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_t *recv_ds;
dsl_dataset_name(ds, recvname);
if (strlcat(recvname, "/", sizeof (recvname)) <
sizeof (recvname) &&
strlcat(recvname, recv_clone_name, sizeof (recvname)) <
sizeof (recvname) &&
dsl_dataset_hold(ds->ds_dir->dd_pool, recvname, FTAG,
&recv_ds) == 0) {
char *propval = get_receive_resume_stats_impl(recv_ds);
dsl_dataset_rele(recv_ds, FTAG);
return (propval);
}
return (strdup(""));
}
static void
get_receive_resume_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
char *propval = get_receive_resume_stats_impl(ds);
if (strcmp(propval, "") != 0) {
dsl_prop_nvlist_add_string(nv,
ZFS_PROP_RECEIVE_RESUME_TOKEN, propval);
} else {
char *childval = get_child_receive_stats(ds);
if (strcmp(childval, "") != 0) {
dsl_prop_nvlist_add_string(nv,
ZFS_PROP_RECEIVE_RESUME_TOKEN, childval);
}
strfree(childval);
}
strfree(propval);
}
uint64_t
dsl_get_refratio(dsl_dataset_t *ds)
{
uint64_t ratio = dsl_dataset_phys(ds)->ds_compressed_bytes == 0 ? 100 :
(dsl_dataset_phys(ds)->ds_uncompressed_bytes * 100 /
dsl_dataset_phys(ds)->ds_compressed_bytes);
return (ratio);
}
uint64_t
dsl_get_logicalreferenced(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_uncompressed_bytes);
}
uint64_t
dsl_get_compressratio(dsl_dataset_t *ds)
{
if (ds->ds_is_snapshot) {
return (dsl_get_refratio(ds));
} else {
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
uint64_t val = dsl_dir_get_compressratio(dd);
mutex_exit(&dd->dd_lock);
return (val);
}
}
uint64_t
dsl_get_used(dsl_dataset_t *ds)
{
if (ds->ds_is_snapshot) {
return (dsl_dataset_phys(ds)->ds_unique_bytes);
} else {
dsl_dir_t *dd = ds->ds_dir;
mutex_enter(&dd->dd_lock);
uint64_t val = dsl_dir_get_used(dd);
mutex_exit(&dd->dd_lock);
return (val);
}
}
uint64_t
dsl_get_creation(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_creation_time);
}
uint64_t
dsl_get_creationtxg(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_creation_txg);
}
uint64_t
dsl_get_refquota(dsl_dataset_t *ds)
{
return (ds->ds_quota);
}
uint64_t
dsl_get_refreservation(dsl_dataset_t *ds)
{
return (ds->ds_reserved);
}
uint64_t
dsl_get_guid(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_guid);
}
uint64_t
dsl_get_unique(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_unique_bytes);
}
uint64_t
dsl_get_objsetid(dsl_dataset_t *ds)
{
return (ds->ds_object);
}
uint64_t
dsl_get_userrefs(dsl_dataset_t *ds)
{
return (ds->ds_userrefs);
}
uint64_t
dsl_get_defer_destroy(dsl_dataset_t *ds)
{
return (DS_IS_DEFER_DESTROY(ds) ? 1 : 0);
}
uint64_t
dsl_get_referenced(dsl_dataset_t *ds)
{
return (dsl_dataset_phys(ds)->ds_referenced_bytes);
}
uint64_t
dsl_get_numclones(dsl_dataset_t *ds)
{
ASSERT(ds->ds_is_snapshot);
return (dsl_dataset_phys(ds)->ds_num_children - 1);
}
uint64_t
dsl_get_inconsistent(dsl_dataset_t *ds)
{
return ((dsl_dataset_phys(ds)->ds_flags & DS_FLAG_INCONSISTENT) ?
1 : 0);
}
uint64_t
dsl_get_available(dsl_dataset_t *ds)
{
uint64_t refdbytes = dsl_get_referenced(ds);
uint64_t availbytes = dsl_dir_space_available(ds->ds_dir,
NULL, 0, TRUE);
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
availbytes +=
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
}
if (ds->ds_quota != 0) {
/*
* Adjust available bytes according to refquota
*/
if (refdbytes < ds->ds_quota) {
availbytes = MIN(availbytes,
ds->ds_quota - refdbytes);
} else {
availbytes = 0;
}
}
return (availbytes);
}
int
dsl_get_written(dsl_dataset_t *ds, uint64_t *written)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_dataset_t *prev;
int err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err == 0) {
uint64_t comp, uncomp;
err = dsl_dataset_space_written(prev, ds, written,
&comp, &uncomp);
dsl_dataset_rele(prev, FTAG);
}
return (err);
}
/*
* 'snap' should be a buffer of size ZFS_MAX_DATASET_NAME_LEN.
*/
int
dsl_get_prev_snap(dsl_dataset_t *ds, char *snap)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
if (ds->ds_prev != NULL && ds->ds_prev != dp->dp_origin_snap) {
dsl_dataset_name(ds->ds_prev, snap);
return (0);
} else {
return (ENOENT);
}
}
/*
* Returns the mountpoint property and source for the given dataset in the
* value and source buffers. The value buffer must be at least as large as
* MAXPATHLEN and the source buffer at least as large as
* ZFS_MAX_DATASET_NAME_LEN.
* Returns 0 on success and an error on failure.
*/
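/*
 * Illustrative example (hypothetical names): with source "pool/a",
 * mountpoint "/mnt", and dsname "pool/a/b", the inherited relpath is "b"
 * and the returned value is "/mnt/b" (prefixed by any pool altroot).
 */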
int
dsl_get_mountpoint(dsl_dataset_t *ds, const char *dsname, char *value,
char *source)
{
int error;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
/* Retrieve the mountpoint value stored in the zap object */
error = dsl_prop_get_ds(ds, zfs_prop_to_name(ZFS_PROP_MOUNTPOINT), 1,
ZAP_MAXVALUELEN, value, source);
if (error != 0) {
return (error);
}
/*
* Process the dsname and source to find the full mountpoint string.
* Can be skipped for 'legacy' or 'none'.
*/
if (value[0] == '/') {
char *buf = kmem_alloc(ZAP_MAXVALUELEN, KM_SLEEP);
char *root = buf;
const char *relpath;
/*
* If we inherit the mountpoint, even from a dataset
* with a received value, the source will be the path of
* the dataset we inherit from. If source is
* ZPROP_SOURCE_VAL_RECVD, the received value is not
* inherited.
*/
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0) {
relpath = "";
} else {
ASSERT0(strncmp(dsname, source, strlen(source)));
relpath = dsname + strlen(source);
if (relpath[0] == '/')
relpath++;
}
spa_altroot(dp->dp_spa, root, ZAP_MAXVALUELEN);
/*
* Special case an alternate root of '/'. This will
* avoid having multiple leading slashes in the
* mountpoint path.
*/
if (strcmp(root, "/") == 0)
root++;
/*
* If the mountpoint is '/' then skip over this
* if we are obtaining either an alternate root or
* an inherited mountpoint.
*/
char *mnt = value;
if (value[1] == '\0' && (root[0] != '\0' ||
relpath[0] != '\0'))
mnt = value + 1;
if (relpath[0] == '\0') {
(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s",
root, mnt);
} else {
(void) snprintf(value, ZAP_MAXVALUELEN, "%s%s%s%s",
root, mnt, relpath[0] == '@' ? "" : "/",
relpath);
}
kmem_free(buf, ZAP_MAXVALUELEN);
}
return (0);
}
void
dsl_dataset_stats(dsl_dataset_t *ds, nvlist_t *nv)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRATIO,
dsl_get_refratio(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALREFERENCED,
dsl_get_logicalreferenced(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_COMPRESSRATIO,
dsl_get_compressratio(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USED,
dsl_get_used(ds));
if (ds->ds_is_snapshot) {
get_clones_stat(ds, nv);
} else {
char buf[ZFS_MAX_DATASET_NAME_LEN];
if (dsl_get_prev_snap(ds, buf) == 0)
dsl_prop_nvlist_add_string(nv, ZFS_PROP_PREV_SNAP,
buf);
dsl_dir_stats(ds->ds_dir, nv);
}
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_AVAILABLE,
dsl_get_available(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFERENCED,
dsl_get_referenced(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATION,
dsl_get_creation(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_CREATETXG,
dsl_get_creationtxg(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFQUOTA,
dsl_get_refquota(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REFRESERVATION,
dsl_get_refreservation(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_GUID,
dsl_get_guid(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_UNIQUE,
dsl_get_unique(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_OBJSETID,
dsl_get_objsetid(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERREFS,
dsl_get_userrefs(ds));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_DEFER_DESTROY,
dsl_get_defer_destroy(ds));
dsl_dataset_crypt_stats(ds, nv);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
uint64_t written;
if (dsl_get_written(ds, &written) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_WRITTEN,
written);
}
}
if (!dsl_dataset_is_snapshot(ds)) {
/*
* A failed "newfs" (e.g. full) resumable receive leaves
* the stats set on this dataset. Check here for the prop.
*/
get_receive_resume_stats(ds, nv);
/*
* A failed incremental resumable receive leaves the
* stats set on our child named "%recv". Check the child
* for the prop.
*/
/* 6 extra bytes for /%recv */
char recvname[ZFS_MAX_DATASET_NAME_LEN + 6];
dsl_dataset_t *recv_ds;
dsl_dataset_name(ds, recvname);
if (strlcat(recvname, "/", sizeof (recvname)) <
sizeof (recvname) &&
strlcat(recvname, recv_clone_name, sizeof (recvname)) <
sizeof (recvname) &&
dsl_dataset_hold(dp, recvname, FTAG, &recv_ds) == 0) {
get_receive_resume_stats(recv_ds, nv);
dsl_dataset_rele(recv_ds, FTAG);
}
}
}
void
dsl_dataset_fast_stat(dsl_dataset_t *ds, dmu_objset_stats_t *stat)
{
ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);
ASSERT(dsl_pool_config_held(dp));
stat->dds_creation_txg = dsl_get_creationtxg(ds);
stat->dds_inconsistent = dsl_get_inconsistent(ds);
stat->dds_guid = dsl_get_guid(ds);
stat->dds_origin[0] = '\0';
if (ds->ds_is_snapshot) {
stat->dds_is_snapshot = B_TRUE;
stat->dds_num_clones = dsl_get_numclones(ds);
} else {
stat->dds_is_snapshot = B_FALSE;
stat->dds_num_clones = 0;
if (dsl_dir_is_clone(ds->ds_dir)) {
dsl_dir_get_origin(ds->ds_dir, stat->dds_origin);
}
}
}
uint64_t
dsl_dataset_fsid_guid(dsl_dataset_t *ds)
{
return (ds->ds_fsid_guid);
}
void
dsl_dataset_space(dsl_dataset_t *ds,
uint64_t *refdbytesp, uint64_t *availbytesp,
uint64_t *usedobjsp, uint64_t *availobjsp)
{
*refdbytesp = dsl_dataset_phys(ds)->ds_referenced_bytes;
*availbytesp = dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE);
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes)
*availbytesp +=
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes;
if (ds->ds_quota != 0) {
/*
* Adjust available bytes according to refquota
*/
if (*refdbytesp < ds->ds_quota)
*availbytesp = MIN(*availbytesp,
ds->ds_quota - *refdbytesp);
else
*availbytesp = 0;
}
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
*usedobjsp = BP_GET_FILL(&dsl_dataset_phys(ds)->ds_bp);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
*availobjsp = DN_MAX_OBJECT - *usedobjsp;
}
boolean_t
dsl_dataset_modified_since_snap(dsl_dataset_t *ds, dsl_dataset_t *snap)
{
ASSERTV(dsl_pool_t *dp = ds->ds_dir->dd_pool);
uint64_t birth;
ASSERT(dsl_pool_config_held(dp));
if (snap == NULL)
return (B_FALSE);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
birth = dsl_dataset_get_blkptr(ds)->blk_birth;
rrw_exit(&ds->ds_bp_rwlock, FTAG);
if (birth > dsl_dataset_phys(snap)->ds_creation_txg) {
objset_t *os, *os_snap;
/*
* It may be that only the ZIL differs, because it was
* reset in the head. Don't count that as being
* modified.
*/
if (dmu_objset_from_ds(ds, &os) != 0)
return (B_TRUE);
if (dmu_objset_from_ds(snap, &os_snap) != 0)
return (B_TRUE);
return (bcmp(&os->os_phys->os_meta_dnode,
&os_snap->os_phys->os_meta_dnode,
sizeof (os->os_phys->os_meta_dnode)) != 0);
}
return (B_FALSE);
}
typedef struct dsl_dataset_rename_snapshot_arg {
const char *ddrsa_fsname;
const char *ddrsa_oldsnapname;
const char *ddrsa_newsnapname;
boolean_t ddrsa_recursive;
dmu_tx_t *ddrsa_tx;
} dsl_dataset_rename_snapshot_arg_t;
/* ARGSUSED */
static int
dsl_dataset_rename_snapshot_check_impl(dsl_pool_t *dp,
dsl_dataset_t *hds, void *arg)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
int error;
uint64_t val;
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
if (error != 0) {
/* ignore nonexistent snapshots */
return (error == ENOENT ? 0 : error);
}
/* new name should not exist */
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_newsnapname, &val);
if (error == 0)
error = SET_ERROR(EEXIST);
else if (error == ENOENT)
error = 0;
/* dataset name + 1 for the "@" + the new snapshot name must fit */
if (dsl_dir_namelen(hds->ds_dir) + 1 +
strlen(ddrsa->ddrsa_newsnapname) >= ZFS_MAX_DATASET_NAME_LEN)
error = SET_ERROR(ENAMETOOLONG);
return (error);
}
static int
dsl_dataset_rename_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
int error;
error = dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds);
if (error != 0)
return (error);
if (ddrsa->ddrsa_recursive) {
error = dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
dsl_dataset_rename_snapshot_check_impl, ddrsa,
DS_FIND_CHILDREN);
} else {
error = dsl_dataset_rename_snapshot_check_impl(dp, hds, ddrsa);
}
dsl_dataset_rele(hds, FTAG);
return (error);
}
static int
dsl_dataset_rename_snapshot_sync_impl(dsl_pool_t *dp,
dsl_dataset_t *hds, void *arg)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_dataset_t *ds;
uint64_t val;
dmu_tx_t *tx = ddrsa->ddrsa_tx;
int error;
error = dsl_dataset_snap_lookup(hds, ddrsa->ddrsa_oldsnapname, &val);
ASSERT(error == 0 || error == ENOENT);
if (error == ENOENT) {
/* ignore nonexistent snapshots */
return (0);
}
VERIFY0(dsl_dataset_hold_obj(dp, val, FTAG, &ds));
/* log before we change the name */
spa_history_log_internal_ds(ds, "rename", tx,
"-> @%s", ddrsa->ddrsa_newsnapname);
VERIFY0(dsl_dataset_snap_remove(hds, ddrsa->ddrsa_oldsnapname, tx,
B_FALSE));
mutex_enter(&ds->ds_lock);
(void) strlcpy(ds->ds_snapname, ddrsa->ddrsa_newsnapname,
sizeof (ds->ds_snapname));
mutex_exit(&ds->ds_lock);
VERIFY0(zap_add(dp->dp_meta_objset,
dsl_dataset_phys(hds)->ds_snapnames_zapobj,
ds->ds_snapname, 8, 1, &ds->ds_object, tx));
zvol_rename_minors(dp->dp_spa, ddrsa->ddrsa_oldsnapname,
ddrsa->ddrsa_newsnapname, B_TRUE);
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_rename_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rename_snapshot_arg_t *ddrsa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddrsa->ddrsa_fsname, FTAG, &hds));
ddrsa->ddrsa_tx = tx;
if (ddrsa->ddrsa_recursive) {
VERIFY0(dmu_objset_find_dp(dp, hds->ds_dir->dd_object,
dsl_dataset_rename_snapshot_sync_impl, ddrsa,
DS_FIND_CHILDREN));
} else {
VERIFY0(dsl_dataset_rename_snapshot_sync_impl(dp, hds, ddrsa));
}
dsl_dataset_rele(hds, FTAG);
}
int
dsl_dataset_rename_snapshot(const char *fsname,
const char *oldsnapname, const char *newsnapname, boolean_t recursive)
{
dsl_dataset_rename_snapshot_arg_t ddrsa;
ddrsa.ddrsa_fsname = fsname;
ddrsa.ddrsa_oldsnapname = oldsnapname;
ddrsa.ddrsa_newsnapname = newsnapname;
ddrsa.ddrsa_recursive = recursive;
return (dsl_sync_task(fsname, dsl_dataset_rename_snapshot_check,
dsl_dataset_rename_snapshot_sync, &ddrsa,
1, ZFS_SPACE_CHECK_RESERVED));
}
/*
* If we're doing an ownership handoff, we need to make sure that there is
* only one long hold on the dataset. Since this is a check, we're not allowed
* to change anything, so we don't permanently release the long hold or the
* regular hold here. We want
* to do this only when syncing to avoid the dataset unexpectedly going away
* when we release the long hold.
*/
static int
dsl_dataset_handoff_check(dsl_dataset_t *ds, void *owner, dmu_tx_t *tx)
{
boolean_t held;
if (!dmu_tx_is_syncing(tx))
return (0);
if (owner != NULL) {
VERIFY3P(ds->ds_owner, ==, owner);
dsl_dataset_long_rele(ds, owner);
}
held = dsl_dataset_long_held(ds);
if (owner != NULL)
dsl_dataset_long_hold(ds, owner);
if (held)
return (SET_ERROR(EBUSY));
return (0);
}
int
dsl_dataset_rollback_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rollback_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int64_t unused_refres_delta;
int error;
error = dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds);
if (error != 0)
return (error);
/* must not be a snapshot */
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
/* must have a most recent snapshot */
if (dsl_dataset_phys(ds)->ds_prev_snap_txg < TXG_INITIAL) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ESRCH));
}
/*
* No rollback to a snapshot created in the current txg, because
* the rollback may dirty the dataset and create blocks that are
* not reachable from the rootbp while having a birth txg that
* falls into the snapshot's range.
*/
if (dmu_tx_is_syncing(tx) &&
dsl_dataset_phys(ds)->ds_prev_snap_txg >= tx->tx_txg) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EAGAIN));
}
/*
* If the expected target snapshot is specified, then check that
* the latest snapshot is it.
*/
if (ddra->ddra_tosnap != NULL) {
dsl_dataset_t *snapds;
/* Check if the target snapshot exists at all. */
error = dsl_dataset_hold(dp, ddra->ddra_tosnap, FTAG, &snapds);
if (error != 0) {
/*
* ESRCH is used to signal that the target snapshot does
* not exist, while ENOENT is used to report that
* the rolled back dataset does not exist.
* ESRCH is also used to cover other cases where the
* target snapshot is not related to the dataset being
* rolled back such as being in a different pool.
*/
if (error == ENOENT || error == EXDEV)
error = SET_ERROR(ESRCH);
dsl_dataset_rele(ds, FTAG);
return (error);
}
ASSERT(snapds->ds_is_snapshot);
/* Check if the snapshot is the latest snapshot indeed. */
if (snapds != ds->ds_prev) {
/*
* Distinguish between the case where the only problem
* is intervening snapshots (EEXIST) vs the snapshot
* not being a valid target for rollback (ESRCH).
*/
if (snapds->ds_dir == ds->ds_dir ||
(dsl_dir_is_clone(ds->ds_dir) &&
dsl_dir_phys(ds->ds_dir)->dd_origin_obj ==
snapds->ds_object)) {
error = SET_ERROR(EEXIST);
} else {
error = SET_ERROR(ESRCH);
}
dsl_dataset_rele(snapds, FTAG);
dsl_dataset_rele(ds, FTAG);
return (error);
}
dsl_dataset_rele(snapds, FTAG);
}
/* must not have any bookmarks after the most recent snapshot */
nvlist_t *proprequest = fnvlist_alloc();
fnvlist_add_boolean(proprequest, zfs_prop_to_name(ZFS_PROP_CREATETXG));
nvlist_t *bookmarks = fnvlist_alloc();
error = dsl_get_bookmarks_impl(ds, proprequest, bookmarks);
fnvlist_free(proprequest);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
for (nvpair_t *pair = nvlist_next_nvpair(bookmarks, NULL);
pair != NULL; pair = nvlist_next_nvpair(bookmarks, pair)) {
nvlist_t *valuenv =
fnvlist_lookup_nvlist(fnvpair_value_nvlist(pair),
zfs_prop_to_name(ZFS_PROP_CREATETXG));
uint64_t createtxg = fnvlist_lookup_uint64(valuenv, "value");
if (createtxg > dsl_dataset_phys(ds)->ds_prev_snap_txg) {
fnvlist_free(bookmarks);
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EEXIST));
}
}
fnvlist_free(bookmarks);
error = dsl_dataset_handoff_check(ds, ddra->ddra_owner, tx);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/*
* Check if the snap we are rolling back to uses more than
* the refquota.
*/
if (ds->ds_quota != 0 &&
dsl_dataset_phys(ds->ds_prev)->ds_referenced_bytes > ds->ds_quota) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EDQUOT));
}
/*
* When we do the clone swap, we will temporarily use more space
* due to the refreservation (the head will no longer have any
* unique space, so the entire amount of the refreservation will need
* to be free). We will immediately destroy the clone, freeing
* this space, but the freeing happens over many txg's.
*/
unused_refres_delta = (int64_t)MIN(ds->ds_reserved,
dsl_dataset_phys(ds)->ds_unique_bytes);
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, TRUE)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_dataset_rollback_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_rollback_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds, *clone;
uint64_t cloneobj;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
VERIFY0(dsl_dataset_hold(dp, ddra->ddra_fsname, FTAG, &ds));
dsl_dataset_name(ds->ds_prev, namebuf);
fnvlist_add_string(ddra->ddra_result, "target", namebuf);
cloneobj = dsl_dataset_create_sync(ds->ds_dir, "%rollback",
ds->ds_prev, DS_CREATE_FLAG_NODIRTY, kcred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(dp, cloneobj, FTAG, &clone));
dsl_dataset_clone_swap_sync_impl(clone, ds, tx);
dsl_dataset_zero_zil(ds, tx);
dsl_destroy_head_sync_impl(clone, tx);
dsl_dataset_rele(clone, FTAG);
dsl_dataset_rele(ds, FTAG);
}
/*
* Rolls back the given filesystem or volume to the most recent snapshot.
* The name of the most recent snapshot will be returned under key "target"
* in the result nvlist.
*
* If owner != NULL:
* - The existing dataset MUST be owned by the specified owner at entry
* - Upon return, dataset will still be held by the same owner, whether we
* succeed or not.
*
* This mode is required any time the existing filesystem is mounted. See
* notes above zfs_suspend_fs() for further details.
*/
int
dsl_dataset_rollback(const char *fsname, const char *tosnap, void *owner,
nvlist_t *result)
{
dsl_dataset_rollback_arg_t ddra;
ddra.ddra_fsname = fsname;
ddra.ddra_tosnap = tosnap;
ddra.ddra_owner = owner;
ddra.ddra_result = result;
return (dsl_sync_task(fsname, dsl_dataset_rollback_check,
dsl_dataset_rollback_sync, &ddra,
1, ZFS_SPACE_CHECK_RESERVED));
}
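/*
 * Illustrative caller sketch (not part of this file; the dataset name and
 * local variables are hypothetical).  A caller that wants to know which
 * snapshot was rolled back to might do:
 *
 *   nvlist_t *result = fnvlist_alloc();
 *   int err = dsl_dataset_rollback("pool/fs", NULL, NULL, result);
 *   if (err == 0) {
 *           char *snapname = fnvlist_lookup_string(result, "target");
 *           zfs_dbgmsg("rolled back to %s", snapname);
 *   }
 *   fnvlist_free(result);
 *
 * Passing tosnap == NULL skips the explicit target-snapshot check, and
 * owner is only needed when the filesystem is mounted (see above).
 */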
struct promotenode {
list_node_t link;
dsl_dataset_t *ds;
};
static int snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep);
static int promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp,
void *tag);
static void promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag);
int
dsl_dataset_promote_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_promote_arg_t *ddpa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
struct promotenode *snap;
dsl_dataset_t *origin_ds;
int err;
uint64_t unused;
uint64_t ss_mv_cnt;
size_t max_snap_len;
boolean_t conflicting_snaps;
err = promote_hold(ddpa, dp, FTAG);
if (err != 0)
return (err);
hds = ddpa->ddpa_clone;
max_snap_len = MAXNAMELEN - strlen(ddpa->ddpa_clonename) - 1;
if (dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE) {
promote_rele(ddpa, FTAG);
return (SET_ERROR(EXDEV));
}
snap = list_head(&ddpa->shared_snaps);
if (snap == NULL) {
err = SET_ERROR(ENOENT);
goto out;
}
origin_ds = snap->ds;
/*
* Encrypted clones share a DSL Crypto Key with their origin's dsl dir.
* When doing a promote we must make sure the encryption root for
* both the target and the target's origin does not change to avoid
* needing to rewrap encryption keys.
*/
err = dsl_dataset_promote_crypt_check(hds->ds_dir, origin_ds->ds_dir);
if (err != 0)
goto out;
/*
* Compute and check the amount of space to transfer. Since this is
* so expensive, don't do the preliminary check.
*/
if (!dmu_tx_is_syncing(tx)) {
promote_rele(ddpa, FTAG);
return (0);
}
/* compute origin's new unique space */
snap = list_tail(&ddpa->clone_snaps);
ASSERT(snap != NULL);
ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
origin_ds->ds_object);
dsl_deadlist_space_range(&snap->ds->ds_deadlist,
dsl_dataset_phys(origin_ds)->ds_prev_snap_txg, UINT64_MAX,
&ddpa->unique, &unused, &unused);
/*
* Walk the snapshots that we are moving
*
* Compute space to transfer. Consider the incremental changes
* to used by each snapshot:
* (my used) = (prev's used) + (blocks born) - (blocks killed)
* So each snapshot gave birth to:
* (blocks born) = (my used) - (prev's used) + (blocks killed)
* So a sequence would look like:
* (uN - u(N-1) + kN) + ... + (u1 - u0 + k1) + (u0 - 0 + k0)
* Which simplifies to:
* uN + kN + kN-1 + ... + k1 + k0
* Note however, if we stop before we reach the ORIGIN we get:
* uN + kN + kN-1 + ... + kM - uM-1
*/
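/*
 * Worked example with made-up numbers: three snapshots with
 * used = {u0 = 1G, u1 = 3G, u2 = 4G} and killed = {k0 = 0, k1 = 2G,
 * k2 = 1G} give
 *   (4G - 3G + 1G) + (3G - 1G + 2G) + (1G - 0 + 0) = 7G
 * which matches the simplified form u2 + k2 + k1 + k0 = 4G + 1G + 2G + 0.
 */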
conflicting_snaps = B_FALSE;
ss_mv_cnt = 0;
ddpa->used = dsl_dataset_phys(origin_ds)->ds_referenced_bytes;
ddpa->comp = dsl_dataset_phys(origin_ds)->ds_compressed_bytes;
ddpa->uncomp = dsl_dataset_phys(origin_ds)->ds_uncompressed_bytes;
for (snap = list_head(&ddpa->shared_snaps); snap;
snap = list_next(&ddpa->shared_snaps, snap)) {
uint64_t val, dlused, dlcomp, dluncomp;
dsl_dataset_t *ds = snap->ds;
ss_mv_cnt++;
/*
* If there are long holds, we won't be able to evict
* the objset.
*/
if (dsl_dataset_long_held(ds)) {
err = SET_ERROR(EBUSY);
goto out;
}
/* Check that the snapshot name does not conflict */
VERIFY0(dsl_dataset_get_snapname(ds));
if (strlen(ds->ds_snapname) >= max_snap_len) {
err = SET_ERROR(ENAMETOOLONG);
goto out;
}
err = dsl_dataset_snap_lookup(hds, ds->ds_snapname, &val);
if (err == 0) {
fnvlist_add_boolean(ddpa->err_ds,
snap->ds->ds_snapname);
conflicting_snaps = B_TRUE;
} else if (err != ENOENT) {
goto out;
}
/* The very first snapshot does not have a deadlist */
if (dsl_dataset_phys(ds)->ds_prev_snap_obj == 0)
continue;
dsl_deadlist_space(&ds->ds_deadlist,
&dlused, &dlcomp, &dluncomp);
ddpa->used += dlused;
ddpa->comp += dlcomp;
ddpa->uncomp += dluncomp;
}
/*
* In order to return the full list of conflicting snapshots, we check
* whether there was a conflict after traversing all of them.
*/
if (conflicting_snaps) {
err = SET_ERROR(EEXIST);
goto out;
}
/*
* If we are a clone of a clone then we never reached ORIGIN,
* so we need to subtract out the clone origin's used space.
*/
if (ddpa->origin_origin) {
ddpa->used -=
dsl_dataset_phys(ddpa->origin_origin)->ds_referenced_bytes;
ddpa->comp -=
dsl_dataset_phys(ddpa->origin_origin)->ds_compressed_bytes;
ddpa->uncomp -=
dsl_dataset_phys(ddpa->origin_origin)->
ds_uncompressed_bytes;
}
/* Check that there is enough space and limit headroom here */
err = dsl_dir_transfer_possible(origin_ds->ds_dir, hds->ds_dir,
0, ss_mv_cnt, ddpa->used, ddpa->cr);
if (err != 0)
goto out;
/*
* Compute the amounts of space that will be used by snapshots
* after the promotion (for both origin and clone). For each,
* it is the amount of space that will be on all of their
* deadlists (that was not born before their new origin).
*/
if (dsl_dir_phys(hds->ds_dir)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
uint64_t space;
/*
* Note, typically this will not be a clone of a clone,
* so dd_origin_txg will be < TXG_INITIAL, so
* these snaplist_space() -> dsl_deadlist_space_range()
* calls will be fast because they do not have to
* iterate over all bps.
*/
snap = list_head(&ddpa->origin_snaps);
if (snap == NULL) {
err = SET_ERROR(ENOENT);
goto out;
}
err = snaplist_space(&ddpa->shared_snaps,
snap->ds->ds_dir->dd_origin_txg, &ddpa->cloneusedsnap);
if (err != 0)
goto out;
err = snaplist_space(&ddpa->clone_snaps,
snap->ds->ds_dir->dd_origin_txg, &space);
if (err != 0)
goto out;
ddpa->cloneusedsnap += space;
}
if (dsl_dir_phys(origin_ds->ds_dir)->dd_flags &
DD_FLAG_USED_BREAKDOWN) {
err = snaplist_space(&ddpa->origin_snaps,
dsl_dataset_phys(origin_ds)->ds_creation_txg,
&ddpa->originusedsnap);
if (err != 0)
goto out;
}
out:
promote_rele(ddpa, FTAG);
return (err);
}
void
dsl_dataset_promote_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_promote_arg_t *ddpa = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *hds;
struct promotenode *snap;
dsl_dataset_t *origin_ds;
dsl_dataset_t *origin_head;
dsl_dir_t *dd;
dsl_dir_t *odd = NULL;
uint64_t oldnext_obj;
int64_t delta;
VERIFY0(promote_hold(ddpa, dp, FTAG));
hds = ddpa->ddpa_clone;
ASSERT0(dsl_dataset_phys(hds)->ds_flags & DS_FLAG_NOPROMOTE);
snap = list_head(&ddpa->shared_snaps);
origin_ds = snap->ds;
dd = hds->ds_dir;
snap = list_head(&ddpa->origin_snaps);
origin_head = snap->ds;
/*
* We need to explicitly open odd, since origin_ds's dd will be
* changing.
*/
VERIFY0(dsl_dir_hold_obj(dp, origin_ds->ds_dir->dd_object,
NULL, FTAG, &odd));
dsl_dataset_promote_crypt_sync(hds->ds_dir, odd, tx);
/* change origin's next snap */
dmu_buf_will_dirty(origin_ds->ds_dbuf, tx);
oldnext_obj = dsl_dataset_phys(origin_ds)->ds_next_snap_obj;
snap = list_tail(&ddpa->clone_snaps);
ASSERT3U(dsl_dataset_phys(snap->ds)->ds_prev_snap_obj, ==,
origin_ds->ds_object);
dsl_dataset_phys(origin_ds)->ds_next_snap_obj = snap->ds->ds_object;
/* change the origin's next clone */
if (dsl_dataset_phys(origin_ds)->ds_next_clones_obj) {
dsl_dataset_remove_from_next_clones(origin_ds,
snap->ds->ds_object, tx);
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dataset_phys(origin_ds)->ds_next_clones_obj,
oldnext_obj, tx));
}
/* change origin */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
ASSERT3U(dsl_dir_phys(dd)->dd_origin_obj, ==, origin_ds->ds_object);
dsl_dir_phys(dd)->dd_origin_obj = dsl_dir_phys(odd)->dd_origin_obj;
dd->dd_origin_txg = origin_head->ds_dir->dd_origin_txg;
dmu_buf_will_dirty(odd->dd_dbuf, tx);
dsl_dir_phys(odd)->dd_origin_obj = origin_ds->ds_object;
origin_head->ds_dir->dd_origin_txg =
dsl_dataset_phys(origin_ds)->ds_creation_txg;
/* change dd_clone entries */
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(odd)->dd_clones, hds->ds_object, tx));
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
hds->ds_object, tx));
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(ddpa->origin_origin->ds_dir)->dd_clones,
origin_head->ds_object, tx));
if (dsl_dir_phys(dd)->dd_clones == 0) {
dsl_dir_phys(dd)->dd_clones =
zap_create(dp->dp_meta_objset, DMU_OT_DSL_CLONES,
DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones, origin_head->ds_object, tx));
}
/* move snapshots to this dir */
for (snap = list_head(&ddpa->shared_snaps); snap;
snap = list_next(&ddpa->shared_snaps, snap)) {
dsl_dataset_t *ds = snap->ds;
/*
* Property callbacks are registered to a particular
* dsl_dir. Since ours is changing, evict the objset
* so that they will be unregistered from the old dsl_dir.
*/
if (ds->ds_objset) {
dmu_objset_evict(ds->ds_objset);
ds->ds_objset = NULL;
}
/* move snap name entry */
VERIFY0(dsl_dataset_get_snapname(ds));
VERIFY0(dsl_dataset_snap_remove(origin_head,
ds->ds_snapname, tx, B_TRUE));
VERIFY0(zap_add(dp->dp_meta_objset,
dsl_dataset_phys(hds)->ds_snapnames_zapobj, ds->ds_snapname,
8, 1, &ds->ds_object, tx));
dsl_fs_ss_count_adjust(hds->ds_dir, 1,
DD_FIELD_SNAPSHOT_COUNT, tx);
/* change containing dsl_dir */
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ASSERT3U(dsl_dataset_phys(ds)->ds_dir_obj, ==, odd->dd_object);
dsl_dataset_phys(ds)->ds_dir_obj = dd->dd_object;
ASSERT3P(ds->ds_dir, ==, odd);
dsl_dir_rele(ds->ds_dir, ds);
VERIFY0(dsl_dir_hold_obj(dp, dd->dd_object,
NULL, ds, &ds->ds_dir));
/* move any clone references */
if (dsl_dataset_phys(ds)->ds_next_clones_obj &&
spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_dataset_t *cnds;
uint64_t o;
if (za.za_first_integer == oldnext_obj) {
/*
* We've already moved the
* origin's reference.
*/
continue;
}
VERIFY0(dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &cnds));
o = dsl_dir_phys(cnds->ds_dir)->
dd_head_dataset_obj;
VERIFY0(zap_remove_int(dp->dp_meta_objset,
dsl_dir_phys(odd)->dd_clones, o, tx));
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_clones, o, tx));
dsl_dataset_rele(cnds, FTAG);
}
zap_cursor_fini(&zc);
}
ASSERT(!dsl_prop_hascb(ds));
}
/*
* Change space accounting.
* Note, pa->*usedsnap and dd_used_breakdown[SNAP] will either
* both be valid, or both be 0 (resulting in delta == 0). This
* is true for each of {clone,origin} independently.
*/
delta = ddpa->cloneusedsnap -
dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP];
ASSERT3S(delta, >=, 0);
ASSERT3U(ddpa->used, >=, delta);
dsl_dir_diduse_space(dd, DD_USED_SNAP, delta, 0, 0, tx);
dsl_dir_diduse_space(dd, DD_USED_HEAD,
ddpa->used - delta, ddpa->comp, ddpa->uncomp, tx);
delta = ddpa->originusedsnap -
dsl_dir_phys(odd)->dd_used_breakdown[DD_USED_SNAP];
ASSERT3S(delta, <=, 0);
ASSERT3U(ddpa->used, >=, -delta);
dsl_dir_diduse_space(odd, DD_USED_SNAP, delta, 0, 0, tx);
dsl_dir_diduse_space(odd, DD_USED_HEAD,
-ddpa->used - delta, -ddpa->comp, -ddpa->uncomp, tx);
dsl_dataset_phys(origin_ds)->ds_unique_bytes = ddpa->unique;
/* log history record */
spa_history_log_internal_ds(hds, "promote", tx, "");
dsl_dir_rele(odd, FTAG);
promote_rele(ddpa, FTAG);
}
/*
* Make a list of dsl_dataset_t's for the snapshots between first_obj
* (exclusive) and last_obj (inclusive). The list will be in reverse
* order (last_obj will be the list_head()). If first_obj == 0, do all
* snapshots back to this dataset's origin.
*/
static int
snaplist_make(dsl_pool_t *dp,
uint64_t first_obj, uint64_t last_obj, list_t *l, void *tag)
{
uint64_t obj = last_obj;
list_create(l, sizeof (struct promotenode),
offsetof(struct promotenode, link));
while (obj != first_obj) {
dsl_dataset_t *ds;
struct promotenode *snap;
int err;
err = dsl_dataset_hold_obj(dp, obj, tag, &ds);
ASSERT(err != ENOENT);
if (err != 0)
return (err);
if (first_obj == 0)
first_obj = dsl_dir_phys(ds->ds_dir)->dd_origin_obj;
snap = kmem_alloc(sizeof (*snap), KM_SLEEP);
snap->ds = ds;
list_insert_tail(l, snap);
obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
}
return (0);
}
static int
snaplist_space(list_t *l, uint64_t mintxg, uint64_t *spacep)
{
struct promotenode *snap;
*spacep = 0;
for (snap = list_head(l); snap; snap = list_next(l, snap)) {
uint64_t used, comp, uncomp;
dsl_deadlist_space_range(&snap->ds->ds_deadlist,
mintxg, UINT64_MAX, &used, &comp, &uncomp);
*spacep += used;
}
return (0);
}
static void
snaplist_destroy(list_t *l, void *tag)
{
struct promotenode *snap;
if (l == NULL || !list_link_active(&l->list_head))
return;
while ((snap = list_tail(l)) != NULL) {
list_remove(l, snap);
dsl_dataset_rele(snap->ds, tag);
kmem_free(snap, sizeof (*snap));
}
list_destroy(l);
}
static int
promote_hold(dsl_dataset_promote_arg_t *ddpa, dsl_pool_t *dp, void *tag)
{
int error;
dsl_dir_t *dd;
struct promotenode *snap;
error = dsl_dataset_hold(dp, ddpa->ddpa_clonename, tag,
&ddpa->ddpa_clone);
if (error != 0)
return (error);
dd = ddpa->ddpa_clone->ds_dir;
if (ddpa->ddpa_clone->ds_is_snapshot ||
!dsl_dir_is_clone(dd)) {
dsl_dataset_rele(ddpa->ddpa_clone, tag);
return (SET_ERROR(EINVAL));
}
error = snaplist_make(dp, 0, dsl_dir_phys(dd)->dd_origin_obj,
&ddpa->shared_snaps, tag);
if (error != 0)
goto out;
error = snaplist_make(dp, 0, ddpa->ddpa_clone->ds_object,
&ddpa->clone_snaps, tag);
if (error != 0)
goto out;
snap = list_head(&ddpa->shared_snaps);
ASSERT3U(snap->ds->ds_object, ==, dsl_dir_phys(dd)->dd_origin_obj);
error = snaplist_make(dp, dsl_dir_phys(dd)->dd_origin_obj,
dsl_dir_phys(snap->ds->ds_dir)->dd_head_dataset_obj,
&ddpa->origin_snaps, tag);
if (error != 0)
goto out;
if (dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj != 0) {
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(snap->ds->ds_dir)->dd_origin_obj,
tag, &ddpa->origin_origin);
if (error != 0)
goto out;
}
out:
if (error != 0)
promote_rele(ddpa, tag);
return (error);
}
static void
promote_rele(dsl_dataset_promote_arg_t *ddpa, void *tag)
{
snaplist_destroy(&ddpa->shared_snaps, tag);
snaplist_destroy(&ddpa->clone_snaps, tag);
snaplist_destroy(&ddpa->origin_snaps, tag);
if (ddpa->origin_origin != NULL)
dsl_dataset_rele(ddpa->origin_origin, tag);
dsl_dataset_rele(ddpa->ddpa_clone, tag);
}
/*
* Promote a clone.
*
* If it fails due to a conflicting snapshot name, "conflsnap" will be filled
* in with the name. (It must be at least ZFS_MAX_DATASET_NAME_LEN bytes long.)
*/
int
dsl_dataset_promote(const char *name, char *conflsnap)
{
dsl_dataset_promote_arg_t ddpa = { 0 };
uint64_t numsnaps;
int error;
nvpair_t *snap_pair;
objset_t *os;
/*
* We will modify space proportional to the number of
* snapshots. Compute numsnaps.
*/
error = dmu_objset_hold(name, FTAG, &os);
if (error != 0)
return (error);
error = zap_count(dmu_objset_pool(os)->dp_meta_objset,
dsl_dataset_phys(dmu_objset_ds(os))->ds_snapnames_zapobj,
&numsnaps);
dmu_objset_rele(os, FTAG);
if (error != 0)
return (error);
ddpa.ddpa_clonename = name;
ddpa.err_ds = fnvlist_alloc();
ddpa.cr = CRED();
error = dsl_sync_task(name, dsl_dataset_promote_check,
dsl_dataset_promote_sync, &ddpa,
2 + numsnaps, ZFS_SPACE_CHECK_RESERVED);
/*
* Return the first conflicting snapshot found.
*/
snap_pair = nvlist_next_nvpair(ddpa.err_ds, NULL);
if (snap_pair != NULL && conflsnap != NULL)
(void) strcpy(conflsnap, nvpair_name(snap_pair));
fnvlist_free(ddpa.err_ds);
return (error);
}
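/*
 * Illustrative usage (hypothetical dataset name): a caller that wants the
 * name of the first conflicting snapshot on failure might do:
 *
 *   char conflsnap[ZFS_MAX_DATASET_NAME_LEN];
 *   int err = dsl_dataset_promote("pool/clone", conflsnap);
 *   if (err == EEXIST)
 *           zfs_dbgmsg("snapshot %s already exists", conflsnap);
 */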
int
dsl_dataset_clone_swap_check_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, boolean_t force, void *owner, dmu_tx_t *tx)
{
/*
* "slack" factor for received datasets with refquota set on them.
* See the bottom of this function for details on its use.
*/
uint64_t refquota_slack = (uint64_t)DMU_MAX_ACCESS *
spa_asize_inflation;
int64_t unused_refres_delta;
/* they should both be heads */
if (clone->ds_is_snapshot ||
origin_head->ds_is_snapshot)
return (SET_ERROR(EINVAL));
/* if we are not forcing, the branch point should be just before them */
if (!force && clone->ds_prev != origin_head->ds_prev)
return (SET_ERROR(EINVAL));
/* clone should be the clone (unless they are unrelated) */
if (clone->ds_prev != NULL &&
clone->ds_prev != clone->ds_dir->dd_pool->dp_origin_snap &&
origin_head->ds_dir != clone->ds_prev->ds_dir)
return (SET_ERROR(EINVAL));
/* the clone should be a child of the origin */
if (clone->ds_dir->dd_parent != origin_head->ds_dir)
return (SET_ERROR(EINVAL));
/* origin_head shouldn't be modified unless 'force' */
if (!force &&
dsl_dataset_modified_since_snap(origin_head, origin_head->ds_prev))
return (SET_ERROR(ETXTBSY));
/* origin_head should have no long holds (e.g. is not mounted) */
if (dsl_dataset_handoff_check(origin_head, owner, tx))
return (SET_ERROR(EBUSY));
/* check amount of any unconsumed refreservation */
unused_refres_delta =
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(origin_head)->ds_unique_bytes) -
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(clone)->ds_unique_bytes);
if (unused_refres_delta > 0 &&
unused_refres_delta >
dsl_dir_space_available(origin_head->ds_dir, NULL, 0, TRUE))
return (SET_ERROR(ENOSPC));
/*
* The clone can't be too much over the head's refquota.
*
* To ensure that the entire refquota can be used, we allow one
* transaction to exceed the refquota. Therefore, this check
* needs to also allow for the space referenced to be more than the
* refquota. The maximum amount of space that one transaction can use
* on disk is DMU_MAX_ACCESS * spa_asize_inflation. Allowing this
* overage ensures that we are able to receive a filesystem that
* exceeds the refquota on the source system.
*
* So that overage is the refquota_slack we use below.
*/
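/*
 * Illustrative figures only: if origin_head->ds_quota is 10G and
 * refquota_slack works out to 1.5G, a received clone referencing up to
 * 11.5G still passes the check below, while one referencing 12G fails
 * with EDQUOT.
 */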
if (origin_head->ds_quota != 0 &&
dsl_dataset_phys(clone)->ds_referenced_bytes >
origin_head->ds_quota + refquota_slack)
return (SET_ERROR(EDQUOT));
return (0);
}
+static void
+dsl_dataset_swap_remap_deadlists(dsl_dataset_t *clone,
+ dsl_dataset_t *origin, dmu_tx_t *tx)
+{
+ uint64_t clone_remap_dl_obj, origin_remap_dl_obj;
+ dsl_pool_t *dp = dmu_tx_pool(tx);
+
+ ASSERT(dsl_pool_sync_context(dp));
+
+ clone_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(clone);
+ origin_remap_dl_obj = dsl_dataset_get_remap_deadlist_object(origin);
+
+ if (clone_remap_dl_obj != 0) {
+ dsl_deadlist_close(&clone->ds_remap_deadlist);
+ dsl_dataset_unset_remap_deadlist_object(clone, tx);
+ }
+ if (origin_remap_dl_obj != 0) {
+ dsl_deadlist_close(&origin->ds_remap_deadlist);
+ dsl_dataset_unset_remap_deadlist_object(origin, tx);
+ }
+
+ if (clone_remap_dl_obj != 0) {
+ dsl_dataset_set_remap_deadlist_object(origin,
+ clone_remap_dl_obj, tx);
+ dsl_deadlist_open(&origin->ds_remap_deadlist,
+ dp->dp_meta_objset, clone_remap_dl_obj);
+ }
+ if (origin_remap_dl_obj != 0) {
+ dsl_dataset_set_remap_deadlist_object(clone,
+ origin_remap_dl_obj, tx);
+ dsl_deadlist_open(&clone->ds_remap_deadlist,
+ dp->dp_meta_objset, origin_remap_dl_obj);
+ }
+}
+
void
dsl_dataset_clone_swap_sync_impl(dsl_dataset_t *clone,
dsl_dataset_t *origin_head, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
int64_t unused_refres_delta;
ASSERT(clone->ds_reserved == 0);
/*
* NOTE: On DEBUG kernels there could be a race between this and
* the check function if spa_asize_inflation is adjusted...
*/
ASSERT(origin_head->ds_quota == 0 ||
dsl_dataset_phys(clone)->ds_unique_bytes <= origin_head->ds_quota +
DMU_MAX_ACCESS * spa_asize_inflation);
ASSERT3P(clone->ds_prev, ==, origin_head->ds_prev);
/*
* Swap per-dataset feature flags.
*/
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (!(spa_feature_table[f].fi_flags &
ZFEATURE_FLAG_PER_DATASET)) {
ASSERT(!clone->ds_feature_inuse[f]);
ASSERT(!origin_head->ds_feature_inuse[f]);
continue;
}
boolean_t clone_inuse = clone->ds_feature_inuse[f];
boolean_t origin_head_inuse = origin_head->ds_feature_inuse[f];
if (clone_inuse) {
dsl_dataset_deactivate_feature(clone->ds_object, f, tx);
clone->ds_feature_inuse[f] = B_FALSE;
}
if (origin_head_inuse) {
dsl_dataset_deactivate_feature(origin_head->ds_object,
f, tx);
origin_head->ds_feature_inuse[f] = B_FALSE;
}
if (clone_inuse) {
dsl_dataset_activate_feature(origin_head->ds_object,
f, tx);
origin_head->ds_feature_inuse[f] = B_TRUE;
}
if (origin_head_inuse) {
dsl_dataset_activate_feature(clone->ds_object, f, tx);
clone->ds_feature_inuse[f] = B_TRUE;
}
}
dmu_buf_will_dirty(clone->ds_dbuf, tx);
dmu_buf_will_dirty(origin_head->ds_dbuf, tx);
if (clone->ds_objset != NULL) {
dmu_objset_evict(clone->ds_objset);
clone->ds_objset = NULL;
}
if (origin_head->ds_objset != NULL) {
dmu_objset_evict(origin_head->ds_objset);
origin_head->ds_objset = NULL;
}
unused_refres_delta =
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(origin_head)->ds_unique_bytes) -
(int64_t)MIN(origin_head->ds_reserved,
dsl_dataset_phys(clone)->ds_unique_bytes);
/*
* Reset origin's unique bytes, if it exists.
*/
if (clone->ds_prev) {
dsl_dataset_t *origin = clone->ds_prev;
uint64_t comp, uncomp;
dmu_buf_will_dirty(origin->ds_dbuf, tx);
dsl_deadlist_space_range(&clone->ds_deadlist,
dsl_dataset_phys(origin)->ds_prev_snap_txg, UINT64_MAX,
&dsl_dataset_phys(origin)->ds_unique_bytes, &comp, &uncomp);
}
/* swap blkptrs */
{
rrw_enter(&clone->ds_bp_rwlock, RW_WRITER, FTAG);
rrw_enter(&origin_head->ds_bp_rwlock, RW_WRITER, FTAG);
blkptr_t tmp;
tmp = dsl_dataset_phys(origin_head)->ds_bp;
dsl_dataset_phys(origin_head)->ds_bp =
dsl_dataset_phys(clone)->ds_bp;
dsl_dataset_phys(clone)->ds_bp = tmp;
rrw_exit(&origin_head->ds_bp_rwlock, FTAG);
rrw_exit(&clone->ds_bp_rwlock, FTAG);
}
/* set dd_*_bytes */
{
int64_t dused, dcomp, duncomp;
uint64_t cdl_used, cdl_comp, cdl_uncomp;
uint64_t odl_used, odl_comp, odl_uncomp;
ASSERT3U(dsl_dir_phys(clone->ds_dir)->
dd_used_breakdown[DD_USED_SNAP], ==, 0);
dsl_deadlist_space(&clone->ds_deadlist,
&cdl_used, &cdl_comp, &cdl_uncomp);
dsl_deadlist_space(&origin_head->ds_deadlist,
&odl_used, &odl_comp, &odl_uncomp);
dused = dsl_dataset_phys(clone)->ds_referenced_bytes +
cdl_used -
(dsl_dataset_phys(origin_head)->ds_referenced_bytes +
odl_used);
dcomp = dsl_dataset_phys(clone)->ds_compressed_bytes +
cdl_comp -
(dsl_dataset_phys(origin_head)->ds_compressed_bytes +
odl_comp);
duncomp = dsl_dataset_phys(clone)->ds_uncompressed_bytes +
cdl_uncomp -
(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes +
odl_uncomp);
dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_HEAD,
dused, dcomp, duncomp, tx);
dsl_dir_diduse_space(clone->ds_dir, DD_USED_HEAD,
-dused, -dcomp, -duncomp, tx);
/*
* The difference in the space used by snapshots is the
* difference in snapshot space due to the head's
* deadlist (since that's the only thing that's
* changing that affects the snapused).
*/
dsl_deadlist_space_range(&clone->ds_deadlist,
origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
&cdl_used, &cdl_comp, &cdl_uncomp);
dsl_deadlist_space_range(&origin_head->ds_deadlist,
origin_head->ds_dir->dd_origin_txg, UINT64_MAX,
&odl_used, &odl_comp, &odl_uncomp);
dsl_dir_transfer_space(origin_head->ds_dir, cdl_used - odl_used,
DD_USED_HEAD, DD_USED_SNAP, tx);
}
/* swap ds_*_bytes */
SWITCH64(dsl_dataset_phys(origin_head)->ds_referenced_bytes,
dsl_dataset_phys(clone)->ds_referenced_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_compressed_bytes,
dsl_dataset_phys(clone)->ds_compressed_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_uncompressed_bytes,
dsl_dataset_phys(clone)->ds_uncompressed_bytes);
SWITCH64(dsl_dataset_phys(origin_head)->ds_unique_bytes,
dsl_dataset_phys(clone)->ds_unique_bytes);
/* apply any parent delta for change in unconsumed refreservation */
dsl_dir_diduse_space(origin_head->ds_dir, DD_USED_REFRSRV,
unused_refres_delta, 0, 0, tx);
/*
* Swap deadlists.
*/
dsl_deadlist_close(&clone->ds_deadlist);
dsl_deadlist_close(&origin_head->ds_deadlist);
SWITCH64(dsl_dataset_phys(origin_head)->ds_deadlist_obj,
dsl_dataset_phys(clone)->ds_deadlist_obj);
dsl_deadlist_open(&clone->ds_deadlist, dp->dp_meta_objset,
dsl_dataset_phys(clone)->ds_deadlist_obj);
dsl_deadlist_open(&origin_head->ds_deadlist, dp->dp_meta_objset,
dsl_dataset_phys(origin_head)->ds_deadlist_obj);
+ dsl_dataset_swap_remap_deadlists(clone, origin_head, tx);
dsl_scan_ds_clone_swapped(origin_head, clone, tx);
spa_history_log_internal_ds(clone, "clone swap", tx,
"parent=%s", origin_head->ds_dir->dd_myname);
}
/*
* Given a pool name and a dataset object number in that pool,
* return the name of that dataset.
*/
int
dsl_dsobj_to_dsname(char *pname, uint64_t obj, char *buf)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
int error;
error = dsl_pool_hold(pname, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, obj, FTAG, &ds);
if (error == 0) {
dsl_dataset_name(ds, buf);
dsl_dataset_rele(ds, FTAG);
}
dsl_pool_rele(dp, FTAG);
return (error);
}
int
dsl_dataset_check_quota(dsl_dataset_t *ds, boolean_t check_quota,
uint64_t asize, uint64_t inflight, uint64_t *used, uint64_t *ref_rsrv)
{
int error = 0;
ASSERT3S(asize, >, 0);
/*
* *ref_rsrv is the portion of asize that will come from any
* unconsumed refreservation space.
*/
*ref_rsrv = 0;
mutex_enter(&ds->ds_lock);
/*
* Make a space adjustment for reserved bytes.
*/
if (ds->ds_reserved > dsl_dataset_phys(ds)->ds_unique_bytes) {
ASSERT3U(*used, >=,
ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
*used -=
(ds->ds_reserved - dsl_dataset_phys(ds)->ds_unique_bytes);
*ref_rsrv =
asize - MIN(asize, parent_delta(ds, asize + inflight));
}
if (!check_quota || ds->ds_quota == 0) {
mutex_exit(&ds->ds_lock);
return (0);
}
/*
* If they are requesting more space, and our current estimate
* is over quota, they get to try again unless the actual
* on-disk is over quota and there are no pending changes (which
* may free up space for us).
*/
if (dsl_dataset_phys(ds)->ds_referenced_bytes + inflight >=
ds->ds_quota) {
if (inflight > 0 ||
dsl_dataset_phys(ds)->ds_referenced_bytes < ds->ds_quota)
error = SET_ERROR(ERESTART);
else
error = SET_ERROR(EDQUOT);
}
mutex_exit(&ds->ds_lock);
return (error);
}
typedef struct dsl_dataset_set_qr_arg {
const char *ddsqra_name;
zprop_source_t ddsqra_source;
uint64_t ddsqra_value;
} dsl_dataset_set_qr_arg_t;
/* ARGSUSED */
static int
dsl_dataset_set_refquota_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t newval;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFQUOTA)
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (newval == 0) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
if (newval < dsl_dataset_phys(ds)->ds_referenced_bytes ||
newval < ds->ds_reserved) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dataset_set_refquota_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFQUOTA), &newval));
if (ds->ds_quota != newval) {
dmu_buf_will_dirty(ds->ds_dbuf, tx);
ds->ds_quota = newval;
}
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refquota(const char *dsname, zprop_source_t source,
uint64_t refquota)
{
dsl_dataset_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = dsname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = refquota;
return (dsl_sync_task(dsname, dsl_dataset_set_refquota_check,
dsl_dataset_set_refquota_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
}
static int
dsl_dataset_set_refreservation_check(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t newval, unique;
if (spa_version(dp->dp_spa) < SPA_VERSION_REFRESERVATION)
return (SET_ERROR(ENOTSUP));
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
if (ds->ds_is_snapshot) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
/*
* If we are doing the preliminary check in open context, the
* space estimates may be inaccurate.
*/
if (!dmu_tx_is_syncing(tx)) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
mutex_enter(&ds->ds_lock);
if (!DS_UNIQUE_IS_ACCURATE(ds))
dsl_dataset_recalc_head_uniq(ds);
unique = dsl_dataset_phys(ds)->ds_unique_bytes;
mutex_exit(&ds->ds_lock);
if (MAX(unique, newval) > MAX(unique, ds->ds_reserved)) {
uint64_t delta = MAX(unique, newval) -
MAX(unique, ds->ds_reserved);
if (delta >
dsl_dir_space_available(ds->ds_dir, NULL, 0, B_TRUE) ||
(ds->ds_quota > 0 && newval > ds->ds_quota)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOSPC));
}
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
void
dsl_dataset_set_refreservation_sync_impl(dsl_dataset_t *ds,
zprop_source_t source, uint64_t value, dmu_tx_t *tx)
{
uint64_t newval;
uint64_t unique;
int64_t delta;
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_REFRESERVATION),
source, sizeof (value), 1, &value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_REFRESERVATION), &newval));
dmu_buf_will_dirty(ds->ds_dbuf, tx);
mutex_enter(&ds->ds_dir->dd_lock);
mutex_enter(&ds->ds_lock);
ASSERT(DS_UNIQUE_IS_ACCURATE(ds));
unique = dsl_dataset_phys(ds)->ds_unique_bytes;
delta = MAX(0, (int64_t)(newval - unique)) -
MAX(0, (int64_t)(ds->ds_reserved - unique));
ds->ds_reserved = newval;
mutex_exit(&ds->ds_lock);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_REFRSRV, delta, 0, 0, tx);
mutex_exit(&ds->ds_dir->dd_lock);
}
static void
dsl_dataset_set_refreservation_sync(void *arg, dmu_tx_t *tx)
{
dsl_dataset_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds = NULL;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
dsl_dataset_set_refreservation_sync_impl(ds,
ddsqra->ddsqra_source, ddsqra->ddsqra_value, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dataset_set_refreservation(const char *dsname, zprop_source_t source,
uint64_t refreservation)
{
dsl_dataset_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = dsname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = refreservation;
return (dsl_sync_task(dsname, dsl_dataset_set_refreservation_check,
dsl_dataset_set_refreservation_sync, &ddsqra,
0, ZFS_SPACE_CHECK_NONE));
}
/*
* Return (in *usedp) the amount of space written in new that is not
* present in oldsnap. New may be a snapshot or the head. Old must be
* a snapshot before new, in new's filesystem (or its origin). If not then
* fail and return EINVAL.
*
* The written space is calculated by considering two components: First, we
* ignore any freed space, and calculate the written as new's used space
* minus old's used space. Next, we add in the amount of space that was freed
* between the two snapshots, thus reducing new's used space relative to old's.
* Specifically, this is the space that was born before old->ds_creation_txg,
* and freed before new (ie. on new's deadlist or a previous deadlist).
*
* space freed [---------------------]
* snapshots ---O-------O--------O-------O------
* oldsnap new
*/
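/*
 * Worked example (hypothetical figures): if new references 10G, oldsnap
 * referenced 8G, and 1G of blocks born before oldsnap's creation txg was
 * later freed onto new's (or an intervening) deadlist, then
 *   written = 10G - 8G + 1G = 3G.
 */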
int
dsl_dataset_space_written(dsl_dataset_t *oldsnap, dsl_dataset_t *new,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
int err = 0;
uint64_t snapobj;
dsl_pool_t *dp = new->ds_dir->dd_pool;
ASSERT(dsl_pool_config_held(dp));
*usedp = 0;
*usedp += dsl_dataset_phys(new)->ds_referenced_bytes;
*usedp -= dsl_dataset_phys(oldsnap)->ds_referenced_bytes;
*compp = 0;
*compp += dsl_dataset_phys(new)->ds_compressed_bytes;
*compp -= dsl_dataset_phys(oldsnap)->ds_compressed_bytes;
*uncompp = 0;
*uncompp += dsl_dataset_phys(new)->ds_uncompressed_bytes;
*uncompp -= dsl_dataset_phys(oldsnap)->ds_uncompressed_bytes;
snapobj = new->ds_object;
while (snapobj != oldsnap->ds_object) {
dsl_dataset_t *snap;
uint64_t used, comp, uncomp;
if (snapobj == new->ds_object) {
snap = new;
} else {
err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &snap);
if (err != 0)
break;
}
if (dsl_dataset_phys(snap)->ds_prev_snap_txg ==
dsl_dataset_phys(oldsnap)->ds_creation_txg) {
/*
* The blocks in the deadlist cannot be born after
* ds_prev_snap_txg, so get the whole deadlist space,
* which is more efficient (especially for old-format
* deadlists). Unfortunately the deadlist code
* doesn't have enough information to make this
* optimization itself.
*/
dsl_deadlist_space(&snap->ds_deadlist,
&used, &comp, &uncomp);
} else {
dsl_deadlist_space_range(&snap->ds_deadlist,
0, dsl_dataset_phys(oldsnap)->ds_creation_txg,
&used, &comp, &uncomp);
}
*usedp += used;
*compp += comp;
*uncompp += uncomp;
/*
* If we get to the beginning of the chain of snapshots
* (ds_prev_snap_obj == 0) before oldsnap, then oldsnap
* was not a snapshot of/before new.
*/
snapobj = dsl_dataset_phys(snap)->ds_prev_snap_obj;
if (snap != new)
dsl_dataset_rele(snap, FTAG);
if (snapobj == 0) {
err = SET_ERROR(EINVAL);
break;
}
}
return (err);
}
/*
* Return (in *usedp) the amount of space that will be reclaimed if firstsnap,
* lastsnap, and all snapshots in between are deleted.
*
* blocks that would be freed [---------------------------]
* snapshots ---O-------O--------O-------O--------O
* firstsnap lastsnap
*
* This is the set of blocks that were born after the snap before firstsnap,
* (birth > firstsnap->prev_snap_txg) and died before the snap after the
* last snap (ie, is on lastsnap->ds_next->ds_deadlist or an earlier deadlist).
* We calculate this by iterating over the relevant deadlists (from the snap
* after lastsnap, backward to the snap after firstsnap), summing up the
* space on the deadlist that was born after the snap before firstsnap.
*/
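/*
 * Worked example (hypothetical figures): if the deadlists walked below
 * hold 2G + 1G of space born after the snap before firstsnap, this
 * reports *usedp = 3G; space born earlier remains referenced by that
 * older snapshot and is not counted.
 */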
int
dsl_dataset_space_wouldfree(dsl_dataset_t *firstsnap,
dsl_dataset_t *lastsnap,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
int err = 0;
uint64_t snapobj;
dsl_pool_t *dp = firstsnap->ds_dir->dd_pool;
ASSERT(firstsnap->ds_is_snapshot);
ASSERT(lastsnap->ds_is_snapshot);
/*
* Check that the snapshots are in the same dsl_dir, and firstsnap
* is before lastsnap.
*/
if (firstsnap->ds_dir != lastsnap->ds_dir ||
dsl_dataset_phys(firstsnap)->ds_creation_txg >
dsl_dataset_phys(lastsnap)->ds_creation_txg)
return (SET_ERROR(EINVAL));
*usedp = *compp = *uncompp = 0;
snapobj = dsl_dataset_phys(lastsnap)->ds_next_snap_obj;
while (snapobj != firstsnap->ds_object) {
dsl_dataset_t *ds;
uint64_t used, comp, uncomp;
err = dsl_dataset_hold_obj(dp, snapobj, FTAG, &ds);
if (err != 0)
break;
dsl_deadlist_space_range(&ds->ds_deadlist,
dsl_dataset_phys(firstsnap)->ds_prev_snap_txg, UINT64_MAX,
&used, &comp, &uncomp);
*usedp += used;
*compp += comp;
*uncompp += uncomp;
snapobj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
ASSERT3U(snapobj, !=, 0);
dsl_dataset_rele(ds, FTAG);
}
return (err);
}
/*
* Return TRUE if 'earlier' is an earlier snapshot in 'later's timeline.
* For example, they could both be snapshots of the same filesystem, and
* 'earlier' is before 'later'. Or 'earlier' could be the origin of
* 'later's filesystem. Or 'earlier' could be an older snapshot in the origin's
* filesystem. Or 'earlier' could be the origin's origin.
*
* If non-zero, earlier_txg is used instead of earlier's ds_creation_txg.
*/
boolean_t
dsl_dataset_is_before(dsl_dataset_t *later, dsl_dataset_t *earlier,
uint64_t earlier_txg)
{
dsl_pool_t *dp = later->ds_dir->dd_pool;
int error;
boolean_t ret;
ASSERT(dsl_pool_config_held(dp));
ASSERT(earlier->ds_is_snapshot || earlier_txg != 0);
if (earlier_txg == 0)
earlier_txg = dsl_dataset_phys(earlier)->ds_creation_txg;
if (later->ds_is_snapshot &&
earlier_txg >= dsl_dataset_phys(later)->ds_creation_txg)
return (B_FALSE);
if (later->ds_dir == earlier->ds_dir)
return (B_TRUE);
if (!dsl_dir_is_clone(later->ds_dir))
return (B_FALSE);
if (dsl_dir_phys(later->ds_dir)->dd_origin_obj == earlier->ds_object)
return (B_TRUE);
dsl_dataset_t *origin;
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(later->ds_dir)->dd_origin_obj, FTAG, &origin);
if (error != 0)
return (B_FALSE);
ret = dsl_dataset_is_before(origin, earlier, earlier_txg);
dsl_dataset_rele(origin, FTAG);
return (ret);
}
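/*
 * Illustrative usage (variable names hypothetical): incremental
 * send/receive-style validation typically rejects a source snapshot that
 * is not in the target's timeline, e.g.:
 *
 *   if (!dsl_dataset_is_before(tosnap_ds, fromsnap_ds, 0))
 *           return (SET_ERROR(EXDEV));
 *
 * Passing earlier_txg == 0 falls back to fromsnap_ds's ds_creation_txg.
 */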
void
dsl_dataset_zapify(dsl_dataset_t *ds, dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
dmu_object_zapify(mos, ds->ds_object, DMU_OT_DSL_DATASET, tx);
}
boolean_t
dsl_dataset_is_zapified(dsl_dataset_t *ds)
{
dmu_object_info_t doi;
dmu_object_info_from_db(ds->ds_dbuf, &doi);
return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
boolean_t
dsl_dataset_has_resume_receive_state(dsl_dataset_t *ds)
{
return (dsl_dataset_is_zapified(ds) &&
zap_contains(ds->ds_dir->dd_pool->dp_meta_objset,
ds->ds_object, DS_FIELD_RESUME_TOGUID) == 0);
}
+uint64_t
+dsl_dataset_get_remap_deadlist_object(dsl_dataset_t *ds)
+{
+ uint64_t remap_deadlist_obj;
+ int err;
+
+ if (!dsl_dataset_is_zapified(ds))
+ return (0);
+
+ err = zap_lookup(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
+ DS_FIELD_REMAP_DEADLIST, sizeof (remap_deadlist_obj), 1,
+ &remap_deadlist_obj);
+
+ if (err != 0) {
+ VERIFY3S(err, ==, ENOENT);
+ return (0);
+ }
+
+ ASSERT(remap_deadlist_obj != 0);
+ return (remap_deadlist_obj);
+}
+
+boolean_t
+dsl_dataset_remap_deadlist_exists(dsl_dataset_t *ds)
+{
+ EQUIV(dsl_deadlist_is_open(&ds->ds_remap_deadlist),
+ dsl_dataset_get_remap_deadlist_object(ds) != 0);
+ return (dsl_deadlist_is_open(&ds->ds_remap_deadlist));
+}
+
+static void
+dsl_dataset_set_remap_deadlist_object(dsl_dataset_t *ds, uint64_t obj,
+ dmu_tx_t *tx)
+{
+ ASSERT(obj != 0);
+ dsl_dataset_zapify(ds, tx);
+ VERIFY0(zap_add(ds->ds_dir->dd_pool->dp_meta_objset, ds->ds_object,
+ DS_FIELD_REMAP_DEADLIST, sizeof (obj), 1, &obj, tx));
+}
+
+static void
+dsl_dataset_unset_remap_deadlist_object(dsl_dataset_t *ds, dmu_tx_t *tx)
+{
+ VERIFY0(zap_remove(ds->ds_dir->dd_pool->dp_meta_objset,
+ ds->ds_object, DS_FIELD_REMAP_DEADLIST, tx));
+}
+
+void
+dsl_dataset_destroy_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
+{
+ uint64_t remap_deadlist_object;
+ spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
+
+ ASSERT(dmu_tx_is_syncing(tx));
+ ASSERT(dsl_dataset_remap_deadlist_exists(ds));
+
+ remap_deadlist_object = ds->ds_remap_deadlist.dl_object;
+ dsl_deadlist_close(&ds->ds_remap_deadlist);
+ dsl_deadlist_free(spa_meta_objset(spa), remap_deadlist_object, tx);
+ dsl_dataset_unset_remap_deadlist_object(ds, tx);
+ spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+}
+
+void
+dsl_dataset_create_remap_deadlist(dsl_dataset_t *ds, dmu_tx_t *tx)
+{
+ uint64_t remap_deadlist_obj;
+ spa_t *spa = ds->ds_dir->dd_pool->dp_spa;
+
+ ASSERT(dmu_tx_is_syncing(tx));
+ ASSERT(MUTEX_HELD(&ds->ds_remap_deadlist_lock));
+ /*
+ * Currently we only create remap deadlists when there are indirect
+ * vdevs with referenced mappings.
+ */
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
+
+ remap_deadlist_obj = dsl_deadlist_clone(
+ &ds->ds_deadlist, UINT64_MAX,
+ dsl_dataset_phys(ds)->ds_prev_snap_obj, tx);
+ dsl_dataset_set_remap_deadlist_object(ds,
+ remap_deadlist_obj, tx);
+ dsl_deadlist_open(&ds->ds_remap_deadlist, spa_meta_objset(spa),
+ remap_deadlist_obj);
+ spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+}
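/*
 * Illustrative caller sketch (hypothetical; actual device-removal callers
 * may differ): a syncing-context caller can lazily create the remap
 * deadlist under ds_remap_deadlist_lock, matching the ASSERT above:
 *
 *   mutex_enter(&ds->ds_remap_deadlist_lock);
 *   if (!dsl_dataset_remap_deadlist_exists(ds))
 *           dsl_dataset_create_remap_deadlist(ds, tx);
 *   mutex_exit(&ds->ds_remap_deadlist_lock);
 *
 * dsl_dataset_destroy_remap_deadlist() reverses this, freeing the object
 * and decrementing SPA_FEATURE_OBSOLETE_COUNTS.
 */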
+
#if defined(_KERNEL) && defined(HAVE_SPL)
#if defined(_LP64)
module_param(zfs_max_recordsize, int, 0644);
MODULE_PARM_DESC(zfs_max_recordsize, "Max allowed record size");
#else
/* Limited to 1M on 32-bit platforms due to lack of virtual address space */
module_param(zfs_max_recordsize, int, 0444);
MODULE_PARM_DESC(zfs_max_recordsize, "Max allowed record size");
#endif
EXPORT_SYMBOL(dsl_dataset_hold);
EXPORT_SYMBOL(dsl_dataset_hold_flags);
EXPORT_SYMBOL(dsl_dataset_hold_obj);
EXPORT_SYMBOL(dsl_dataset_hold_obj_flags);
EXPORT_SYMBOL(dsl_dataset_own);
EXPORT_SYMBOL(dsl_dataset_own_obj);
EXPORT_SYMBOL(dsl_dataset_name);
EXPORT_SYMBOL(dsl_dataset_rele);
EXPORT_SYMBOL(dsl_dataset_rele_flags);
EXPORT_SYMBOL(dsl_dataset_disown);
EXPORT_SYMBOL(dsl_dataset_tryown);
EXPORT_SYMBOL(dsl_dataset_create_sync);
EXPORT_SYMBOL(dsl_dataset_create_sync_dd);
EXPORT_SYMBOL(dsl_dataset_snapshot_check);
EXPORT_SYMBOL(dsl_dataset_snapshot_sync);
EXPORT_SYMBOL(dsl_dataset_promote);
EXPORT_SYMBOL(dsl_dataset_user_hold);
EXPORT_SYMBOL(dsl_dataset_user_release);
EXPORT_SYMBOL(dsl_dataset_get_holds);
EXPORT_SYMBOL(dsl_dataset_get_blkptr);
EXPORT_SYMBOL(dsl_dataset_get_spa);
EXPORT_SYMBOL(dsl_dataset_modified_since_snap);
EXPORT_SYMBOL(dsl_dataset_space_written);
EXPORT_SYMBOL(dsl_dataset_space_wouldfree);
EXPORT_SYMBOL(dsl_dataset_sync);
EXPORT_SYMBOL(dsl_dataset_block_born);
EXPORT_SYMBOL(dsl_dataset_block_kill);
EXPORT_SYMBOL(dsl_dataset_dirty);
EXPORT_SYMBOL(dsl_dataset_stats);
EXPORT_SYMBOL(dsl_dataset_fast_stat);
EXPORT_SYMBOL(dsl_dataset_space);
EXPORT_SYMBOL(dsl_dataset_fsid_guid);
EXPORT_SYMBOL(dsl_dsobj_to_dsname);
EXPORT_SYMBOL(dsl_dataset_check_quota);
EXPORT_SYMBOL(dsl_dataset_clone_swap_check_impl);
EXPORT_SYMBOL(dsl_dataset_clone_swap_sync_impl);
#endif
diff --git a/module/zfs/dsl_deadlist.c b/module/zfs/dsl_deadlist.c
index 0be0d7420940..10846a3249db 100644
--- a/module/zfs/dsl_deadlist.c
+++ b/module/zfs/dsl_deadlist.c
@@ -1,555 +1,568 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
*/
#include <sys/dsl_dataset.h>
#include <sys/dmu.h>
#include <sys/refcount.h>
#include <sys/zap.h>
#include <sys/zfs_context.h>
#include <sys/dsl_pool.h>
/*
* Deadlist concurrency:
*
* Deadlists can only be modified from the syncing thread.
*
* Except for dsl_deadlist_insert(), it can only be modified with the
* dp_config_rwlock held with RW_WRITER.
*
* The accessors (dsl_deadlist_space() and dsl_deadlist_space_range()) can
* be called concurrently, from open context, with the dp_config_rwlock held
* with RW_READER.
*
* Therefore, we only need to provide locking between dsl_deadlist_insert() and
* the accessors, protecting:
* dl_phys->dl_used,comp,uncomp
* and protecting the dl_tree from being loaded.
* The locking is provided by dl_lock. Note that locking on the bpobj_t
* provides its own locking, and dl_oldfmt is immutable.
*/
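/*
 * Illustrative open-context reader (sketch only): take the pool config
 * lock, then call an accessor, e.g.:
 *
 *   dsl_pool_config_enter(dp, FTAG);
 *   dsl_deadlist_space(&ds->ds_deadlist, &used, &comp, &uncomp);
 *   dsl_pool_config_exit(dp, FTAG);
 *
 * dsl_deadlist_insert() itself runs only in syncing context and takes
 * dl_lock internally.
 */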
static int
dsl_deadlist_compare(const void *arg1, const void *arg2)
{
const dsl_deadlist_entry_t *dle1 = (const dsl_deadlist_entry_t *)arg1;
const dsl_deadlist_entry_t *dle2 = (const dsl_deadlist_entry_t *)arg2;
return (AVL_CMP(dle1->dle_mintxg, dle2->dle_mintxg));
}
static void
dsl_deadlist_load_tree(dsl_deadlist_t *dl)
{
zap_cursor_t zc;
zap_attribute_t za;
ASSERT(MUTEX_HELD(&dl->dl_lock));
ASSERT(!dl->dl_oldfmt);
if (dl->dl_havetree)
return;
avl_create(&dl->dl_tree, dsl_deadlist_compare,
sizeof (dsl_deadlist_entry_t),
offsetof(dsl_deadlist_entry_t, dle_node));
for (zap_cursor_init(&zc, dl->dl_os, dl->dl_object);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
dsl_deadlist_entry_t *dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
dle->dle_mintxg = zfs_strtonum(za.za_name, NULL);
VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os,
za.za_first_integer));
avl_add(&dl->dl_tree, dle);
}
zap_cursor_fini(&zc);
dl->dl_havetree = B_TRUE;
}
void
dsl_deadlist_open(dsl_deadlist_t *dl, objset_t *os, uint64_t object)
{
dmu_object_info_t doi;
+ ASSERT(!dsl_deadlist_is_open(dl));
+
mutex_init(&dl->dl_lock, NULL, MUTEX_DEFAULT, NULL);
dl->dl_os = os;
dl->dl_object = object;
VERIFY3U(0, ==, dmu_bonus_hold(os, object, dl, &dl->dl_dbuf));
dmu_object_info_from_db(dl->dl_dbuf, &doi);
if (doi.doi_type == DMU_OT_BPOBJ) {
dmu_buf_rele(dl->dl_dbuf, dl);
dl->dl_dbuf = NULL;
dl->dl_oldfmt = B_TRUE;
VERIFY3U(0, ==, bpobj_open(&dl->dl_bpobj, os, object));
return;
}
dl->dl_oldfmt = B_FALSE;
dl->dl_phys = dl->dl_dbuf->db_data;
dl->dl_havetree = B_FALSE;
}
+boolean_t
+dsl_deadlist_is_open(dsl_deadlist_t *dl)
+{
+ return (dl->dl_os != NULL);
+}
+
void
dsl_deadlist_close(dsl_deadlist_t *dl)
{
void *cookie = NULL;
dsl_deadlist_entry_t *dle;
- dl->dl_os = NULL;
+ ASSERT(dsl_deadlist_is_open(dl));
mutex_destroy(&dl->dl_lock);
if (dl->dl_oldfmt) {
dl->dl_oldfmt = B_FALSE;
bpobj_close(&dl->dl_bpobj);
+ dl->dl_os = NULL;
+ dl->dl_object = 0;
return;
}
if (dl->dl_havetree) {
while ((dle = avl_destroy_nodes(&dl->dl_tree, &cookie))
!= NULL) {
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
}
avl_destroy(&dl->dl_tree);
}
dmu_buf_rele(dl->dl_dbuf, dl);
dl->dl_dbuf = NULL;
dl->dl_phys = NULL;
+ dl->dl_os = NULL;
+ dl->dl_object = 0;
}
uint64_t
dsl_deadlist_alloc(objset_t *os, dmu_tx_t *tx)
{
if (spa_version(dmu_objset_spa(os)) < SPA_VERSION_DEADLISTS)
return (bpobj_alloc(os, SPA_OLD_MAXBLOCKSIZE, tx));
return (zap_create(os, DMU_OT_DEADLIST, DMU_OT_DEADLIST_HDR,
sizeof (dsl_deadlist_phys_t), tx));
}
void
dsl_deadlist_free(objset_t *os, uint64_t dlobj, dmu_tx_t *tx)
{
dmu_object_info_t doi;
zap_cursor_t zc;
zap_attribute_t za;
VERIFY3U(0, ==, dmu_object_info(os, dlobj, &doi));
if (doi.doi_type == DMU_OT_BPOBJ) {
bpobj_free(os, dlobj, tx);
return;
}
for (zap_cursor_init(&zc, os, dlobj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t obj = za.za_first_integer;
if (obj == dmu_objset_pool(os)->dp_empty_bpobj)
bpobj_decr_empty(os, tx);
else
bpobj_free(os, obj, tx);
}
zap_cursor_fini(&zc);
VERIFY3U(0, ==, dmu_object_free(os, dlobj, tx));
}
static void
dle_enqueue(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
const blkptr_t *bp, dmu_tx_t *tx)
{
ASSERT(MUTEX_HELD(&dl->dl_lock));
if (dle->dle_bpobj.bpo_object ==
dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
uint64_t obj = bpobj_alloc(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
bpobj_close(&dle->dle_bpobj);
bpobj_decr_empty(dl->dl_os, tx);
VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
VERIFY3U(0, ==, zap_update_int_key(dl->dl_os, dl->dl_object,
dle->dle_mintxg, obj, tx));
}
bpobj_enqueue(&dle->dle_bpobj, bp, tx);
}
static void
dle_enqueue_subobj(dsl_deadlist_t *dl, dsl_deadlist_entry_t *dle,
uint64_t obj, dmu_tx_t *tx)
{
ASSERT(MUTEX_HELD(&dl->dl_lock));
if (dle->dle_bpobj.bpo_object !=
dmu_objset_pool(dl->dl_os)->dp_empty_bpobj) {
bpobj_enqueue_subobj(&dle->dle_bpobj, obj, tx);
} else {
bpobj_close(&dle->dle_bpobj);
bpobj_decr_empty(dl->dl_os, tx);
VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
VERIFY3U(0, ==, zap_update_int_key(dl->dl_os, dl->dl_object,
dle->dle_mintxg, obj, tx));
}
}
void
dsl_deadlist_insert(dsl_deadlist_t *dl, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
avl_index_t where;
if (dl->dl_oldfmt) {
bpobj_enqueue(&dl->dl_bpobj, bp, tx);
return;
}
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dl->dl_phys->dl_used +=
bp_get_dsize_sync(dmu_objset_spa(dl->dl_os), bp);
dl->dl_phys->dl_comp += BP_GET_PSIZE(bp);
dl->dl_phys->dl_uncomp += BP_GET_UCSIZE(bp);
dle_tofind.dle_mintxg = bp->blk_birth;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
else
dle = AVL_PREV(&dl->dl_tree, dle);
if (dle == NULL) {
zfs_panic_recover("blkptr at %p has invalid BLK_BIRTH %llu",
bp, (longlong_t)bp->blk_birth);
dle = avl_first(&dl->dl_tree);
}
ASSERT3P(dle, !=, NULL);
dle_enqueue(dl, dle, bp, tx);
mutex_exit(&dl->dl_lock);
}
/*
* Insert new key in deadlist, which must be > all current entries.
* mintxg is not inclusive.
*/
void
dsl_deadlist_add_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
uint64_t obj;
dsl_deadlist_entry_t *dle;
if (dl->dl_oldfmt)
return;
dle = kmem_alloc(sizeof (*dle), KM_SLEEP);
dle->dle_mintxg = mintxg;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY3U(0, ==, bpobj_open(&dle->dle_bpobj, dl->dl_os, obj));
avl_add(&dl->dl_tree, dle);
VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, dl->dl_object,
mintxg, obj, tx));
mutex_exit(&dl->dl_lock);
}
/*
* Remove this key, merging its entries into the previous key.
*/
void
dsl_deadlist_remove_key(dsl_deadlist_t *dl, uint64_t mintxg, dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle, *dle_prev;
if (dl->dl_oldfmt)
return;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = mintxg;
dle = avl_find(&dl->dl_tree, &dle_tofind, NULL);
dle_prev = AVL_PREV(&dl->dl_tree, dle);
dle_enqueue_subobj(dl, dle_prev, dle->dle_bpobj.bpo_object, tx);
avl_remove(&dl->dl_tree, dle);
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object, mintxg, tx));
mutex_exit(&dl->dl_lock);
}
/*
* Walk ds's snapshots to regenerate the ZAP & AVL.
*/
static void
dsl_deadlist_regenerate(objset_t *os, uint64_t dlobj,
uint64_t mrs_obj, dmu_tx_t *tx)
{
- dsl_deadlist_t dl;
+ dsl_deadlist_t dl = { 0 };
dsl_pool_t *dp = dmu_objset_pool(os);
dsl_deadlist_open(&dl, os, dlobj);
if (dl.dl_oldfmt) {
dsl_deadlist_close(&dl);
return;
}
while (mrs_obj != 0) {
dsl_dataset_t *ds;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, mrs_obj, FTAG, &ds));
dsl_deadlist_add_key(&dl,
dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
mrs_obj = dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsl_dataset_rele(ds, FTAG);
}
dsl_deadlist_close(&dl);
}
uint64_t
dsl_deadlist_clone(dsl_deadlist_t *dl, uint64_t maxtxg,
uint64_t mrs_obj, dmu_tx_t *tx)
{
dsl_deadlist_entry_t *dle;
uint64_t newobj;
newobj = dsl_deadlist_alloc(dl->dl_os, tx);
if (dl->dl_oldfmt) {
dsl_deadlist_regenerate(dl->dl_os, newobj, mrs_obj, tx);
return (newobj);
}
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
for (dle = avl_first(&dl->dl_tree); dle;
dle = AVL_NEXT(&dl->dl_tree, dle)) {
uint64_t obj;
if (dle->dle_mintxg >= maxtxg)
break;
obj = bpobj_alloc_empty(dl->dl_os, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY3U(0, ==, zap_add_int_key(dl->dl_os, newobj,
dle->dle_mintxg, obj, tx));
}
mutex_exit(&dl->dl_lock);
return (newobj);
}
void
dsl_deadlist_space(dsl_deadlist_t *dl,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
+ ASSERT(dsl_deadlist_is_open(dl));
if (dl->dl_oldfmt) {
VERIFY3U(0, ==, bpobj_space(&dl->dl_bpobj,
usedp, compp, uncompp));
return;
}
mutex_enter(&dl->dl_lock);
*usedp = dl->dl_phys->dl_used;
*compp = dl->dl_phys->dl_comp;
*uncompp = dl->dl_phys->dl_uncomp;
mutex_exit(&dl->dl_lock);
}
/*
* Return the space used in the range (mintxg, maxtxg].
* Includes maxtxg, does not include mintxg.
* mintxg and maxtxg must both be keys in the deadlist (unless maxtxg is
* larger than any bp in the deadlist, e.g. UINT64_MAX).
*/
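/*
 * Example (sketch): dsl_deadlist_space_range(dl, 100, UINT64_MAX, &used,
 * &comp, &uncomp) sums the space of every block on the deadlist born
 * after txg 100, provided txg 100 is itself a key.
 */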
void
dsl_deadlist_space_range(dsl_deadlist_t *dl, uint64_t mintxg, uint64_t maxtxg,
uint64_t *usedp, uint64_t *compp, uint64_t *uncompp)
{
dsl_deadlist_entry_t *dle;
dsl_deadlist_entry_t dle_tofind;
avl_index_t where;
if (dl->dl_oldfmt) {
VERIFY3U(0, ==, bpobj_space_range(&dl->dl_bpobj,
mintxg, maxtxg, usedp, compp, uncompp));
return;
}
*usedp = *compp = *uncompp = 0;
mutex_enter(&dl->dl_lock);
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = mintxg;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
/*
* If we don't find this mintxg, there shouldn't be anything
* after it either.
*/
ASSERT(dle != NULL ||
avl_nearest(&dl->dl_tree, where, AVL_AFTER) == NULL);
for (; dle && dle->dle_mintxg < maxtxg;
dle = AVL_NEXT(&dl->dl_tree, dle)) {
uint64_t used, comp, uncomp;
VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
&used, &comp, &uncomp));
*usedp += used;
*compp += comp;
*uncompp += uncomp;
}
mutex_exit(&dl->dl_lock);
}
static void
dsl_deadlist_insert_bpobj(dsl_deadlist_t *dl, uint64_t obj, uint64_t birth,
dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
avl_index_t where;
uint64_t used, comp, uncomp;
bpobj_t bpo;
ASSERT(MUTEX_HELD(&dl->dl_lock));
VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
VERIFY3U(0, ==, bpobj_space(&bpo, &used, &comp, &uncomp));
bpobj_close(&bpo);
dsl_deadlist_load_tree(dl);
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dl->dl_phys->dl_used += used;
dl->dl_phys->dl_comp += comp;
dl->dl_phys->dl_uncomp += uncomp;
dle_tofind.dle_mintxg = birth;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_BEFORE);
dle_enqueue_subobj(dl, dle, obj, tx);
}
static int
dsl_deadlist_insert_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_deadlist_t *dl = arg;
dsl_deadlist_insert(dl, bp, tx);
return (0);
}
/*
* Merge the deadlist pointed to by 'obj' into dl. obj will be left as
* an empty deadlist.
*/
void
dsl_deadlist_merge(dsl_deadlist_t *dl, uint64_t obj, dmu_tx_t *tx)
{
zap_cursor_t zc;
zap_attribute_t za;
dmu_buf_t *bonus;
dsl_deadlist_phys_t *dlp;
dmu_object_info_t doi;
VERIFY3U(0, ==, dmu_object_info(dl->dl_os, obj, &doi));
if (doi.doi_type == DMU_OT_BPOBJ) {
bpobj_t bpo;
VERIFY3U(0, ==, bpobj_open(&bpo, dl->dl_os, obj));
VERIFY3U(0, ==, bpobj_iterate(&bpo,
dsl_deadlist_insert_cb, dl, tx));
bpobj_close(&bpo);
return;
}
mutex_enter(&dl->dl_lock);
for (zap_cursor_init(&zc, dl->dl_os, obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t mintxg = zfs_strtonum(za.za_name, NULL);
dsl_deadlist_insert_bpobj(dl, za.za_first_integer, mintxg, tx);
VERIFY3U(0, ==, zap_remove_int(dl->dl_os, obj, mintxg, tx));
}
zap_cursor_fini(&zc);
VERIFY3U(0, ==, dmu_bonus_hold(dl->dl_os, obj, FTAG, &bonus));
dlp = bonus->db_data;
dmu_buf_will_dirty(bonus, tx);
bzero(dlp, sizeof (*dlp));
dmu_buf_rele(bonus, FTAG);
mutex_exit(&dl->dl_lock);
}
/*
* Remove entries on dl that are >= mintxg, and put them on the bpobj.
*/
void
dsl_deadlist_move_bpobj(dsl_deadlist_t *dl, bpobj_t *bpo, uint64_t mintxg,
dmu_tx_t *tx)
{
dsl_deadlist_entry_t dle_tofind;
dsl_deadlist_entry_t *dle;
avl_index_t where;
ASSERT(!dl->dl_oldfmt);
mutex_enter(&dl->dl_lock);
dmu_buf_will_dirty(dl->dl_dbuf, tx);
dsl_deadlist_load_tree(dl);
dle_tofind.dle_mintxg = mintxg;
dle = avl_find(&dl->dl_tree, &dle_tofind, &where);
if (dle == NULL)
dle = avl_nearest(&dl->dl_tree, where, AVL_AFTER);
while (dle) {
uint64_t used, comp, uncomp;
dsl_deadlist_entry_t *dle_next;
bpobj_enqueue_subobj(bpo, dle->dle_bpobj.bpo_object, tx);
VERIFY3U(0, ==, bpobj_space(&dle->dle_bpobj,
&used, &comp, &uncomp));
ASSERT3U(dl->dl_phys->dl_used, >=, used);
ASSERT3U(dl->dl_phys->dl_comp, >=, comp);
ASSERT3U(dl->dl_phys->dl_uncomp, >=, uncomp);
dl->dl_phys->dl_used -= used;
dl->dl_phys->dl_comp -= comp;
dl->dl_phys->dl_uncomp -= uncomp;
VERIFY3U(0, ==, zap_remove_int(dl->dl_os, dl->dl_object,
dle->dle_mintxg, tx));
dle_next = AVL_NEXT(&dl->dl_tree, dle);
avl_remove(&dl->dl_tree, dle);
bpobj_close(&dle->dle_bpobj);
kmem_free(dle, sizeof (*dle));
dle = dle_next;
}
mutex_exit(&dl->dl_lock);
}
diff --git a/module/zfs/dsl_destroy.c b/module/zfs/dsl_destroy.c
index d0fcacaedc6b..e11508c90d0f 100644
--- a/module/zfs/dsl_destroy.c
+++ b/module/zfs/dsl_destroy.c
@@ -1,1058 +1,1106 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2013 by Joyent, Inc. All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dsl_userhold.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_destroy.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dmu_traverse.h>
#include <sys/dsl_scan.h>
#include <sys/dmu_objset.h>
#include <sys/zap.h>
#include <sys/zfeature.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/zvol.h>
#include <sys/zcp.h>
int
dsl_destroy_snapshot_check_impl(dsl_dataset_t *ds, boolean_t defer)
{
if (!ds->ds_is_snapshot)
return (SET_ERROR(EINVAL));
if (dsl_dataset_long_held(ds))
return (SET_ERROR(EBUSY));
/*
* Only allow deferred destroy on pools that support it.
* NOTE: deferred destroy is only supported on snapshots.
*/
if (defer) {
if (spa_version(ds->ds_dir->dd_pool->dp_spa) <
SPA_VERSION_USERREFS)
return (SET_ERROR(ENOTSUP));
return (0);
}
/*
* If this snapshot has an elevated user reference count,
* we can't destroy it yet.
*/
if (ds->ds_userrefs > 0)
return (SET_ERROR(EBUSY));
/*
* Can't delete a branch point.
*/
if (dsl_dataset_phys(ds)->ds_num_children > 1)
return (SET_ERROR(EEXIST));
return (0);
}
int
dsl_destroy_snapshot_check(void *arg, dmu_tx_t *tx)
{
dsl_destroy_snapshot_arg_t *ddsa = arg;
const char *dsname = ddsa->ddsa_name;
boolean_t defer = ddsa->ddsa_defer;
dsl_pool_t *dp = dmu_tx_pool(tx);
int error = 0;
dsl_dataset_t *ds;
error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
/*
* If the snapshot does not exist, silently ignore it, and
* dsl_destroy_snapshot_sync() will be a no-op
* (it's "already destroyed").
*/
if (error == ENOENT)
return (0);
if (error == 0) {
error = dsl_destroy_snapshot_check_impl(ds, defer);
dsl_dataset_rele(ds, FTAG);
}
return (error);
}
struct process_old_arg {
dsl_dataset_t *ds;
dsl_dataset_t *ds_prev;
boolean_t after_branch_point;
zio_t *pio;
uint64_t used, comp, uncomp;
};
static int
process_old_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
struct process_old_arg *poa = arg;
dsl_pool_t *dp = poa->ds->ds_dir->dd_pool;
ASSERT(!BP_IS_HOLE(bp));
if (bp->blk_birth <= dsl_dataset_phys(poa->ds)->ds_prev_snap_txg) {
dsl_deadlist_insert(&poa->ds->ds_deadlist, bp, tx);
if (poa->ds_prev && !poa->after_branch_point &&
bp->blk_birth >
dsl_dataset_phys(poa->ds_prev)->ds_prev_snap_txg) {
dsl_dataset_phys(poa->ds_prev)->ds_unique_bytes +=
bp_get_dsize_sync(dp->dp_spa, bp);
}
} else {
poa->used += bp_get_dsize_sync(dp->dp_spa, bp);
poa->comp += BP_GET_PSIZE(bp);
poa->uncomp += BP_GET_UCSIZE(bp);
dsl_free_sync(poa->pio, dp, tx->tx_txg, bp);
}
return (0);
}
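/*
* Used when the next snapshot's deadlist is still in the old (flat bpobj)
* format: sort its blocks into our deadlist or free them outright, adjust
* snapused accordingly, then swap deadlist objects so ds_next inherits ours.
*/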
static void
process_old_deadlist(dsl_dataset_t *ds, dsl_dataset_t *ds_prev,
dsl_dataset_t *ds_next, boolean_t after_branch_point, dmu_tx_t *tx)
{
struct process_old_arg poa = { 0 };
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
uint64_t deadlist_obj;
ASSERT(ds->ds_deadlist.dl_oldfmt);
ASSERT(ds_next->ds_deadlist.dl_oldfmt);
poa.ds = ds;
poa.ds_prev = ds_prev;
poa.after_branch_point = after_branch_point;
poa.pio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
VERIFY0(bpobj_iterate(&ds_next->ds_deadlist.dl_bpobj,
process_old_cb, &poa, tx));
VERIFY0(zio_wait(poa.pio));
ASSERT3U(poa.used, ==, dsl_dataset_phys(ds)->ds_unique_bytes);
/* change snapused */
dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
-poa.used, -poa.comp, -poa.uncomp, tx);
/* swap next's deadlist to our deadlist */
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_close(&ds_next->ds_deadlist);
deadlist_obj = dsl_dataset_phys(ds)->ds_deadlist_obj;
dsl_dataset_phys(ds)->ds_deadlist_obj =
dsl_dataset_phys(ds_next)->ds_deadlist_obj;
dsl_dataset_phys(ds_next)->ds_deadlist_obj = deadlist_obj;
dsl_deadlist_open(&ds->ds_deadlist, mos,
dsl_dataset_phys(ds)->ds_deadlist_obj);
dsl_deadlist_open(&ds_next->ds_deadlist, mos,
dsl_dataset_phys(ds_next)->ds_deadlist_obj);
}
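/*
* Remove the deadlist key at 'mintxg' from every clone that branched off
* this dataset after that txg, recursing into the clones' own clones.
*/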
static void
dsl_dataset_remove_clones_key(dsl_dataset_t *ds, uint64_t mintxg, dmu_tx_t *tx)
{
objset_t *mos = ds->ds_dir->dd_pool->dp_meta_objset;
zap_cursor_t *zc;
zap_attribute_t *za;
/*
* If it is the old version, dd_clones doesn't exist so we can't
* find the clones, but dsl_deadlist_remove_key() is a no-op so it
* doesn't matter.
*/
if (dsl_dir_phys(ds->ds_dir)->dd_clones == 0)
return;
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
for (zap_cursor_init(zc, mos, dsl_dir_phys(ds->ds_dir)->dd_clones);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
dsl_dataset_t *clone;
VERIFY0(dsl_dataset_hold_obj(ds->ds_dir->dd_pool,
za->za_first_integer, FTAG, &clone));
if (clone->ds_dir->dd_origin_txg > mintxg) {
dsl_deadlist_remove_key(&clone->ds_deadlist,
mintxg, tx);
+ if (dsl_dataset_remap_deadlist_exists(clone)) {
+ dsl_deadlist_remove_key(
+ &clone->ds_remap_deadlist, mintxg, tx);
+ }
dsl_dataset_remove_clones_key(clone, mintxg, tx);
}
dsl_dataset_rele(clone, FTAG);
}
zap_cursor_fini(zc);
kmem_free(za, sizeof (zap_attribute_t));
kmem_free(zc, sizeof (zap_cursor_t));
}
+static void
+dsl_destroy_snapshot_handle_remaps(dsl_dataset_t *ds, dsl_dataset_t *ds_next,
+ dmu_tx_t *tx)
+{
+ dsl_pool_t *dp = ds->ds_dir->dd_pool;
+
+ /* Move blocks to be obsoleted to pool's obsolete list. */
+ if (dsl_dataset_remap_deadlist_exists(ds_next)) {
+ if (!bpobj_is_open(&dp->dp_obsolete_bpobj))
+ dsl_pool_create_obsolete_bpobj(dp, tx);
+
+ dsl_deadlist_move_bpobj(&ds_next->ds_remap_deadlist,
+ &dp->dp_obsolete_bpobj,
+ dsl_dataset_phys(ds)->ds_prev_snap_txg, tx);
+ }
+
+ /* Merge our deadlist into next's and free it. */
+ if (dsl_dataset_remap_deadlist_exists(ds)) {
+ uint64_t remap_deadlist_object =
+ dsl_dataset_get_remap_deadlist_object(ds);
+ ASSERT(remap_deadlist_object != 0);
+
+ mutex_enter(&ds_next->ds_remap_deadlist_lock);
+ if (!dsl_dataset_remap_deadlist_exists(ds_next))
+ dsl_dataset_create_remap_deadlist(ds_next, tx);
+ mutex_exit(&ds_next->ds_remap_deadlist_lock);
+
+ dsl_deadlist_merge(&ds_next->ds_remap_deadlist,
+ remap_deadlist_object, tx);
+ dsl_dataset_destroy_remap_deadlist(ds, tx);
+ }
+}
+
void
dsl_destroy_snapshot_sync_impl(dsl_dataset_t *ds, boolean_t defer, dmu_tx_t *tx)
{
int after_branch_point = FALSE;
dsl_pool_t *dp = ds->ds_dir->dd_pool;
objset_t *mos = dp->dp_meta_objset;
dsl_dataset_t *ds_prev = NULL;
uint64_t obj;
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
ASSERT(refcount_is_zero(&ds->ds_longholds));
if (defer &&
(ds->ds_userrefs > 0 ||
dsl_dataset_phys(ds)->ds_num_children > 1)) {
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_DEFER_DESTROY;
spa_history_log_internal_ds(ds, "defer_destroy", tx, "");
return;
}
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
/* We need to log before removing it from the namespace. */
spa_history_log_internal_ds(ds, "destroy", tx, "");
dsl_scan_ds_destroyed(ds, tx);
obj = ds->ds_object;
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_inuse[f]) {
dsl_dataset_deactivate_feature(obj, f, tx);
ds->ds_feature_inuse[f] = B_FALSE;
}
}
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
ASSERT3P(ds->ds_prev, ==, NULL);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &ds_prev));
after_branch_point =
(dsl_dataset_phys(ds_prev)->ds_next_snap_obj != obj);
dmu_buf_will_dirty(ds_prev->ds_dbuf, tx);
if (after_branch_point &&
dsl_dataset_phys(ds_prev)->ds_next_clones_obj != 0) {
dsl_dataset_remove_from_next_clones(ds_prev, obj, tx);
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
VERIFY0(zap_add_int(mos,
dsl_dataset_phys(ds_prev)->
ds_next_clones_obj,
dsl_dataset_phys(ds)->ds_next_snap_obj,
tx));
}
}
if (!after_branch_point) {
dsl_dataset_phys(ds_prev)->ds_next_snap_obj =
dsl_dataset_phys(ds)->ds_next_snap_obj;
}
}
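/* Splice ds out of the snapshot list: point ds_next back at our previous snapshot. */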
dsl_dataset_t *ds_next;
uint64_t old_unique;
uint64_t used = 0, comp = 0, uncomp = 0;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_next_snap_obj, FTAG, &ds_next));
ASSERT3U(dsl_dataset_phys(ds_next)->ds_prev_snap_obj, ==, obj);
old_unique = dsl_dataset_phys(ds_next)->ds_unique_bytes;
dmu_buf_will_dirty(ds_next->ds_dbuf, tx);
dsl_dataset_phys(ds_next)->ds_prev_snap_obj =
dsl_dataset_phys(ds)->ds_prev_snap_obj;
dsl_dataset_phys(ds_next)->ds_prev_snap_txg =
dsl_dataset_phys(ds)->ds_prev_snap_txg;
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_txg, ==,
ds_prev ? dsl_dataset_phys(ds_prev)->ds_creation_txg : 0);
if (ds_next->ds_deadlist.dl_oldfmt) {
process_old_deadlist(ds, ds_prev, ds_next,
after_branch_point, tx);
} else {
/* Adjust prev's unique space. */
if (ds_prev && !after_branch_point) {
dsl_deadlist_space_range(&ds_next->ds_deadlist,
dsl_dataset_phys(ds_prev)->ds_prev_snap_txg,
dsl_dataset_phys(ds)->ds_prev_snap_txg,
&used, &comp, &uncomp);
dsl_dataset_phys(ds_prev)->ds_unique_bytes += used;
}
/* Adjust snapused. */
dsl_deadlist_space_range(&ds_next->ds_deadlist,
dsl_dataset_phys(ds)->ds_prev_snap_txg, UINT64_MAX,
&used, &comp, &uncomp);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_SNAP,
-used, -comp, -uncomp, tx);
/* Move blocks to be freed to pool's free list. */
dsl_deadlist_move_bpobj(&ds_next->ds_deadlist,
&dp->dp_free_bpobj, dsl_dataset_phys(ds)->ds_prev_snap_txg,
tx);
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir,
DD_USED_HEAD, used, comp, uncomp, tx);
/* Merge our deadlist into next's and free it. */
dsl_deadlist_merge(&ds_next->ds_deadlist,
dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
}
+
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
+ dsl_destroy_snapshot_handle_remaps(ds, ds_next, tx);
+
/* Collapse range in clone heads */
dsl_dataset_remove_clones_key(ds,
dsl_dataset_phys(ds)->ds_creation_txg, tx);
if (ds_next->ds_is_snapshot) {
dsl_dataset_t *ds_nextnext;
/*
* Update next's unique to include blocks which
* were previously shared by only this snapshot
* and it. Those blocks will be born after the
* prev snap and before this snap, and will have
* died after the next snap and before the one
* after that (ie. be on the snap after next's
* deadlist).
*/
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds_next)->ds_next_snap_obj,
FTAG, &ds_nextnext));
dsl_deadlist_space_range(&ds_nextnext->ds_deadlist,
dsl_dataset_phys(ds)->ds_prev_snap_txg,
dsl_dataset_phys(ds)->ds_creation_txg,
&used, &comp, &uncomp);
dsl_dataset_phys(ds_next)->ds_unique_bytes += used;
dsl_dataset_rele(ds_nextnext, FTAG);
ASSERT3P(ds_next->ds_prev, ==, NULL);
/* Collapse range in this head. */
dsl_dataset_t *hds;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &hds));
dsl_deadlist_remove_key(&hds->ds_deadlist,
dsl_dataset_phys(ds)->ds_creation_txg, tx);
+ if (dsl_dataset_remap_deadlist_exists(hds)) {
+ dsl_deadlist_remove_key(&hds->ds_remap_deadlist,
+ dsl_dataset_phys(ds)->ds_creation_txg, tx);
+ }
dsl_dataset_rele(hds, FTAG);
} else {
ASSERT3P(ds_next->ds_prev, ==, ds);
dsl_dataset_rele(ds_next->ds_prev, ds_next);
ds_next->ds_prev = NULL;
if (ds_prev) {
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds_next, &ds_next->ds_prev));
}
dsl_dataset_recalc_head_uniq(ds_next);
/*
* Reduce the amount of our unconsumed refreservation
* being charged to our parent by the amount of
* new unique data we have gained.
*/
if (old_unique < ds_next->ds_reserved) {
int64_t mrsdelta;
uint64_t new_unique =
dsl_dataset_phys(ds_next)->ds_unique_bytes;
ASSERT(old_unique <= new_unique);
mrsdelta = MIN(new_unique - old_unique,
ds_next->ds_reserved - old_unique);
dsl_dir_diduse_space(ds->ds_dir,
DD_USED_REFRSRV, -mrsdelta, 0, 0, tx);
}
}
dsl_dataset_rele(ds_next, FTAG);
/*
* This must be done after the dsl_traverse(), because it will
* re-open the objset.
*/
if (ds->ds_objset) {
dmu_objset_evict(ds->ds_objset);
ds->ds_objset = NULL;
}
/* remove from snapshot namespace */
dsl_dataset_t *ds_head;
ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj, FTAG, &ds_head));
VERIFY0(dsl_dataset_get_snapname(ds));
#ifdef ZFS_DEBUG
{
uint64_t val;
int err;
err = dsl_dataset_snap_lookup(ds_head,
ds->ds_snapname, &val);
ASSERT0(err);
ASSERT3U(val, ==, obj);
}
#endif
VERIFY0(dsl_dataset_snap_remove(ds_head, ds->ds_snapname, tx, B_TRUE));
dsl_dataset_rele(ds_head, FTAG);
if (ds_prev != NULL)
dsl_dataset_rele(ds_prev, FTAG);
spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
ASSERTV(uint64_t count);
ASSERT0(zap_count(mos,
dsl_dataset_phys(ds)->ds_next_clones_obj, &count) &&
count == 0);
VERIFY0(dmu_object_free(mos,
dsl_dataset_phys(ds)->ds_next_clones_obj, tx));
}
if (dsl_dataset_phys(ds)->ds_props_obj != 0)
VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_props_obj,
tx));
if (dsl_dataset_phys(ds)->ds_userrefs_obj != 0)
VERIFY0(zap_destroy(mos, dsl_dataset_phys(ds)->ds_userrefs_obj,
tx));
dsl_dir_rele(ds->ds_dir, ds);
ds->ds_dir = NULL;
dmu_object_free_zapified(mos, obj, tx);
}
void
dsl_destroy_snapshot_sync(void *arg, dmu_tx_t *tx)
{
dsl_destroy_snapshot_arg_t *ddsa = arg;
const char *dsname = ddsa->ddsa_name;
boolean_t defer = ddsa->ddsa_defer;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error = dsl_dataset_hold(dp, dsname, FTAG, &ds);
if (error == ENOENT)
return;
ASSERT0(error);
dsl_destroy_snapshot_sync_impl(ds, defer, tx);
zvol_remove_minors(dp->dp_spa, dsname, B_TRUE);
dsl_dataset_rele(ds, FTAG);
}
/*
* The semantics of this function are described in the comment above
* lzc_destroy_snaps(). To summarize:
*
* The snapshots must all be in the same pool.
*
* Snapshots that don't exist will be silently ignored (considered to be
* "already deleted").
*
* On success, all snaps will be destroyed and this will return 0.
* On failure, no snaps will be destroyed, the errlist will be filled in,
* and this will return an errno.
*/
int
dsl_destroy_snapshots_nvl(nvlist_t *snaps, boolean_t defer,
nvlist_t *errlist)
{
if (nvlist_next_nvpair(snaps, NULL) == NULL)
return (0);
/*
* lzc_destroy_snaps() is documented to take an nvlist whose
* values "don't matter". We need to convert that nvlist to
* one that we know can be converted to LUA. We also don't
* care about any duplicate entries because the nvlist will
* be converted to a LUA table which should take care of this.
*/
nvlist_t *snaps_normalized;
VERIFY0(nvlist_alloc(&snaps_normalized, 0, KM_SLEEP));
for (nvpair_t *pair = nvlist_next_nvpair(snaps, NULL);
pair != NULL; pair = nvlist_next_nvpair(snaps, pair)) {
fnvlist_add_boolean_value(snaps_normalized,
nvpair_name(pair), B_TRUE);
}
nvlist_t *arg;
VERIFY0(nvlist_alloc(&arg, 0, KM_SLEEP));
fnvlist_add_nvlist(arg, "snaps", snaps_normalized);
fnvlist_free(snaps_normalized);
fnvlist_add_boolean_value(arg, "defer", defer);
nvlist_t *wrapper;
VERIFY0(nvlist_alloc(&wrapper, 0, KM_SLEEP));
fnvlist_add_nvlist(wrapper, ZCP_ARG_ARGLIST, arg);
fnvlist_free(arg);
const char *program =
"arg = ...\n"
"snaps = arg['snaps']\n"
"defer = arg['defer']\n"
"errors = { }\n"
"has_errors = false\n"
"for snap, v in pairs(snaps) do\n"
" errno = zfs.check.destroy{snap, defer=defer}\n"
" zfs.debug('snap: ' .. snap .. ' errno: ' .. errno)\n"
" if errno == ENOENT then\n"
" snaps[snap] = nil\n"
" elseif errno ~= 0 then\n"
" errors[snap] = errno\n"
" has_errors = true\n"
" end\n"
"end\n"
"if has_errors then\n"
" return errors\n"
"end\n"
"for snap, v in pairs(snaps) do\n"
" errno = zfs.sync.destroy{snap, defer=defer}\n"
" assert(errno == 0)\n"
"end\n"
"return { }\n";
nvlist_t *result = fnvlist_alloc();
int error = zcp_eval(nvpair_name(nvlist_next_nvpair(snaps, NULL)),
program,
B_TRUE,
0,
zfs_lua_max_memlimit,
nvlist_next_nvpair(wrapper, NULL), result);
if (error != 0) {
char *errorstr = NULL;
(void) nvlist_lookup_string(result, ZCP_RET_ERROR, &errorstr);
if (errorstr != NULL) {
zfs_dbgmsg(errorstr);
}
/* Don't leak the wrapper and result nvlists on error. */
fnvlist_free(wrapper);
fnvlist_free(result);
return (error);
}
fnvlist_free(wrapper);
/*
* lzc_destroy_snaps() is documented to fill the errlist with
* int32 values, so we need to convert the int64 values that are
* returned from LUA.
*/
int rv = 0;
nvlist_t *errlist_raw = fnvlist_lookup_nvlist(result, ZCP_RET_RETURN);
for (nvpair_t *pair = nvlist_next_nvpair(errlist_raw, NULL);
pair != NULL; pair = nvlist_next_nvpair(errlist_raw, pair)) {
int32_t val = (int32_t)fnvpair_value_int64(pair);
if (rv == 0)
rv = val;
fnvlist_add_int32(errlist, nvpair_name(pair), val);
}
fnvlist_free(result);
return (rv);
}
int
dsl_destroy_snapshot(const char *name, boolean_t defer)
{
int error;
nvlist_t *nvl = fnvlist_alloc();
nvlist_t *errlist = fnvlist_alloc();
fnvlist_add_boolean(nvl, name);
error = dsl_destroy_snapshots_nvl(nvl, defer, errlist);
fnvlist_free(errlist);
fnvlist_free(nvl);
return (error);
}
struct killarg {
dsl_dataset_t *ds;
dmu_tx_t *tx;
};
/* ARGSUSED */
static int
kill_blkptr(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
struct killarg *ka = arg;
dmu_tx_t *tx = ka->tx;
if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
return (0);
if (zb->zb_level == ZB_ZIL_LEVEL) {
ASSERT(zilog != NULL);
/*
* It's a block in the intent log. It has no
* accounting, so just free it.
*/
dsl_free(ka->tx->tx_pool, ka->tx->tx_txg, bp);
} else {
ASSERT(zilog == NULL);
ASSERT3U(bp->blk_birth, >,
dsl_dataset_phys(ka->ds)->ds_prev_snap_txg);
(void) dsl_dataset_block_kill(ka->ds, bp, tx, B_FALSE);
}
return (0);
}
static void
old_synchronous_dataset_destroy(dsl_dataset_t *ds, dmu_tx_t *tx)
{
struct killarg ka;
/*
* Free everything that we point to (that's born after
* the previous snapshot, if we are a clone)
*
* NB: this should be very quick, because we already
* freed all the objects in open context.
*/
ka.ds = ds;
ka.tx = tx;
VERIFY0(traverse_dataset(ds,
dsl_dataset_phys(ds)->ds_prev_snap_txg, TRAVERSE_POST |
TRAVERSE_NO_DECRYPT, kill_blkptr, &ka));
ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
dsl_dataset_phys(ds)->ds_unique_bytes == 0);
}
int
dsl_destroy_head_check_impl(dsl_dataset_t *ds, int expected_holds)
{
int error;
uint64_t count;
objset_t *mos;
ASSERT(!ds->ds_is_snapshot);
if (ds->ds_is_snapshot)
return (SET_ERROR(EINVAL));
if (refcount_count(&ds->ds_longholds) != expected_holds)
return (SET_ERROR(EBUSY));
mos = ds->ds_dir->dd_pool->dp_meta_objset;
/*
* Can't delete a head dataset if there are snapshots of it.
* (Except if the only snapshots are from the branch we cloned
* from.)
*/
if (ds->ds_prev != NULL &&
dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj == ds->ds_object)
return (SET_ERROR(EBUSY));
/*
* Can't delete if there are children of this fs.
*/
error = zap_count(mos,
dsl_dir_phys(ds->ds_dir)->dd_child_dir_zapobj, &count);
if (error != 0)
return (error);
if (count != 0)
return (SET_ERROR(EEXIST));
if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev) &&
dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
ds->ds_prev->ds_userrefs == 0) {
/* We need to remove the origin snapshot as well. */
if (!refcount_is_zero(&ds->ds_prev->ds_longholds))
return (SET_ERROR(EBUSY));
}
return (0);
}
int
dsl_destroy_head_check(void *arg, dmu_tx_t *tx)
{
dsl_destroy_head_arg_t *ddha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
error = dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds);
if (error != 0)
return (error);
error = dsl_destroy_head_check_impl(ds, 0);
dsl_dataset_rele(ds, FTAG);
return (error);
}
static void
dsl_dir_destroy_sync(uint64_t ddobj, dmu_tx_t *tx)
{
dsl_dir_t *dd;
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
dd_used_t t;
ASSERT(RRW_WRITE_HELD(&dmu_tx_pool(tx)->dp_config_rwlock));
VERIFY0(dsl_dir_hold_obj(dp, ddobj, NULL, FTAG, &dd));
ASSERT0(dsl_dir_phys(dd)->dd_head_dataset_obj);
/*
* Decrement the filesystem count for all parent filesystems.
*
* When we receive an incremental stream into a filesystem that already
* exists, a temporary clone is created. We never count this temporary
* clone, whose name begins with a '%'.
*/
if (dd->dd_myname[0] != '%' && dd->dd_parent != NULL)
dsl_fs_ss_count_adjust(dd->dd_parent, -1,
DD_FIELD_FILESYSTEM_COUNT, tx);
/*
* Remove our reservation. The impl() routine avoids setting the
* actual property, which would require the (already destroyed) ds.
*/
dsl_dir_set_reservation_sync_impl(dd, 0, tx);
ASSERT0(dsl_dir_phys(dd)->dd_used_bytes);
ASSERT0(dsl_dir_phys(dd)->dd_reserved);
for (t = 0; t < DD_USED_NUM; t++)
ASSERT0(dsl_dir_phys(dd)->dd_used_breakdown[t]);
if (dd->dd_crypto_obj != 0) {
dsl_crypto_key_destroy_sync(dd->dd_crypto_obj, tx);
(void) spa_keystore_unload_wkey_impl(dp->dp_spa, dd->dd_object);
}
VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_child_dir_zapobj, tx));
VERIFY0(zap_destroy(mos, dsl_dir_phys(dd)->dd_props_zapobj, tx));
VERIFY0(dsl_deleg_destroy(mos, dsl_dir_phys(dd)->dd_deleg_zapobj, tx));
VERIFY0(zap_remove(mos,
dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
dd->dd_myname, tx));
dsl_dir_rele(dd, FTAG);
dmu_object_free_zapified(mos, ddobj, tx);
}
void
dsl_destroy_head_sync_impl(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = dmu_tx_pool(tx);
objset_t *mos = dp->dp_meta_objset;
uint64_t obj, ddobj, prevobj = 0;
boolean_t rmorigin;
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
ASSERT(ds->ds_prev == NULL ||
dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj != ds->ds_object);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT3U(dsl_dataset_phys(ds)->ds_bp.blk_birth, <=, tx->tx_txg);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
ASSERT(RRW_WRITE_HELD(&dp->dp_config_rwlock));
/* We need to log before removing it from the namespace. */
spa_history_log_internal_ds(ds, "destroy", tx, "");
rmorigin = (dsl_dir_is_clone(ds->ds_dir) &&
DS_IS_DEFER_DESTROY(ds->ds_prev) &&
dsl_dataset_phys(ds->ds_prev)->ds_num_children == 2 &&
ds->ds_prev->ds_userrefs == 0);
/* Remove our reservation. */
if (ds->ds_reserved != 0) {
dsl_dataset_set_refreservation_sync_impl(ds,
(ZPROP_SRC_NONE | ZPROP_SRC_LOCAL | ZPROP_SRC_RECEIVED),
0, tx);
ASSERT0(ds->ds_reserved);
}
obj = ds->ds_object;
for (spa_feature_t f = 0; f < SPA_FEATURES; f++) {
if (ds->ds_feature_inuse[f]) {
dsl_dataset_deactivate_feature(obj, f, tx);
ds->ds_feature_inuse[f] = B_FALSE;
}
}
dsl_scan_ds_destroyed(ds, tx);
if (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
/* This is a clone */
ASSERT(ds->ds_prev != NULL);
ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_next_snap_obj, !=,
obj);
ASSERT0(dsl_dataset_phys(ds)->ds_next_snap_obj);
dmu_buf_will_dirty(ds->ds_prev->ds_dbuf, tx);
if (dsl_dataset_phys(ds->ds_prev)->ds_next_clones_obj != 0) {
dsl_dataset_remove_from_next_clones(ds->ds_prev,
obj, tx);
}
ASSERT3U(dsl_dataset_phys(ds->ds_prev)->ds_num_children, >, 1);
dsl_dataset_phys(ds->ds_prev)->ds_num_children--;
}
/*
* Destroy the deadlist. Unless it's a clone, the
- * deadlist should be empty. (If it's a clone, it's
- * safe to ignore the deadlist contents.)
+ * deadlist should be empty since the dataset has no snapshots.
+ * (If it's a clone, it's safe to ignore the deadlist contents
+ * since they are still referenced by the origin snapshot.)
*/
dsl_deadlist_close(&ds->ds_deadlist);
dsl_deadlist_free(mos, dsl_dataset_phys(ds)->ds_deadlist_obj, tx);
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_deadlist_obj = 0;
+ if (dsl_dataset_remap_deadlist_exists(ds))
+ dsl_dataset_destroy_remap_deadlist(ds, tx);
+
objset_t *os;
VERIFY0(dmu_objset_from_ds(ds, &os));
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
old_synchronous_dataset_destroy(ds, tx);
} else {
/*
* Move the bptree into the pool's list of trees to
* clean up and update space accounting information.
*/
uint64_t used, comp, uncomp;
zil_destroy_sync(dmu_objset_zil(os), tx);
if (!spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY)) {
dsl_scan_t *scn = dp->dp_scan;
spa_feature_incr(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY,
tx);
dp->dp_bptree_obj = bptree_alloc(mos, tx);
VERIFY0(zap_add(mos,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
&dp->dp_bptree_obj, tx));
ASSERT(!scn->scn_async_destroying);
scn->scn_async_destroying = B_TRUE;
}
used = dsl_dir_phys(ds->ds_dir)->dd_used_bytes;
comp = dsl_dir_phys(ds->ds_dir)->dd_compressed_bytes;
uncomp = dsl_dir_phys(ds->ds_dir)->dd_uncompressed_bytes;
ASSERT(!DS_UNIQUE_IS_ACCURATE(ds) ||
dsl_dataset_phys(ds)->ds_unique_bytes == used);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
bptree_add(mos, dp->dp_bptree_obj,
&dsl_dataset_phys(ds)->ds_bp,
dsl_dataset_phys(ds)->ds_prev_snap_txg,
used, comp, uncomp, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
dsl_dir_diduse_space(ds->ds_dir, DD_USED_HEAD,
-used, -comp, -uncomp, tx);
dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
used, comp, uncomp, tx);
}
if (ds->ds_prev != NULL) {
if (spa_version(dp->dp_spa) >= SPA_VERSION_DIR_CLONES) {
VERIFY0(zap_remove_int(mos,
dsl_dir_phys(ds->ds_prev->ds_dir)->dd_clones,
ds->ds_object, tx));
}
prevobj = ds->ds_prev->ds_object;
dsl_dataset_rele(ds->ds_prev, ds);
ds->ds_prev = NULL;
}
/*
* This must be done after the dsl_traverse(), because it will
* re-open the objset.
*/
if (ds->ds_objset) {
dmu_objset_evict(ds->ds_objset);
ds->ds_objset = NULL;
}
/* Erase the link in the dir */
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
dsl_dir_phys(ds->ds_dir)->dd_head_dataset_obj = 0;
ddobj = ds->ds_dir->dd_object;
ASSERT(dsl_dataset_phys(ds)->ds_snapnames_zapobj != 0);
VERIFY0(zap_destroy(mos,
dsl_dataset_phys(ds)->ds_snapnames_zapobj, tx));
if (ds->ds_bookmarks != 0) {
VERIFY0(zap_destroy(mos, ds->ds_bookmarks, tx));
spa_feature_decr(dp->dp_spa, SPA_FEATURE_BOOKMARKS, tx);
}
spa_prop_clear_bootfs(dp->dp_spa, ds->ds_object, tx);
ASSERT0(dsl_dataset_phys(ds)->ds_next_clones_obj);
ASSERT0(dsl_dataset_phys(ds)->ds_props_obj);
ASSERT0(dsl_dataset_phys(ds)->ds_userrefs_obj);
dsl_dir_rele(ds->ds_dir, ds);
ds->ds_dir = NULL;
dmu_object_free_zapified(mos, obj, tx);
dsl_dir_destroy_sync(ddobj, tx);
if (rmorigin) {
dsl_dataset_t *prev;
VERIFY0(dsl_dataset_hold_obj(dp, prevobj, FTAG, &prev));
dsl_destroy_snapshot_sync_impl(prev, B_FALSE, tx);
dsl_dataset_rele(prev, FTAG);
}
}
void
dsl_destroy_head_sync(void *arg, dmu_tx_t *tx)
{
dsl_destroy_head_arg_t *ddha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
dsl_destroy_head_sync_impl(ds, tx);
zvol_remove_minors(dp->dp_spa, ddha->ddha_name, B_TRUE);
dsl_dataset_rele(ds, FTAG);
}
static void
dsl_destroy_head_begin_sync(void *arg, dmu_tx_t *tx)
{
dsl_destroy_head_arg_t *ddha = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold(dp, ddha->ddha_name, FTAG, &ds));
/* Mark it as inconsistent on-disk, in case we crash */
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_flags |= DS_FLAG_INCONSISTENT;
spa_history_log_internal_ds(ds, "destroy begin", tx, "");
dsl_dataset_rele(ds, FTAG);
}
int
dsl_destroy_head(const char *name)
{
dsl_destroy_head_arg_t ddha;
int error;
spa_t *spa;
boolean_t isenabled;
#ifdef _KERNEL
zfs_destroy_unmount_origin(name);
#endif
error = spa_open(name, &spa, FTAG);
if (error != 0)
return (error);
isenabled = spa_feature_is_enabled(spa, SPA_FEATURE_ASYNC_DESTROY);
spa_close(spa, FTAG);
ddha.ddha_name = name;
if (!isenabled) {
objset_t *os;
error = dsl_sync_task(name, dsl_destroy_head_check,
dsl_destroy_head_begin_sync, &ddha,
0, ZFS_SPACE_CHECK_NONE);
if (error != 0)
return (error);
/*
* Head deletion is processed in one txg on old pools;
* remove the objects from open context so that the txg sync
* is not too long.
*/
error = dmu_objset_own(name, DMU_OST_ANY, B_FALSE, B_FALSE,
FTAG, &os);
if (error == 0) {
uint64_t prev_snap_txg =
dsl_dataset_phys(dmu_objset_ds(os))->
ds_prev_snap_txg;
for (uint64_t obj = 0; error == 0;
error = dmu_object_next(os, &obj, FALSE,
prev_snap_txg))
(void) dmu_free_long_object(os, obj);
/* sync out all frees */
txg_wait_synced(dmu_objset_pool(os), 0);
dmu_objset_disown(os, B_FALSE, FTAG);
}
}
return (dsl_sync_task(name, dsl_destroy_head_check,
dsl_destroy_head_sync, &ddha, 0, ZFS_SPACE_CHECK_NONE));
}
/*
* Note, this function is used as the callback for dmu_objset_find(). We
* always return 0 so that we will continue to find and process
* inconsistent datasets, even if we encounter an error trying to
* process one of them.
*/
/* ARGSUSED */
int
dsl_destroy_inconsistent(const char *dsname, void *arg)
{
objset_t *os;
if (dmu_objset_hold(dsname, FTAG, &os) == 0) {
boolean_t need_destroy = DS_IS_INCONSISTENT(dmu_objset_ds(os));
/*
* If the dataset is inconsistent because a resumable receive
* has failed, then do not destroy it.
*/
if (dsl_dataset_has_resume_receive_state(dmu_objset_ds(os)))
need_destroy = B_FALSE;
dmu_objset_rele(os, FTAG);
if (need_destroy)
(void) dsl_destroy_head(dsname);
}
return (0);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_destroy_head);
EXPORT_SYMBOL(dsl_destroy_head_sync_impl);
EXPORT_SYMBOL(dsl_dataset_user_hold_check_one);
EXPORT_SYMBOL(dsl_destroy_snapshot_sync_impl);
EXPORT_SYMBOL(dsl_destroy_inconsistent);
EXPORT_SYMBOL(dsl_dataset_user_release_tmp);
EXPORT_SYMBOL(dsl_destroy_head_check_impl);
#endif
diff --git a/module/zfs/dsl_dir.c b/module/zfs/dsl_dir.c
index bf130eb990ae..a866c3074d3c 100644
--- a/module/zfs/dsl_dir.c
+++ b/module/zfs/dsl_dir.c
@@ -1,2121 +1,2171 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
* Copyright (c) 2013 Martin Matuska. All rights reserved.
* Copyright (c) 2014 Joyent, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
*/
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/metaslab.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/arc.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#include <sys/policy.h>
#include <sys/zfs_znode.h>
#include <sys/zvol.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
/*
* Filesystem and Snapshot Limits
* ------------------------------
*
* These limits are used to restrict the number of filesystems and/or snapshots
* that can be created at a given level in the tree or below. A typical
* use-case is with a delegated dataset where the administrator wants to ensure
* that a user within the zone is not creating too many additional filesystems
* or snapshots, even though they're not exceeding their space quota.
*
* The filesystem and snapshot counts are stored as extensible properties. This
* capability is controlled by a feature flag and must be enabled to be used.
* Once enabled, the feature is not active until the first limit is set. At
* that point, future operations to create/destroy filesystems or snapshots
* will validate and update the counts.
*
* Because the count properties will not exist before the feature is active,
* the counts are updated when a limit is first set on an uninitialized
* dsl_dir node in the tree (The filesystem/snapshot count on a node includes
* all of the nested filesystems/snapshots. Thus, a new leaf node has a
* filesystem count of 0 and a snapshot count of 0. Non-existent filesystem and
* snapshot count properties on a node indicate uninitialized counts on that
* node.) When first setting a limit on an uninitialized node, the code starts
* at the filesystem with the new limit and descends into all sub-filesystems
* to add the count properties.
*
* In practice this is lightweight since a limit is typically set when the
* filesystem is created and thus has no children. Once valid, changing the
* limit value won't require a re-traversal since the counts are already valid.
* When recursively fixing the counts, if a node with a limit is encountered
* during the descent, the counts are known to be valid and there is no need to
* descend into that filesystem's children. The counts on filesystems above the
* one with the new limit will still be uninitialized, unless a limit is
* eventually set on one of those filesystems. The counts are always recursively
* updated when a limit is set on a dataset, unless there is already a limit.
* When a new limit value is set on a filesystem with an existing limit, it is
* possible for the new limit to be less than the current count at that level
* since a user who can change the limit is also allowed to exceed the limit.
*
* Once the feature is active, then whenever a filesystem or snapshot is
* created, the code recurses up the tree, validating the new count against the
* limit at each initialized level. In practice, most levels will not have a
* limit set. If there is a limit at any initialized level up the tree, the
* check must pass or the creation will fail. Likewise, when a filesystem or
* snapshot is destroyed, the counts are recursively adjusted all the way up
* the initialized nodes in the tree. Renaming a filesystem into a different
* point in the tree will first validate, then update the counts on each branch up to
* the common ancestor. A receive will also validate the counts and then update
* them.
*
* An exception to the above behavior is that the limit is not enforced if the
* user has permission to modify the limit. This is primarily so that
* recursive snapshots in the global zone always work. We want to prevent a
* denial-of-service in which a lower level delegated dataset could max out its
* limit and thus block recursive snapshots from being taken in the global zone.
* Because of this, it is possible for the snapshot count to be over the limit
* and snapshots taken in the global zone could cause a lower level dataset to
* hit or exceed its limit. The administrator taking the global zone recursive
* snapshot should be aware of this side-effect and behave accordingly.
* For consistency, the filesystem limit is also not enforced if the user can
* modify the limit.
*
* The filesystem and snapshot limits are validated by dsl_fs_ss_limit_check()
* and updated by dsl_fs_ss_count_adjust(). A new limit value is set up in
* dsl_dir_activate_fs_ss_limit() and the counts are adjusted, if necessary, by
* dsl_dir_init_fs_ss_count().
*
* There is a special case when we receive a filesystem that already exists. In
* this case a temporary clone name of %X is created (see dmu_recv_begin). We
* never update the filesystem counts for temporary clones.
*
* Likewise, we do not update the snapshot counts for temporary snapshots,
* such as those created by zfs diff.
*/
extern inline dsl_dir_phys_t *dsl_dir_phys(dsl_dir_t *dd);
static uint64_t dsl_dir_space_towrite(dsl_dir_t *dd);
+typedef struct ddulrt_arg {
+ dsl_dir_t *ddulrta_dd;
+ uint64_t ddlrta_txg;
+} ddulrt_arg_t;
+
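/*
* Dbuf user-eviction callback: tear down the in-core dsl_dir_t once its
* bonus buffer has been evicted.
*/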
static void
dsl_dir_evict_async(void *dbu)
{
dsl_dir_t *dd = dbu;
int t;
ASSERTV(dsl_pool_t *dp = dd->dd_pool);
dd->dd_dbuf = NULL;
for (t = 0; t < TXG_SIZE; t++) {
ASSERT(!txg_list_member(&dp->dp_dirty_dirs, dd, t));
ASSERT(dd->dd_tempreserved[t] == 0);
ASSERT(dd->dd_space_towrite[t] == 0);
}
if (dd->dd_parent)
dsl_dir_async_rele(dd->dd_parent, dd);
spa_async_close(dd->dd_pool->dp_spa, dd);
dsl_prop_fini(dd);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
}
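/*
* Hold the dsl_dir with object number 'ddobj'. If no in-core dsl_dir_t
* exists yet, construct one from the bonus buffer (loading its crypto key
* object, parent, name, and origin txg) and register it as the dbuf user.
*/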
int
dsl_dir_hold_obj(dsl_pool_t *dp, uint64_t ddobj,
const char *tail, void *tag, dsl_dir_t **ddp)
{
dmu_buf_t *dbuf;
dsl_dir_t *dd;
dmu_object_info_t doi;
int err;
ASSERT(dsl_pool_config_held(dp));
err = dmu_bonus_hold(dp->dp_meta_objset, ddobj, tag, &dbuf);
if (err != 0)
return (err);
dd = dmu_buf_get_user(dbuf);
dmu_object_info_from_db(dbuf, &doi);
ASSERT3U(doi.doi_bonus_type, ==, DMU_OT_DSL_DIR);
ASSERT3U(doi.doi_bonus_size, >=, sizeof (dsl_dir_phys_t));
if (dd == NULL) {
dsl_dir_t *winner;
dd = kmem_zalloc(sizeof (dsl_dir_t), KM_SLEEP);
dd->dd_object = ddobj;
dd->dd_dbuf = dbuf;
dd->dd_pool = dp;
if (dsl_dir_is_zapified(dd) &&
zap_contains(dp->dp_meta_objset, ddobj,
DD_FIELD_CRYPTO_KEY_OBJ) == 0) {
VERIFY0(zap_lookup(dp->dp_meta_objset,
ddobj, DD_FIELD_CRYPTO_KEY_OBJ,
sizeof (uint64_t), 1, &dd->dd_crypto_obj));
/* check for on-disk format errata */
if (dsl_dir_incompatible_encryption_version(dd)) {
dp->dp_spa->spa_errata =
ZPOOL_ERRATA_ZOL_6845_ENCRYPTION;
}
}
mutex_init(&dd->dd_lock, NULL, MUTEX_DEFAULT, NULL);
dsl_prop_init(dd);
dsl_dir_snap_cmtime_update(dd);
if (dsl_dir_phys(dd)->dd_parent_obj) {
err = dsl_dir_hold_obj(dp,
dsl_dir_phys(dd)->dd_parent_obj, NULL, dd,
&dd->dd_parent);
if (err != 0)
goto errout;
if (tail) {
#ifdef ZFS_DEBUG
uint64_t foundobj;
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dd->dd_parent)->
dd_child_dir_zapobj, tail,
sizeof (foundobj), 1, &foundobj);
ASSERT(err || foundobj == ddobj);
#endif
(void) strlcpy(dd->dd_myname, tail,
sizeof (dd->dd_myname));
} else {
err = zap_value_search(dp->dp_meta_objset,
dsl_dir_phys(dd->dd_parent)->
dd_child_dir_zapobj,
ddobj, 0, dd->dd_myname);
}
if (err != 0)
goto errout;
} else {
(void) strcpy(dd->dd_myname, spa_name(dp->dp_spa));
}
if (dsl_dir_is_clone(dd)) {
dmu_buf_t *origin_bonus;
dsl_dataset_phys_t *origin_phys;
/*
* We can't open the origin dataset, because
* that would require opening this dsl_dir.
* Just look at its phys directly instead.
*/
err = dmu_bonus_hold(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_origin_obj, FTAG,
&origin_bonus);
if (err != 0)
goto errout;
origin_phys = origin_bonus->db_data;
dd->dd_origin_txg =
origin_phys->ds_creation_txg;
dmu_buf_rele(origin_bonus, FTAG);
}
dmu_buf_init_user(&dd->dd_dbu, NULL, dsl_dir_evict_async,
&dd->dd_dbuf);
winner = dmu_buf_set_user_ie(dbuf, &dd->dd_dbu);
if (winner != NULL) {
if (dd->dd_parent)
dsl_dir_rele(dd->dd_parent, dd);
dsl_prop_fini(dd);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
dd = winner;
} else {
spa_open_ref(dp->dp_spa, dd);
}
}
/*
* The dsl_dir_t has both open-to-close and instantiate-to-evict
* holds on the spa. We need the open-to-close holds because
* otherwise the spa_refcnt wouldn't change when we open a
* dir which the spa also has open, so we could incorrectly
* think it was OK to unload/export/destroy the pool. We need
* the instantiate-to-evict hold because the dsl_dir_t has a
* pointer to the dd_pool, which has a pointer to the spa_t.
*/
spa_open_ref(dp->dp_spa, tag);
ASSERT3P(dd->dd_pool, ==, dp);
ASSERT3U(dd->dd_object, ==, ddobj);
ASSERT3P(dd->dd_dbuf, ==, dbuf);
*ddp = dd;
return (0);
errout:
if (dd->dd_parent)
dsl_dir_rele(dd->dd_parent, dd);
dsl_prop_fini(dd);
mutex_destroy(&dd->dd_lock);
kmem_free(dd, sizeof (dsl_dir_t));
dmu_buf_rele(dbuf, tag);
return (err);
}
void
dsl_dir_rele(dsl_dir_t *dd, void *tag)
{
dprintf_dd(dd, "%s\n", "");
spa_close(dd->dd_pool->dp_spa, tag);
dmu_buf_rele(dd->dd_dbuf, tag);
}
/*
* Remove a reference to the given dsl dir that is being asynchronously
* released. Async releases occur from a taskq performing eviction of
* dsl datasets and dirs. This process is identical to a normal release
* with the exception of using the async API for releasing the reference on
* the spa.
*/
void
dsl_dir_async_rele(dsl_dir_t *dd, void *tag)
{
dprintf_dd(dd, "%s\n", "");
spa_async_close(dd->dd_pool->dp_spa, tag);
dmu_buf_rele(dd->dd_dbuf, tag);
}
/* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */
void
dsl_dir_name(dsl_dir_t *dd, char *buf)
{
if (dd->dd_parent) {
dsl_dir_name(dd->dd_parent, buf);
VERIFY3U(strlcat(buf, "/", ZFS_MAX_DATASET_NAME_LEN), <,
ZFS_MAX_DATASET_NAME_LEN);
} else {
buf[0] = '\0';
}
if (!MUTEX_HELD(&dd->dd_lock)) {
/*
* recursive mutex so that we can use
* dprintf_dd() with dd_lock held
*/
mutex_enter(&dd->dd_lock);
VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
mutex_exit(&dd->dd_lock);
} else {
VERIFY3U(strlcat(buf, dd->dd_myname, ZFS_MAX_DATASET_NAME_LEN),
<, ZFS_MAX_DATASET_NAME_LEN);
}
}
/* Calculate name length, avoiding all the strcat calls of dsl_dir_name */
int
dsl_dir_namelen(dsl_dir_t *dd)
{
int result = 0;
if (dd->dd_parent) {
/* parent's name + 1 for the "/" */
result = dsl_dir_namelen(dd->dd_parent) + 1;
}
if (!MUTEX_HELD(&dd->dd_lock)) {
/* see dsl_dir_name */
mutex_enter(&dd->dd_lock);
result += strlen(dd->dd_myname);
mutex_exit(&dd->dd_lock);
} else {
result += strlen(dd->dd_myname);
}
return (result);
}
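/*
* Parse the first component of 'path' into 'component' (which must be at
* least ZFS_MAX_DATASET_NAME_LEN bytes) and point *nextp at the rest of the
* path, or set it to NULL when nothing follows.
*/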
static int
getcomponent(const char *path, char *component, const char **nextp)
{
char *p;
if ((path == NULL) || (path[0] == '\0'))
return (SET_ERROR(ENOENT));
/* This would be a good place to reserve some namespace... */
p = strpbrk(path, "/@");
if (p && (p[1] == '/' || p[1] == '@')) {
/* two separators in a row */
return (SET_ERROR(EINVAL));
}
if (p == NULL || p == path) {
/*
* if the first thing is an @ or /, it had better be an
* @ and it had better not have any more ats or slashes,
* and it had better have something after the @.
*/
if (p != NULL &&
(p[0] != '@' || strpbrk(path+1, "/@") || p[1] == '\0'))
return (SET_ERROR(EINVAL));
if (strlen(path) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strcpy(component, path);
p = NULL;
} else if (p[0] == '/') {
if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
p++;
} else if (p[0] == '@') {
/*
* if the next separator is an @, there better not be
* any more slashes.
*/
if (strchr(path, '/'))
return (SET_ERROR(EINVAL));
if (p - path >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
(void) strncpy(component, path, p - path);
component[p - path] = '\0';
} else {
panic("invalid p=%p", (void *)p);
}
*nextp = p;
return (0);
}
/*
* Return the dsl_dir_t, and possibly the last component which couldn't
* be found in *tail. The name must be in the specified dsl_pool_t. This
* thread must hold the dp_config_rwlock for the pool. Returns NULL if the
* path is bogus, or if tail==NULL and we couldn't parse the whole name.
* (*tail)[0] == '@' means that the last component is a snapshot.
*/
int
dsl_dir_hold(dsl_pool_t *dp, const char *name, void *tag,
dsl_dir_t **ddp, const char **tailp)
{
char *buf;
const char *spaname, *next, *nextnext = NULL;
int err;
dsl_dir_t *dd;
uint64_t ddobj;
buf = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
err = getcomponent(name, buf, &next);
if (err != 0)
goto error;
/* Make sure the name is in the specified pool. */
spaname = spa_name(dp->dp_spa);
if (strcmp(buf, spaname) != 0) {
err = SET_ERROR(EXDEV);
goto error;
}
ASSERT(dsl_pool_config_held(dp));
err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj, NULL, tag, &dd);
if (err != 0) {
goto error;
}
while (next != NULL) {
dsl_dir_t *child_dd;
err = getcomponent(next, buf, &nextnext);
if (err != 0)
break;
ASSERT(next[0] != '\0');
if (next[0] == '@')
break;
dprintf("looking up %s in obj%lld\n",
buf, dsl_dir_phys(dd)->dd_child_dir_zapobj);
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dd)->dd_child_dir_zapobj,
buf, sizeof (ddobj), 1, &ddobj);
if (err != 0) {
if (err == ENOENT)
err = 0;
break;
}
err = dsl_dir_hold_obj(dp, ddobj, buf, tag, &child_dd);
if (err != 0)
break;
dsl_dir_rele(dd, tag);
dd = child_dd;
next = nextnext;
}
if (err != 0) {
dsl_dir_rele(dd, tag);
goto error;
}
/*
* It's an error if there's more than one component left, or
* tailp==NULL and there's any component left.
*/
if (next != NULL &&
(tailp == NULL || (nextnext && nextnext[0] != '\0'))) {
/* bad path name */
dsl_dir_rele(dd, tag);
dprintf("next=%p (%s) tail=%p\n", next, next?next:"", tailp);
err = SET_ERROR(ENOENT);
}
if (tailp != NULL)
*tailp = next;
*ddp = dd;
error:
kmem_free(buf, ZFS_MAX_DATASET_NAME_LEN);
return (err);
}
/*
* If the counts are already initialized for this filesystem and its
* descendants then do nothing, otherwise initialize the counts.
*
* The counts on this filesystem, and those below, may be uninitialized due to
* either the use of a pre-existing pool which did not support the
* filesystem/snapshot limit feature, or one in which the feature had not yet
* been enabled.
*
* Recursively descend the filesystem tree and update the filesystem/snapshot
* counts on each filesystem below, then update the cumulative count on the
* current filesystem. If the filesystem already has a count set on it,
* then we know that its counts, and the counts on the filesystems below it,
* are already correct, so we don't have to update this filesystem.
*/
static void
dsl_dir_init_fs_ss_count(dsl_dir_t *dd, dmu_tx_t *tx)
{
uint64_t my_fs_cnt = 0;
uint64_t my_ss_cnt = 0;
dsl_pool_t *dp = dd->dd_pool;
objset_t *os = dp->dp_meta_objset;
zap_cursor_t *zc;
zap_attribute_t *za;
dsl_dataset_t *ds;
ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT));
ASSERT(dsl_pool_config_held(dp));
ASSERT(dmu_tx_is_syncing(tx));
dsl_dir_zapify(dd, tx);
/*
* If the filesystem count has already been initialized then we
* don't need to recurse down any further.
*/
if (zap_contains(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT) == 0)
return;
zc = kmem_alloc(sizeof (zap_cursor_t), KM_SLEEP);
za = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP);
/* Iterate my child dirs */
for (zap_cursor_init(zc, os, dsl_dir_phys(dd)->dd_child_dir_zapobj);
zap_cursor_retrieve(zc, za) == 0; zap_cursor_advance(zc)) {
dsl_dir_t *chld_dd;
uint64_t count;
VERIFY0(dsl_dir_hold_obj(dp, za->za_first_integer, NULL, FTAG,
&chld_dd));
/*
* Ignore hidden ($FREE, $MOS & $ORIGIN) objsets and
* temporary datasets.
*/
if (chld_dd->dd_myname[0] == '$' ||
chld_dd->dd_myname[0] == '%') {
dsl_dir_rele(chld_dd, FTAG);
continue;
}
my_fs_cnt++; /* count this child */
dsl_dir_init_fs_ss_count(chld_dd, tx);
VERIFY0(zap_lookup(os, chld_dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (count), 1, &count));
my_fs_cnt += count;
VERIFY0(zap_lookup(os, chld_dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (count), 1, &count));
my_ss_cnt += count;
dsl_dir_rele(chld_dd, FTAG);
}
zap_cursor_fini(zc);
/* Count my snapshots (we counted children's snapshots above) */
VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds));
for (zap_cursor_init(zc, os, dsl_dataset_phys(ds)->ds_snapnames_zapobj);
zap_cursor_retrieve(zc, za) == 0;
zap_cursor_advance(zc)) {
/* Don't count temporary snapshots */
if (za->za_name[0] != '%')
my_ss_cnt++;
}
zap_cursor_fini(zc);
dsl_dataset_rele(ds, FTAG);
kmem_free(zc, sizeof (zap_cursor_t));
kmem_free(za, sizeof (zap_attribute_t));
/* we're in a sync task, update counts */
dmu_buf_will_dirty(dd->dd_dbuf, tx);
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (my_fs_cnt), 1, &my_fs_cnt, tx));
VERIFY0(zap_add(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (my_ss_cnt), 1, &my_ss_cnt, tx));
}
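/*
* Check callback for activating the filesystem/snapshot limit feature:
* fails with ENOTSUP if the feature is not enabled, or EALREADY if this
* dir's counts have already been initialized.
*/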
static int
dsl_dir_actv_fs_ss_limit_check(void *arg, dmu_tx_t *tx)
{
char *ddname = (char *)arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
dsl_dir_t *dd;
int error;
error = dsl_dataset_hold(dp, ddname, FTAG, &ds);
if (error != 0)
return (error);
if (!spa_feature_is_enabled(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT)) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(ENOTSUP));
}
dd = ds->ds_dir;
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_FS_SS_LIMIT) &&
dsl_dir_is_zapified(dd) &&
zap_contains(dp->dp_meta_objset, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT) == 0) {
dsl_dataset_rele(ds, FTAG);
return (SET_ERROR(EALREADY));
}
dsl_dataset_rele(ds, FTAG);
return (0);
}
static void
dsl_dir_actv_fs_ss_limit_sync(void *arg, dmu_tx_t *tx)
{
char *ddname = (char *)arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
spa_t *spa;
VERIFY0(dsl_dataset_hold(dp, ddname, FTAG, &ds));
spa = dsl_dataset_get_spa(ds);
if (!spa_feature_is_active(spa, SPA_FEATURE_FS_SS_LIMIT)) {
/*
* Since the feature was not active and we're now setting a
* limit, increment the feature-active counter so that the
* feature becomes active for the first time.
*
* We are already in a sync task so we can update the MOS.
*/
spa_feature_incr(spa, SPA_FEATURE_FS_SS_LIMIT, tx);
}
/*
* Since we are now setting a non-UINT64_MAX limit on the filesystem,
* we need to ensure the counts are correct. Descend down the tree from
* this point and update all of the counts to be accurate.
*/
dsl_dir_init_fs_ss_count(ds->ds_dir, tx);
dsl_dataset_rele(ds, FTAG);
}
/*
* Make sure the feature is enabled and activate it if necessary.
* Since we're setting a limit, ensure the on-disk counts are valid.
* This is only called by the ioctl path when setting a limit value.
*
* We do not need to validate the new limit, since users who can change the
* limit are also allowed to exceed the limit.
*/
int
dsl_dir_activate_fs_ss_limit(const char *ddname)
{
int error;
error = dsl_sync_task(ddname, dsl_dir_actv_fs_ss_limit_check,
dsl_dir_actv_fs_ss_limit_sync, (void *)ddname, 0,
ZFS_SPACE_CHECK_RESERVED);
if (error == EALREADY)
error = 0;
return (error);
}
/*
* Used to determine if the filesystem_limit or snapshot_limit should be
* enforced. We allow the limit to be exceeded if the user has permission to
* write the property value. We pass in the creds that we got in the open
* context since we will always be the GZ root in syncing context. We also have
* to handle the case where we are allowed to change the limit on the current
* dataset, but there may be another limit in the tree above.
*
* We can never modify these two properties within a non-global zone. In
* addition, the other checks are modeled on zfs_secpolicy_write_perms. We
* can't use that function since we are already holding the dp_config_rwlock.
* In addition, we already have the dd and dealing with snapshots is simplified
* in this code.
*/
typedef enum {
ENFORCE_ALWAYS,
ENFORCE_NEVER,
ENFORCE_ABOVE
} enforce_res_t;
static enforce_res_t
dsl_enforce_ds_ss_limits(dsl_dir_t *dd, zfs_prop_t prop, cred_t *cr)
{
enforce_res_t enforce = ENFORCE_ALWAYS;
uint64_t obj;
dsl_dataset_t *ds;
uint64_t zoned;
ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT);
#ifdef _KERNEL
if (crgetzoneid(cr) != GLOBAL_ZONEID)
return (ENFORCE_ALWAYS);
if (secpolicy_zfs(cr) == 0)
return (ENFORCE_NEVER);
#endif
if ((obj = dsl_dir_phys(dd)->dd_head_dataset_obj) == 0)
return (ENFORCE_ALWAYS);
ASSERT(dsl_pool_config_held(dd->dd_pool));
if (dsl_dataset_hold_obj(dd->dd_pool, obj, FTAG, &ds) != 0)
return (ENFORCE_ALWAYS);
if (dsl_prop_get_ds(ds, "zoned", 8, 1, &zoned, NULL) || zoned) {
/* Only root can access zoned fs's from the GZ */
enforce = ENFORCE_ALWAYS;
} else {
if (dsl_deleg_access_impl(ds, zfs_prop_to_name(prop), cr) == 0)
enforce = ENFORCE_ABOVE;
}
dsl_dataset_rele(ds, FTAG);
return (enforce);
}
+static void
+dsl_dir_update_last_remap_txg_sync(void *varg, dmu_tx_t *tx)
+{
+ ddulrt_arg_t *arg = varg;
+ uint64_t last_remap_txg;
+ dsl_dir_t *dd = arg->ddulrta_dd;
+ objset_t *mos = dd->dd_pool->dp_meta_objset;
+
+ dsl_dir_zapify(dd, tx);
+ if (zap_lookup(mos, dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
+ sizeof (last_remap_txg), 1, &last_remap_txg) != 0 ||
+ last_remap_txg < arg->ddlrta_txg) {
+ VERIFY0(zap_update(mos, dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
+ sizeof (arg->ddlrta_txg), 1, &arg->ddlrta_txg, tx));
+ }
+}
+
+int
+dsl_dir_update_last_remap_txg(dsl_dir_t *dd, uint64_t txg)
+{
+ ddulrt_arg_t arg;
+ arg.ddulrta_dd = dd;
+ arg.ddlrta_txg = txg;
+
+ return (dsl_sync_task(spa_name(dd->dd_pool->dp_spa),
+ NULL, dsl_dir_update_last_remap_txg_sync, &arg,
+ 1, ZFS_SPACE_CHECK_RESERVED));
+}
+
/*
* Check if adding additional child filesystem(s) would exceed any filesystem
* limits or adding additional snapshot(s) would exceed any snapshot limits.
* The prop argument indicates which limit to check.
*
* Note that all filesystem limits up to the root (or the highest
* initialized) filesystem or the given ancestor must be satisfied.
*/
int
dsl_fs_ss_limit_check(dsl_dir_t *dd, uint64_t delta, zfs_prop_t prop,
dsl_dir_t *ancestor, cred_t *cr)
{
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t limit, count;
char *count_prop;
enforce_res_t enforce;
int err = 0;
ASSERT(dsl_pool_config_held(dd->dd_pool));
ASSERT(prop == ZFS_PROP_FILESYSTEM_LIMIT ||
prop == ZFS_PROP_SNAPSHOT_LIMIT);
/*
* If we're allowed to change the limit, don't enforce the limit
* e.g. this can happen if a snapshot is taken by an administrative
* user in the global zone (i.e. a recursive snapshot by root).
* However, we must handle the case of delegated permissions where we
* are allowed to change the limit on the current dataset, but there
* is another limit in the tree above.
*/
enforce = dsl_enforce_ds_ss_limits(dd, prop, cr);
if (enforce == ENFORCE_NEVER)
return (0);
/*
* e.g. if renaming a dataset with no snapshots, count adjustment
* is 0.
*/
if (delta == 0)
return (0);
if (prop == ZFS_PROP_SNAPSHOT_LIMIT) {
/*
* We don't enforce the limit for temporary snapshots. This is
* indicated by a NULL cred_t argument.
*/
if (cr == NULL)
return (0);
count_prop = DD_FIELD_SNAPSHOT_COUNT;
} else {
count_prop = DD_FIELD_FILESYSTEM_COUNT;
}
/*
* If an ancestor has been provided, stop checking the limit once we
* hit that dir. We need this during rename so that we don't overcount
* the check once we recurse up to the common ancestor.
*/
if (ancestor == dd)
return (0);
/*
* If we hit an uninitialized node while recursing up the tree, we can
* stop since we know there is no limit here (or above). The counts are
* not valid on this node and we know we won't touch this node's counts.
*/
if (!dsl_dir_is_zapified(dd) || zap_lookup(os, dd->dd_object,
count_prop, sizeof (count), 1, &count) == ENOENT)
return (0);
err = dsl_prop_get_dd(dd, zfs_prop_to_name(prop), 8, 1, &limit, NULL,
B_FALSE);
if (err != 0)
return (err);
/* Is there a limit which we've hit? */
if (enforce == ENFORCE_ALWAYS && (count + delta) > limit)
return (SET_ERROR(EDQUOT));
if (dd->dd_parent != NULL)
err = dsl_fs_ss_limit_check(dd->dd_parent, delta, prop,
ancestor, cr);
return (err);
}
/*
* Adjust the filesystem or snapshot count for the specified dsl_dir_t and all
* parents. When a new filesystem/snapshot is created, increment the count on
* all parents, and when a filesystem/snapshot is destroyed, decrement the
* count.
*/
void
dsl_fs_ss_count_adjust(dsl_dir_t *dd, int64_t delta, const char *prop,
dmu_tx_t *tx)
{
int err;
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t count;
ASSERT(dsl_pool_config_held(dd->dd_pool));
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0 ||
strcmp(prop, DD_FIELD_SNAPSHOT_COUNT) == 0);
/*
* When we receive an incremental stream into a filesystem that already
* exists, a temporary clone is created. We don't count this temporary
* clone, whose name begins with a '%'. We also ignore hidden ($FREE,
* $MOS & $ORIGIN) objsets.
*/
if ((dd->dd_myname[0] == '%' || dd->dd_myname[0] == '$') &&
strcmp(prop, DD_FIELD_FILESYSTEM_COUNT) == 0)
return;
/*
* e.g. if renaming a dataset with no snapshots, count adjustment is 0
*/
if (delta == 0)
return;
/*
* If we hit an uninitialized node while recursing up the tree, we can
* stop since we know the counts are not valid on this node and we
* know we shouldn't touch this node's counts. An uninitialized count
* on the node indicates that either the feature has not yet been
* activated or there are no limits on this part of the tree.
*/
if (!dsl_dir_is_zapified(dd) || (err = zap_lookup(os, dd->dd_object,
prop, sizeof (count), 1, &count)) == ENOENT)
return;
VERIFY0(err);
count += delta;
/* Use a signed verify to make sure the count hasn't gone negative. */
VERIFY3S(count, >=, 0);
VERIFY0(zap_update(os, dd->dd_object, prop, sizeof (count), 1, &count,
tx));
/* Roll up this additional count into our ancestors */
if (dd->dd_parent != NULL)
dsl_fs_ss_count_adjust(dd->dd_parent, delta, prop, tx);
}
uint64_t
dsl_dir_create_sync(dsl_pool_t *dp, dsl_dir_t *pds, const char *name,
dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;
uint64_t ddobj;
dsl_dir_phys_t *ddphys;
dmu_buf_t *dbuf;
ddobj = dmu_object_alloc(mos, DMU_OT_DSL_DIR, 0,
DMU_OT_DSL_DIR, sizeof (dsl_dir_phys_t), tx);
if (pds) {
VERIFY(0 == zap_add(mos, dsl_dir_phys(pds)->dd_child_dir_zapobj,
name, sizeof (uint64_t), 1, &ddobj, tx));
} else {
/* it's the root dir */
VERIFY(0 == zap_add(mos, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1, &ddobj, tx));
}
VERIFY(0 == dmu_bonus_hold(mos, ddobj, FTAG, &dbuf));
dmu_buf_will_dirty(dbuf, tx);
ddphys = dbuf->db_data;
ddphys->dd_creation_time = gethrestime_sec();
if (pds) {
ddphys->dd_parent_obj = pds->dd_object;
/* update the filesystem counts */
dsl_fs_ss_count_adjust(pds, 1, DD_FIELD_FILESYSTEM_COUNT, tx);
}
ddphys->dd_props_zapobj = zap_create(mos,
DMU_OT_DSL_PROPS, DMU_OT_NONE, 0, tx);
ddphys->dd_child_dir_zapobj = zap_create(mos,
DMU_OT_DSL_DIR_CHILD_MAP, DMU_OT_NONE, 0, tx);
if (spa_version(dp->dp_spa) >= SPA_VERSION_USED_BREAKDOWN)
ddphys->dd_flags |= DD_FLAG_USED_BREAKDOWN;
dmu_buf_rele(dbuf, FTAG);
return (ddobj);
}
boolean_t
dsl_dir_is_clone(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_origin_obj &&
(dd->dd_pool->dp_origin_snap == NULL ||
dsl_dir_phys(dd)->dd_origin_obj !=
dd->dd_pool->dp_origin_snap->ds_object));
}
-
uint64_t
dsl_dir_get_used(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_bytes);
}
uint64_t
dsl_dir_get_quota(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_quota);
}
uint64_t
dsl_dir_get_reservation(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_reserved);
}
uint64_t
dsl_dir_get_compressratio(dsl_dir_t *dd)
{
/* a fixed point number, 100x the ratio */
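/*
* e.g. dd_uncompressed_bytes = 300G stored in dd_compressed_bytes = 100G
* on disk returns 300, which userland renders as a 3.00x ratio.
*/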
return (dsl_dir_phys(dd)->dd_compressed_bytes == 0 ? 100 :
(dsl_dir_phys(dd)->dd_uncompressed_bytes * 100 /
dsl_dir_phys(dd)->dd_compressed_bytes));
}
uint64_t
dsl_dir_get_logicalused(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_uncompressed_bytes);
}
uint64_t
dsl_dir_get_usedsnap(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_SNAP]);
}
uint64_t
dsl_dir_get_usedds(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_HEAD]);
}
uint64_t
dsl_dir_get_usedrefreserv(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_REFRSRV]);
}
uint64_t
dsl_dir_get_usedchild(dsl_dir_t *dd)
{
return (dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD] +
dsl_dir_phys(dd)->dd_used_breakdown[DD_USED_CHILD_RSRV]);
}
void
dsl_dir_get_origin(dsl_dir_t *dd, char *buf)
{
dsl_dataset_t *ds;
VERIFY0(dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &ds));
dsl_dataset_name(ds, buf);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_get_filesystem_count(dsl_dir_t *dd, uint64_t *count)
{
if (dsl_dir_is_zapified(dd)) {
objset_t *os = dd->dd_pool->dp_meta_objset;
return (zap_lookup(os, dd->dd_object, DD_FIELD_FILESYSTEM_COUNT,
sizeof (*count), 1, count));
} else {
return (ENOENT);
}
}
int
dsl_dir_get_snapshot_count(dsl_dir_t *dd, uint64_t *count)
{
if (dsl_dir_is_zapified(dd)) {
objset_t *os = dd->dd_pool->dp_meta_objset;
return (zap_lookup(os, dd->dd_object, DD_FIELD_SNAPSHOT_COUNT,
sizeof (*count), 1, count));
} else {
return (ENOENT);
}
}
+int
+dsl_dir_get_remaptxg(dsl_dir_t *dd, uint64_t *count)
+{
+ if (dsl_dir_is_zapified(dd)) {
+ objset_t *os = dd->dd_pool->dp_meta_objset;
+ return (zap_lookup(os, dd->dd_object, DD_FIELD_LAST_REMAP_TXG,
+ sizeof (*count), 1, count));
+ } else {
+ return (ENOENT);
+ }
+}
+
void
dsl_dir_stats(dsl_dir_t *dd, nvlist_t *nv)
{
mutex_enter(&dd->dd_lock);
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_QUOTA,
dsl_dir_get_quota(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_RESERVATION,
dsl_dir_get_reservation(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_LOGICALUSED,
dsl_dir_get_logicalused(dd));
if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDSNAP,
dsl_dir_get_usedsnap(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDDS,
dsl_dir_get_usedds(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDREFRESERV,
dsl_dir_get_usedrefreserv(dd));
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USEDCHILD,
dsl_dir_get_usedchild(dd));
}
mutex_exit(&dd->dd_lock);
uint64_t count;
if (dsl_dir_get_filesystem_count(dd, &count) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_FILESYSTEM_COUNT,
count);
}
if (dsl_dir_get_snapshot_count(dd, &count) == 0) {
dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_SNAPSHOT_COUNT,
count);
}
+ if (dsl_dir_get_remaptxg(dd, &count) == 0) {
+ dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_REMAPTXG,
+ count);
+ }
if (dsl_dir_is_clone(dd)) {
char buf[ZFS_MAX_DATASET_NAME_LEN];
dsl_dir_get_origin(dd, buf);
dsl_prop_nvlist_add_string(nv, ZFS_PROP_ORIGIN, buf);
}
}
void
dsl_dir_dirty(dsl_dir_t *dd, dmu_tx_t *tx)
{
dsl_pool_t *dp = dd->dd_pool;
ASSERT(dsl_dir_phys(dd));
if (txg_list_add(&dp->dp_dirty_dirs, dd, tx->tx_txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(dd->dd_dbuf, dd);
}
}
static int64_t
parent_delta(dsl_dir_t *dd, uint64_t used, int64_t delta)
{
uint64_t old_accounted = MAX(used, dsl_dir_phys(dd)->dd_reserved);
uint64_t new_accounted =
MAX(used + delta, dsl_dir_phys(dd)->dd_reserved);
return (new_accounted - old_accounted);
}
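/*
* Worked example for parent_delta(): with dd_reserved = 10G and
* used = 8G, a delta of +4G moves the accounted value from
* MAX(8G, 10G) = 10G to MAX(12G, 10G) = 12G, so +2G is charged to the
* parent; a delta of +1G stays within the reservation and returns 0.
*/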
void
dsl_dir_sync(dsl_dir_t *dd, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
mutex_enter(&dd->dd_lock);
ASSERT0(dd->dd_tempreserved[tx->tx_txg&TXG_MASK]);
dprintf_dd(dd, "txg=%llu towrite=%lluK\n", tx->tx_txg,
dd->dd_space_towrite[tx->tx_txg&TXG_MASK] / 1024);
dd->dd_space_towrite[tx->tx_txg&TXG_MASK] = 0;
mutex_exit(&dd->dd_lock);
/* release the hold from dsl_dir_dirty */
dmu_buf_rele(dd->dd_dbuf, dd);
}
static uint64_t
dsl_dir_space_towrite(dsl_dir_t *dd)
{
uint64_t space = 0;
ASSERT(MUTEX_HELD(&dd->dd_lock));
for (int i = 0; i < TXG_SIZE; i++) {
space += dd->dd_space_towrite[i & TXG_MASK];
ASSERT3U(dd->dd_space_towrite[i & TXG_MASK], >=, 0);
}
return (space);
}
/*
* How much space would dd have available if ancestor had delta applied
* to it? If ondiskonly is set, we're only interested in what's
* on-disk, not estimated pending changes.
*/
uint64_t
dsl_dir_space_available(dsl_dir_t *dd,
dsl_dir_t *ancestor, int64_t delta, int ondiskonly)
{
uint64_t parentspace, myspace, quota, used;
/*
* If there are no restrictions otherwise, assume we have
* unlimited space available.
*/
quota = UINT64_MAX;
parentspace = UINT64_MAX;
if (dd->dd_parent != NULL) {
parentspace = dsl_dir_space_available(dd->dd_parent,
ancestor, delta, ondiskonly);
}
mutex_enter(&dd->dd_lock);
if (dsl_dir_phys(dd)->dd_quota != 0)
quota = dsl_dir_phys(dd)->dd_quota;
used = dsl_dir_phys(dd)->dd_used_bytes;
if (!ondiskonly)
used += dsl_dir_space_towrite(dd);
if (dd->dd_parent == NULL) {
uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, FALSE);
quota = MIN(quota, poolsize);
}
if (dsl_dir_phys(dd)->dd_reserved > used && parentspace != UINT64_MAX) {
/*
* We have some space reserved, in addition to what our
* parent gave us.
*/
parentspace += dsl_dir_phys(dd)->dd_reserved - used;
}
if (dd == ancestor) {
ASSERT(delta <= 0);
ASSERT(used >= -delta);
used += delta;
if (parentspace != UINT64_MAX)
parentspace -= delta;
}
if (used > quota) {
/* over quota */
myspace = 0;
} else {
/*
* the lesser of the space provided by our parent and
* the space left in our quota
*/
myspace = MIN(parentspace, quota - used);
}
mutex_exit(&dd->dd_lock);
return (myspace);
}
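/*
* Example: if this dsl_dir has quota = 50G and used = 30G while its
* parent can offer 100G, dsl_dir_space_available() returns
* MIN(100G, 50G - 30G) = 20G; with no quota set, the parent's 100G
* (plus any unused reservation of this dir) is passed through.
*/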
struct tempreserve {
list_node_t tr_node;
dsl_dir_t *tr_ds;
uint64_t tr_size;
};
static int
dsl_dir_tempreserve_impl(dsl_dir_t *dd, uint64_t asize, boolean_t netfree,
boolean_t ignorequota, list_t *tr_list,
dmu_tx_t *tx, boolean_t first)
{
uint64_t txg;
uint64_t quota;
struct tempreserve *tr;
int retval;
uint64_t ref_rsrv;
top_of_function:
txg = tx->tx_txg;
retval = EDQUOT;
ref_rsrv = 0;
ASSERT3U(txg, !=, 0);
ASSERT3S(asize, >, 0);
mutex_enter(&dd->dd_lock);
/*
* Check against the dsl_dir's quota. We don't add in the delta
* when checking for over-quota because they get one free hit.
*/
uint64_t est_inflight = dsl_dir_space_towrite(dd);
for (int i = 0; i < TXG_SIZE; i++)
est_inflight += dd->dd_tempreserved[i];
uint64_t used_on_disk = dsl_dir_phys(dd)->dd_used_bytes;
/*
* On the first iteration, fetch the dataset's used-on-disk and
* refreservation values. Also, unless this is a net-free operation
* (netfree is passed inverted as the refquota-check flag), test if
* allocating this space would exceed the dataset's refquota.
*/
if (first && tx->tx_objset) {
int error;
dsl_dataset_t *ds = tx->tx_objset->os_dsl_dataset;
error = dsl_dataset_check_quota(ds, !netfree,
asize, est_inflight, &used_on_disk, &ref_rsrv);
if (error != 0) {
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
return (error);
}
}
/*
* If this transaction will result in a net free of space,
* we want to let it through.
*/
if (ignorequota || netfree || dsl_dir_phys(dd)->dd_quota == 0)
quota = UINT64_MAX;
else
quota = dsl_dir_phys(dd)->dd_quota;
/*
* Adjust the quota against the actual pool size at the root
* minus any outstanding deferred frees.
* To ensure that it's possible to remove files from a full
* pool without inducing transient overcommits, we throttle
* netfree transactions against a quota that is slightly larger,
* but still within the pool's allocation slop. In cases where
* we're very close to full, this will allow a steady trickle of
* removes to get through.
*/
uint64_t deferred = 0;
if (dd->dd_parent == NULL) {
spa_t *spa = dd->dd_pool->dp_spa;
uint64_t poolsize = dsl_pool_adjustedsize(dd->dd_pool, netfree);
deferred = metaslab_class_get_deferred(spa_normal_class(spa));
if (poolsize - deferred < quota) {
quota = poolsize - deferred;
retval = ENOSPC;
}
}
/*
* If they are requesting more space, and our current estimate
* is over quota, they get to try again unless the actual
* on-disk is over quota and there are no pending changes (which
* may free up space for us).
*/
if (used_on_disk + est_inflight >= quota) {
if (est_inflight > 0 || used_on_disk < quota ||
(retval == ENOSPC && used_on_disk < quota + deferred))
retval = ERESTART;
dprintf_dd(dd, "failing: used=%lluK inflight = %lluK "
"quota=%lluK tr=%lluK err=%d\n",
used_on_disk>>10, est_inflight>>10,
quota>>10, asize>>10, retval);
mutex_exit(&dd->dd_lock);
DMU_TX_STAT_BUMP(dmu_tx_quota);
return (SET_ERROR(retval));
}
/* We need to up our estimated delta before dropping dd_lock */
dd->dd_tempreserved[txg & TXG_MASK] += asize;
uint64_t parent_rsrv = parent_delta(dd, used_on_disk + est_inflight,
asize - ref_rsrv);
mutex_exit(&dd->dd_lock);
tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr->tr_ds = dd;
tr->tr_size = asize;
list_insert_tail(tr_list, tr);
/* see if it's OK with our parent */
if (dd->dd_parent != NULL && parent_rsrv != 0) {
/*
* "Recurse" on our parent iteratively rather than by calling
* ourselves: real recursion here has been observed to use a large
* amount of stack even within the test suite (the largest stack seen
* was 7632 bytes on Linux).
*/
dd = dd->dd_parent;
asize = parent_rsrv;
ignorequota = (dsl_dir_phys(dd)->dd_head_dataset_obj == 0);
first = B_FALSE;
goto top_of_function;
} else {
return (0);
}
}
/*
* Reserve space in this dsl_dir, to be used in this tx's txg.
* After the space has been dirtied (and dsl_dir_willuse_space()
* has been called), the reservation should be canceled, using
* dsl_dir_tempreserve_clear().
*/
int
dsl_dir_tempreserve_space(dsl_dir_t *dd, uint64_t lsize, uint64_t asize,
boolean_t netfree, void **tr_cookiep, dmu_tx_t *tx)
{
int err;
list_t *tr_list;
if (asize == 0) {
*tr_cookiep = NULL;
return (0);
}
tr_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
list_create(tr_list, sizeof (struct tempreserve),
offsetof(struct tempreserve, tr_node));
ASSERT3S(asize, >, 0);
err = arc_tempreserve_space(lsize, tx->tx_txg);
if (err == 0) {
struct tempreserve *tr;
tr = kmem_zalloc(sizeof (struct tempreserve), KM_SLEEP);
tr->tr_size = lsize;
list_insert_tail(tr_list, tr);
} else {
if (err == EAGAIN) {
/*
* If arc_memory_throttle() detected that pageout
* is running and we are low on memory, we delay new
* non-pageout transactions to give pageout an
* advantage.
*
* It is unfortunate to be delaying while the caller's
* locks are held.
*/
txg_delay(dd->dd_pool, tx->tx_txg,
MSEC2NSEC(10), MSEC2NSEC(10));
err = SET_ERROR(ERESTART);
}
}
if (err == 0) {
err = dsl_dir_tempreserve_impl(dd, asize, netfree,
B_FALSE, tr_list, tx, B_TRUE);
}
if (err != 0)
dsl_dir_tempreserve_clear(tr_list, tx);
else
*tr_cookiep = tr_list;
return (err);
}
/*
* Clear a temporary reservation that we previously made with
* dsl_dir_tempreserve_space().
*/
void
dsl_dir_tempreserve_clear(void *tr_cookie, dmu_tx_t *tx)
{
int txgidx = tx->tx_txg & TXG_MASK;
list_t *tr_list = tr_cookie;
struct tempreserve *tr;
ASSERT3U(tx->tx_txg, !=, 0);
if (tr_cookie == NULL)
return;
while ((tr = list_head(tr_list)) != NULL) {
if (tr->tr_ds) {
mutex_enter(&tr->tr_ds->dd_lock);
ASSERT3U(tr->tr_ds->dd_tempreserved[txgidx], >=,
tr->tr_size);
tr->tr_ds->dd_tempreserved[txgidx] -= tr->tr_size;
mutex_exit(&tr->tr_ds->dd_lock);
} else {
arc_tempreserve_clear(tr->tr_size);
}
list_remove(tr_list, tr);
kmem_free(tr, sizeof (struct tempreserve));
}
kmem_free(tr_list, sizeof (list_t));
}
/*
* This should be called from open context when we think we're going to write
* or free space, for example when dirtying data. Be conservative; it's okay
* to write less space or free more, but we don't want to write more or free
* less than the amount specified.
*
* NOTE: The behavior of this function is identical to the Illumos / FreeBSD
* version; however, it has been adjusted to use an iterative rather than
* recursive algorithm to minimize stack usage.
*/
void
dsl_dir_willuse_space(dsl_dir_t *dd, int64_t space, dmu_tx_t *tx)
{
int64_t parent_space;
uint64_t est_used;
do {
mutex_enter(&dd->dd_lock);
if (space > 0)
dd->dd_space_towrite[tx->tx_txg & TXG_MASK] += space;
est_used = dsl_dir_space_towrite(dd) +
dsl_dir_phys(dd)->dd_used_bytes;
parent_space = parent_delta(dd, est_used, space);
mutex_exit(&dd->dd_lock);
/* Make sure that we clean up dd_space_to* */
dsl_dir_dirty(dd, tx);
dd = dd->dd_parent;
space = parent_space;
} while (space && dd);
}
/* call from syncing context when we actually write/free space for this dd */
void
dsl_dir_diduse_space(dsl_dir_t *dd, dd_used_t type,
int64_t used, int64_t compressed, int64_t uncompressed, dmu_tx_t *tx)
{
int64_t accounted_delta;
/*
* dsl_dataset_set_refreservation_sync_impl() calls this with
* dd_lock held, so that it can atomically update
* ds->ds_reserved and the dsl_dir accounting, so that
* dsl_dataset_check_quota() can see dataset and dir accounting
* consistently.
*/
boolean_t needlock = !MUTEX_HELD(&dd->dd_lock);
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(type < DD_USED_NUM);
dmu_buf_will_dirty(dd->dd_dbuf, tx);
if (needlock)
mutex_enter(&dd->dd_lock);
accounted_delta =
parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, used);
ASSERT(used >= 0 || dsl_dir_phys(dd)->dd_used_bytes >= -used);
ASSERT(compressed >= 0 ||
dsl_dir_phys(dd)->dd_compressed_bytes >= -compressed);
ASSERT(uncompressed >= 0 ||
dsl_dir_phys(dd)->dd_uncompressed_bytes >= -uncompressed);
dsl_dir_phys(dd)->dd_used_bytes += used;
dsl_dir_phys(dd)->dd_uncompressed_bytes += uncompressed;
dsl_dir_phys(dd)->dd_compressed_bytes += compressed;
if (dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN) {
ASSERT(used > 0 ||
dsl_dir_phys(dd)->dd_used_breakdown[type] >= -used);
dsl_dir_phys(dd)->dd_used_breakdown[type] += used;
#ifdef DEBUG
{
dd_used_t t;
uint64_t u = 0;
for (t = 0; t < DD_USED_NUM; t++)
u += dsl_dir_phys(dd)->dd_used_breakdown[t];
ASSERT3U(u, ==, dsl_dir_phys(dd)->dd_used_bytes);
}
#endif
}
if (needlock)
mutex_exit(&dd->dd_lock);
if (dd->dd_parent != NULL) {
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
accounted_delta, compressed, uncompressed, tx);
dsl_dir_transfer_space(dd->dd_parent,
used - accounted_delta,
DD_USED_CHILD_RSRV, DD_USED_CHILD, tx);
}
}
void
dsl_dir_transfer_space(dsl_dir_t *dd, int64_t delta,
dd_used_t oldtype, dd_used_t newtype, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(oldtype < DD_USED_NUM);
ASSERT(newtype < DD_USED_NUM);
if (delta == 0 ||
!(dsl_dir_phys(dd)->dd_flags & DD_FLAG_USED_BREAKDOWN))
return;
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
ASSERT(delta > 0 ?
dsl_dir_phys(dd)->dd_used_breakdown[oldtype] >= delta :
dsl_dir_phys(dd)->dd_used_breakdown[newtype] >= -delta);
ASSERT(dsl_dir_phys(dd)->dd_used_bytes >= ABS(delta));
dsl_dir_phys(dd)->dd_used_breakdown[oldtype] -= delta;
dsl_dir_phys(dd)->dd_used_breakdown[newtype] += delta;
mutex_exit(&dd->dd_lock);
}
typedef struct dsl_dir_set_qr_arg {
const char *ddsqra_name;
zprop_source_t ddsqra_source;
uint64_t ddsqra_value;
} dsl_dir_set_qr_arg_t;
static int
dsl_dir_set_quota_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
int error;
uint64_t towrite, newval;
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
error = dsl_prop_predict(ds->ds_dir, "quota",
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
if (newval == 0) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
mutex_enter(&ds->ds_dir->dd_lock);
/*
* If we are doing the preliminary check in open context, and
* there are pending changes, then don't fail it, since the
* pending changes could under-estimate the amount of space to be
* freed up.
*/
towrite = dsl_dir_space_towrite(ds->ds_dir);
if ((dmu_tx_is_syncing(tx) || towrite == 0) &&
(newval < dsl_dir_phys(ds->ds_dir)->dd_reserved ||
newval < dsl_dir_phys(ds->ds_dir)->dd_used_bytes + towrite)) {
error = SET_ERROR(ENOSPC);
}
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
return (error);
}
static void
dsl_dir_set_quota_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
dsl_prop_set_sync_impl(ds, zfs_prop_to_name(ZFS_PROP_QUOTA),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_QUOTA), &newval));
} else {
newval = ddsqra->ddsqra_value;
spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
zfs_prop_to_name(ZFS_PROP_QUOTA), (longlong_t)newval);
}
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
mutex_enter(&ds->ds_dir->dd_lock);
dsl_dir_phys(ds->ds_dir)->dd_quota = newval;
mutex_exit(&ds->ds_dir->dd_lock);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_quota(const char *ddname, zprop_source_t source, uint64_t quota)
{
dsl_dir_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = ddname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = quota;
return (dsl_sync_task(ddname, dsl_dir_set_quota_check,
dsl_dir_set_quota_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
}
int
dsl_dir_set_reservation_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
dsl_dir_t *dd;
uint64_t newval, used, avail;
int error;
error = dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds);
if (error != 0)
return (error);
dd = ds->ds_dir;
/*
* If we are doing the preliminary check in open context, the
* space estimates may be inaccurate.
*/
if (!dmu_tx_is_syncing(tx)) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
error = dsl_prop_predict(ds->ds_dir,
zfs_prop_to_name(ZFS_PROP_RESERVATION),
ddsqra->ddsqra_source, ddsqra->ddsqra_value, &newval);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
return (error);
}
mutex_enter(&dd->dd_lock);
used = dsl_dir_phys(dd)->dd_used_bytes;
mutex_exit(&dd->dd_lock);
if (dd->dd_parent) {
avail = dsl_dir_space_available(dd->dd_parent,
NULL, 0, FALSE);
} else {
avail = dsl_pool_adjustedsize(dd->dd_pool, B_FALSE) - used;
}
if (MAX(used, newval) > MAX(used, dsl_dir_phys(dd)->dd_reserved)) {
uint64_t delta = MAX(used, newval) -
MAX(used, dsl_dir_phys(dd)->dd_reserved);
if (delta > avail ||
(dsl_dir_phys(dd)->dd_quota > 0 &&
newval > dsl_dir_phys(dd)->dd_quota))
error = SET_ERROR(ENOSPC);
}
dsl_dataset_rele(ds, FTAG);
return (error);
}
void
dsl_dir_set_reservation_sync_impl(dsl_dir_t *dd, uint64_t value, dmu_tx_t *tx)
{
uint64_t used;
int64_t delta;
dmu_buf_will_dirty(dd->dd_dbuf, tx);
mutex_enter(&dd->dd_lock);
used = dsl_dir_phys(dd)->dd_used_bytes;
delta = MAX(used, value) - MAX(used, dsl_dir_phys(dd)->dd_reserved);
dsl_dir_phys(dd)->dd_reserved = value;
if (dd->dd_parent != NULL) {
/* Roll up this additional usage into our ancestors */
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
delta, 0, 0, tx);
}
mutex_exit(&dd->dd_lock);
}
static void
dsl_dir_set_reservation_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_set_qr_arg_t *ddsqra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dataset_t *ds;
uint64_t newval;
VERIFY0(dsl_dataset_hold(dp, ddsqra->ddsqra_name, FTAG, &ds));
if (spa_version(dp->dp_spa) >= SPA_VERSION_RECVD_PROPS) {
dsl_prop_set_sync_impl(ds,
zfs_prop_to_name(ZFS_PROP_RESERVATION),
ddsqra->ddsqra_source, sizeof (ddsqra->ddsqra_value), 1,
&ddsqra->ddsqra_value, tx);
VERIFY0(dsl_prop_get_int_ds(ds,
zfs_prop_to_name(ZFS_PROP_RESERVATION), &newval));
} else {
newval = ddsqra->ddsqra_value;
spa_history_log_internal_ds(ds, "set", tx, "%s=%lld",
zfs_prop_to_name(ZFS_PROP_RESERVATION),
(longlong_t)newval);
}
dsl_dir_set_reservation_sync_impl(ds->ds_dir, newval, tx);
dsl_dataset_rele(ds, FTAG);
}
int
dsl_dir_set_reservation(const char *ddname, zprop_source_t source,
uint64_t reservation)
{
dsl_dir_set_qr_arg_t ddsqra;
ddsqra.ddsqra_name = ddname;
ddsqra.ddsqra_source = source;
ddsqra.ddsqra_value = reservation;
return (dsl_sync_task(ddname, dsl_dir_set_reservation_check,
dsl_dir_set_reservation_sync, &ddsqra, 0, ZFS_SPACE_CHECK_NONE));
}
static dsl_dir_t *
closest_common_ancestor(dsl_dir_t *ds1, dsl_dir_t *ds2)
{
for (; ds1; ds1 = ds1->dd_parent) {
dsl_dir_t *dd;
for (dd = ds2; dd; dd = dd->dd_parent) {
if (ds1 == dd)
return (dd);
}
}
return (NULL);
}
/*
* If delta is applied to dd, how much of that delta would be applied to
* ancestor? Syncing context only.
*/
static int64_t
would_change(dsl_dir_t *dd, int64_t delta, dsl_dir_t *ancestor)
{
if (dd == ancestor)
return (delta);
mutex_enter(&dd->dd_lock);
delta = parent_delta(dd, dsl_dir_phys(dd)->dd_used_bytes, delta);
mutex_exit(&dd->dd_lock);
return (would_change(dd->dd_parent, delta, ancestor));
}
typedef struct dsl_dir_rename_arg {
const char *ddra_oldname;
const char *ddra_newname;
cred_t *ddra_cred;
} dsl_dir_rename_arg_t;
/* ARGSUSED */
static int
dsl_valid_rename(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
int *deltap = arg;
char namebuf[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds, namebuf);
if (strlen(namebuf) + *deltap >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
dsl_dir_rename_check(void *arg, dmu_tx_t *tx)
{
dsl_dir_rename_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd, *newparent;
const char *mynewname;
int error;
int delta = strlen(ddra->ddra_newname) - strlen(ddra->ddra_oldname);
/* target dir should exist */
error = dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL);
if (error != 0)
return (error);
/* new parent should exist */
error = dsl_dir_hold(dp, ddra->ddra_newname, FTAG,
&newparent, &mynewname);
if (error != 0) {
dsl_dir_rele(dd, FTAG);
return (error);
}
/* can't rename to different pool */
if (dd->dd_pool != newparent->dd_pool) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EXDEV));
}
/* new name should not already exist */
if (mynewname == NULL) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EEXIST));
}
/* if the name length is growing, validate child name lengths */
if (delta > 0) {
error = dmu_objset_find_dp(dp, dd->dd_object, dsl_valid_rename,
&delta, DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
}
if (dmu_tx_is_syncing(tx)) {
if (spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_FS_SS_LIMIT)) {
/*
* Although this is the check function and we don't
* normally make on-disk changes in check functions,
* we need to do that here.
*
* Ensure this portion of the tree's counts have been
* initialized in case the new parent has limits set.
*/
dsl_dir_init_fs_ss_count(dd, tx);
}
}
if (newparent != dd->dd_parent) {
/* is there enough space? */
uint64_t myspace =
MAX(dsl_dir_phys(dd)->dd_used_bytes,
dsl_dir_phys(dd)->dd_reserved);
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t fs_cnt = 0;
uint64_t ss_cnt = 0;
if (dsl_dir_is_zapified(dd)) {
int err;
err = zap_lookup(os, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
&fs_cnt);
if (err != ENOENT && err != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (err);
}
/*
* have to add 1 for the filesystem itself that we're
* moving
*/
fs_cnt++;
err = zap_lookup(os, dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
&ss_cnt);
if (err != ENOENT && err != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (err);
}
}
/* check for encryption errors */
error = dsl_dir_rename_crypt_check(dd, newparent);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EACCES));
}
/* no rename into our descendant */
if (closest_common_ancestor(dd, newparent) == dd) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_dir_transfer_possible(dd->dd_parent,
newparent, fs_cnt, ss_cnt, myspace, ddra->ddra_cred);
if (error != 0) {
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (error);
}
}
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
return (0);
}
static void
dsl_dir_rename_sync(void *arg, dmu_tx_t *tx)
{
dsl_dir_rename_arg_t *ddra = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_dir_t *dd, *newparent;
const char *mynewname;
int error;
objset_t *mos = dp->dp_meta_objset;
VERIFY0(dsl_dir_hold(dp, ddra->ddra_oldname, FTAG, &dd, NULL));
VERIFY0(dsl_dir_hold(dp, ddra->ddra_newname, FTAG, &newparent,
&mynewname));
/* Log this before we change the name. */
spa_history_log_internal_dd(dd, "rename", tx,
"-> %s", ddra->ddra_newname);
if (newparent != dd->dd_parent) {
objset_t *os = dd->dd_pool->dp_meta_objset;
uint64_t fs_cnt = 0;
uint64_t ss_cnt = 0;
/*
* We already made sure the dd counts were initialized in the
* check function.
*/
if (spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_FS_SS_LIMIT)) {
VERIFY0(zap_lookup(os, dd->dd_object,
DD_FIELD_FILESYSTEM_COUNT, sizeof (fs_cnt), 1,
&fs_cnt));
/* add 1 for the filesystem itself that we're moving */
fs_cnt++;
VERIFY0(zap_lookup(os, dd->dd_object,
DD_FIELD_SNAPSHOT_COUNT, sizeof (ss_cnt), 1,
&ss_cnt));
}
dsl_fs_ss_count_adjust(dd->dd_parent, -fs_cnt,
DD_FIELD_FILESYSTEM_COUNT, tx);
dsl_fs_ss_count_adjust(newparent, fs_cnt,
DD_FIELD_FILESYSTEM_COUNT, tx);
dsl_fs_ss_count_adjust(dd->dd_parent, -ss_cnt,
DD_FIELD_SNAPSHOT_COUNT, tx);
dsl_fs_ss_count_adjust(newparent, ss_cnt,
DD_FIELD_SNAPSHOT_COUNT, tx);
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD,
-dsl_dir_phys(dd)->dd_used_bytes,
-dsl_dir_phys(dd)->dd_compressed_bytes,
-dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(newparent, DD_USED_CHILD,
dsl_dir_phys(dd)->dd_used_bytes,
dsl_dir_phys(dd)->dd_compressed_bytes,
dsl_dir_phys(dd)->dd_uncompressed_bytes, tx);
if (dsl_dir_phys(dd)->dd_reserved >
dsl_dir_phys(dd)->dd_used_bytes) {
uint64_t unused_rsrv = dsl_dir_phys(dd)->dd_reserved -
dsl_dir_phys(dd)->dd_used_bytes;
dsl_dir_diduse_space(dd->dd_parent, DD_USED_CHILD_RSRV,
-unused_rsrv, 0, 0, tx);
dsl_dir_diduse_space(newparent, DD_USED_CHILD_RSRV,
unused_rsrv, 0, 0, tx);
}
}
dmu_buf_will_dirty(dd->dd_dbuf, tx);
/* remove from old parent zapobj */
error = zap_remove(mos,
dsl_dir_phys(dd->dd_parent)->dd_child_dir_zapobj,
dd->dd_myname, tx);
ASSERT0(error);
(void) strlcpy(dd->dd_myname, mynewname,
sizeof (dd->dd_myname));
dsl_dir_rele(dd->dd_parent, dd);
dsl_dir_phys(dd)->dd_parent_obj = newparent->dd_object;
VERIFY0(dsl_dir_hold_obj(dp,
newparent->dd_object, NULL, dd, &dd->dd_parent));
/* add to new parent zapobj */
VERIFY0(zap_add(mos, dsl_dir_phys(newparent)->dd_child_dir_zapobj,
dd->dd_myname, 8, 1, &dd->dd_object, tx));
zvol_rename_minors(dp->dp_spa, ddra->ddra_oldname,
ddra->ddra_newname, B_TRUE);
dsl_prop_notify_all(dd);
dsl_dir_rele(newparent, FTAG);
dsl_dir_rele(dd, FTAG);
}
int
dsl_dir_rename(const char *oldname, const char *newname)
{
dsl_dir_rename_arg_t ddra;
ddra.ddra_oldname = oldname;
ddra.ddra_newname = newname;
ddra.ddra_cred = CRED();
return (dsl_sync_task(oldname,
dsl_dir_rename_check, dsl_dir_rename_sync, &ddra,
3, ZFS_SPACE_CHECK_RESERVED));
}
int
dsl_dir_transfer_possible(dsl_dir_t *sdd, dsl_dir_t *tdd,
uint64_t fs_cnt, uint64_t ss_cnt, uint64_t space, cred_t *cr)
{
dsl_dir_t *ancestor;
int64_t adelta;
uint64_t avail;
int err;
ancestor = closest_common_ancestor(sdd, tdd);
adelta = would_change(sdd, -space, ancestor);
avail = dsl_dir_space_available(tdd, ancestor, adelta, FALSE);
if (avail < space)
return (SET_ERROR(ENOSPC));
err = dsl_fs_ss_limit_check(tdd, fs_cnt, ZFS_PROP_FILESYSTEM_LIMIT,
ancestor, cr);
if (err != 0)
return (err);
err = dsl_fs_ss_limit_check(tdd, ss_cnt, ZFS_PROP_SNAPSHOT_LIMIT,
ancestor, cr);
if (err != 0)
return (err);
return (0);
}
timestruc_t
dsl_dir_snap_cmtime(dsl_dir_t *dd)
{
timestruc_t t;
mutex_enter(&dd->dd_lock);
t = dd->dd_snap_cmtime;
mutex_exit(&dd->dd_lock);
return (t);
}
void
dsl_dir_snap_cmtime_update(dsl_dir_t *dd)
{
timestruc_t t;
gethrestime(&t);
mutex_enter(&dd->dd_lock);
dd->dd_snap_cmtime = t;
mutex_exit(&dd->dd_lock);
}
void
dsl_dir_zapify(dsl_dir_t *dd, dmu_tx_t *tx)
{
objset_t *mos = dd->dd_pool->dp_meta_objset;
dmu_object_zapify(mos, dd->dd_object, DMU_OT_DSL_DIR, tx);
}
boolean_t
dsl_dir_is_zapified(dsl_dir_t *dd)
{
dmu_object_info_t doi;
dmu_object_info_from_db(dd->dd_dbuf, &doi);
return (doi.doi_type == DMU_OTN_ZAP_METADATA);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_dir_set_quota);
EXPORT_SYMBOL(dsl_dir_set_reservation);
#endif
diff --git a/module/zfs/dsl_pool.c b/module/zfs/dsl_pool.c
index db2e67742e3c..094b6bec0103 100644
--- a/module/zfs/dsl_pool.c
+++ b/module/zfs/dsl_pool.c
@@ -1,1213 +1,1259 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dsl_scan.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/dsl_deadlist.h>
#include <sys/bptree.h>
#include <sys/zfeature.h>
#include <sys/zil_impl.h>
#include <sys/dsl_userhold.h>
#include <sys/trace_txg.h>
#include <sys/mmp.h>
/*
* ZFS Write Throttle
* ------------------
*
* ZFS must limit the rate of incoming writes to the rate at which it is able
* to sync data modifications to the backend storage. Throttling by too much
* creates an artificial limit; throttling by too little can only be sustained
* for short periods and would lead to highly lumpy performance. On a per-pool
* basis, ZFS tracks the amount of modified (dirty) data. As operations change
* data, the amount of dirty data increases; as ZFS syncs out data, the amount
* of dirty data decreases. When the amount of dirty data exceeds a
* predetermined threshold further modifications are blocked until the amount
* of dirty data decreases (as data is synced out).
*
* The limit on dirty data is tunable, and should be adjusted according to
* both the IO capacity and available memory of the system. The larger the
* window, the more ZFS is able to aggregate and amortize metadata (and data)
* changes. However, memory is a limited resource, and allowing for more dirty
* data comes at the cost of keeping other useful data in memory (for example
* ZFS data cached by the ARC).
*
* Implementation
*
* As buffers are modified, dsl_pool_dirty_space() increments both the per-
* txg (dp_dirty_pertxg[]) and poolwide (dp_dirty_total) accounting of
* dirty space used; dsl_pool_undirty_space() decrements those values as data
* is synced out from dsl_pool_sync(). While only the poolwide value is
* relevant, the per-txg value is useful for debugging. The tunable
* zfs_dirty_data_max determines the dirty space limit. Once that value is
* exceeded, new writes are halted until space frees up.
*
* The zfs_dirty_data_sync tunable dictates the threshold at which we
* ensure that there is a txg syncing (see the comment in txg.c for a full
* description of transaction group stages).
*
* The IO scheduler uses both the dirty space limit and current amount of
* dirty data as inputs. Those values affect the number of concurrent IOs ZFS
* issues. See the comment in vdev_queue.c for details of the IO scheduler.
*
* The delay is also calculated based on the amount of dirty data. See the
* comment above dmu_tx_delay() for details.
*/
/*
* zfs_dirty_data_max will be set to zfs_dirty_data_max_percent% of all memory,
* capped at zfs_dirty_data_max_max. It can also be overridden with a module
* parameter.
*/
unsigned long zfs_dirty_data_max = 0;
unsigned long zfs_dirty_data_max_max = 0;
int zfs_dirty_data_max_percent = 10;
int zfs_dirty_data_max_max_percent = 25;
/*
* If there is at least this much dirty data, push out a txg.
*/
unsigned long zfs_dirty_data_sync = 64 * 1024 * 1024;
/*
* Once there is this amount of dirty data, the dmu_tx_delay() will kick in
* and delay each transaction.
* This value should be >= zfs_vdev_async_write_active_max_dirty_percent.
*/
int zfs_delay_min_dirty_percent = 60;
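/*
* Rough example with the defaults above: on a 32GB system,
* zfs_dirty_data_max_percent = 10 gives a dirty limit of about 3.2GB
* (unless capped by zfs_dirty_data_max_max); a txg sync is requested
* once 64MB (zfs_dirty_data_sync) is dirty, and dmu_tx_delay() starts
* throttling once dirty data passes 60% of the limit, about 1.9GB.
*/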
/*
* This controls how quickly the delay approaches infinity.
* Larger values cause it to delay more for a given amount of dirty data.
* Therefore larger values will cause there to be less dirty data for a
* given throughput.
*
* For the smoothest delay, this value should be about 1 billion divided
* by the maximum number of operations per second. This will smoothly
* handle between 10x and 1/10th this number.
*
* Note: zfs_delay_scale * zfs_dirty_data_max must be < 2^64, due to the
* multiply in dmu_tx_delay().
*/
unsigned long zfs_delay_scale = 1000 * 1000 * 1000 / 2000;
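/*
* The default above is 1000000000 / 2000 = 500000, i.e. tuned per the
* preceding guidance for a backend that can sustain roughly 2000
* operations per second.
*/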
/*
* This determines the number of threads used by the dp_sync_taskq.
*/
int zfs_sync_taskq_batch_pct = 75;
/*
* These tunables determine the behavior of how zil_itxg_clean() is
* called via zil_clean() in the context of spa_sync(). When an itxg
* list needs to be cleaned, TQ_NOSLEEP will be used when dispatching.
* If the dispatch fails, the call to zil_itxg_clean() will occur
* synchronously in the context of spa_sync(), which can negatively
* impact the performance of spa_sync() (e.g. in the case of the itxg
* list having a large number of itxs that needs to be cleaned).
*
* Thus, these tunables can be used to manipulate the behavior of the
* taskq used by zil_clean(); they determine the number of taskq entries
* that are pre-populated when the taskq is first created (via the
* "zfs_zil_clean_taskq_minalloc" tunable) and the maximum number of
* taskq entries that are cached after an on-demand allocation (via the
* "zfs_zil_clean_taskq_maxalloc").
*
* The idea being, we want to try reasonably hard to ensure there will
* already be a taskq entry pre-allocated by the time that it is needed
* by zil_clean(). This way, we can avoid the possibility of an
* on-demand allocation of a new taskq entry from failing, which would
* result in zil_itxg_clean() being called synchronously from zil_clean()
* (which can adversely affect performance of spa_sync()).
*
* Additionally, the number of threads used by the taskq can be
* configured via the "zfs_zil_clean_taskq_nthr_pct" tunable.
*/
int zfs_zil_clean_taskq_nthr_pct = 100;
int zfs_zil_clean_taskq_minalloc = 1024;
int zfs_zil_clean_taskq_maxalloc = 1024 * 1024;
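/*
* With the defaults above, the zil_clean taskq gets roughly one thread
* per CPU (100% with TASKQ_THREADS_CPU_PCT), pre-populates 1024 task
* entries at creation, and caches at most 1024 * 1024 entries that were
* allocated on demand.
*/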
int
dsl_pool_open_special_dir(dsl_pool_t *dp, const char *name, dsl_dir_t **ddp)
{
uint64_t obj;
int err;
err = zap_lookup(dp->dp_meta_objset,
dsl_dir_phys(dp->dp_root_dir)->dd_child_dir_zapobj,
name, sizeof (obj), 1, &obj);
if (err)
return (err);
return (dsl_dir_hold_obj(dp, obj, name, dp, ddp));
}
static dsl_pool_t *
dsl_pool_open_impl(spa_t *spa, uint64_t txg)
{
dsl_pool_t *dp;
blkptr_t *bp = spa_get_rootblkptr(spa);
dp = kmem_zalloc(sizeof (dsl_pool_t), KM_SLEEP);
dp->dp_spa = spa;
dp->dp_meta_rootbp = *bp;
rrw_init(&dp->dp_config_rwlock, B_TRUE);
txg_init(dp, txg);
mmp_init(spa);
txg_list_create(&dp->dp_dirty_datasets, spa,
offsetof(dsl_dataset_t, ds_dirty_link));
txg_list_create(&dp->dp_dirty_zilogs, spa,
offsetof(zilog_t, zl_dirty_link));
txg_list_create(&dp->dp_dirty_dirs, spa,
offsetof(dsl_dir_t, dd_dirty_link));
txg_list_create(&dp->dp_sync_tasks, spa,
offsetof(dsl_sync_task_t, dst_node));
dp->dp_sync_taskq = taskq_create("dp_sync_taskq",
zfs_sync_taskq_batch_pct, minclsyspri, 1, INT_MAX,
TASKQ_THREADS_CPU_PCT);
dp->dp_zil_clean_taskq = taskq_create("dp_zil_clean_taskq",
zfs_zil_clean_taskq_nthr_pct, minclsyspri,
zfs_zil_clean_taskq_minalloc,
zfs_zil_clean_taskq_maxalloc,
TASKQ_PREPOPULATE | TASKQ_THREADS_CPU_PCT);
mutex_init(&dp->dp_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&dp->dp_spaceavail_cv, NULL, CV_DEFAULT, NULL);
dp->dp_iput_taskq = taskq_create("z_iput", max_ncpus, defclsyspri,
max_ncpus * 8, INT_MAX, TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
return (dp);
}
int
dsl_pool_init(spa_t *spa, uint64_t txg, dsl_pool_t **dpp)
{
int err;
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
/*
* Initialize the caller's dsl_pool_t structure before we actually open
* the meta objset. This is done because a self-healing write zio may
* be issued as part of dmu_objset_open_impl() and the spa needs its
* dsl_pool_t initialized in order to handle the write.
*/
*dpp = dp;
err = dmu_objset_open_impl(spa, NULL, &dp->dp_meta_rootbp,
&dp->dp_meta_objset);
if (err != 0) {
dsl_pool_close(dp);
*dpp = NULL;
}
return (err);
}
int
dsl_pool_open(dsl_pool_t *dp)
{
int err;
dsl_dir_t *dd;
dsl_dataset_t *ds;
uint64_t obj;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_ROOT_DATASET, sizeof (uint64_t), 1,
&dp->dp_root_dir_obj);
if (err)
goto out;
err = dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
NULL, dp, &dp->dp_root_dir);
if (err)
goto out;
err = dsl_pool_open_special_dir(dp, MOS_DIR_NAME, &dp->dp_mos_dir);
if (err)
goto out;
if (spa_version(dp->dp_spa) >= SPA_VERSION_ORIGIN) {
err = dsl_pool_open_special_dir(dp, ORIGIN_DIR_NAME, &dd);
if (err)
goto out;
err = dsl_dataset_hold_obj(dp,
dsl_dir_phys(dd)->dd_head_dataset_obj, FTAG, &ds);
if (err == 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, dp,
&dp->dp_origin_snap);
dsl_dataset_rele(ds, FTAG);
}
dsl_dir_rele(dd, dp);
if (err)
goto out;
}
if (spa_version(dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
err = dsl_pool_open_special_dir(dp, FREE_DIR_NAME,
&dp->dp_free_dir);
if (err)
goto out;
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj);
if (err)
goto out;
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
}
+ if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
+ err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj);
+ if (err == 0) {
+ VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj,
+ dp->dp_meta_objset, obj));
+ } else if (err == ENOENT) {
+ /*
+ * We might not have created the remap bpobj yet.
+ */
+ err = 0;
+ } else {
+ goto out;
+ }
+ }
+
/*
- * Note: errors ignored, because the leak dir will not exist if we
- * have not encountered a leak yet.
+ * Note: errors ignored, because these special dirs, used for
+ * space accounting, are only created on demand.
*/
(void) dsl_pool_open_special_dir(dp, LEAK_DIR_NAME,
&dp->dp_leak_dir);
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_ASYNC_DESTROY)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, sizeof (uint64_t), 1,
&dp->dp_bptree_obj);
if (err != 0)
goto out;
}
if (spa_feature_is_active(dp->dp_spa, SPA_FEATURE_EMPTY_BPOBJ)) {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_EMPTY_BPOBJ, sizeof (uint64_t), 1,
&dp->dp_empty_bpobj);
if (err != 0)
goto out;
}
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_TMP_USERREFS, sizeof (uint64_t), 1,
&dp->dp_tmp_userrefs_obj);
if (err == ENOENT)
err = 0;
if (err)
goto out;
err = dsl_scan_init(dp, dp->dp_tx.tx_open_txg);
out:
rrw_exit(&dp->dp_config_rwlock, FTAG);
return (err);
}
void
dsl_pool_close(dsl_pool_t *dp)
{
/*
* Drop our references from dsl_pool_open().
*
* Since we held the origin_snap from "syncing" context (which
* includes pool-opening context), it actually only got a "ref"
* and not a hold, so just drop that here.
*/
- if (dp->dp_origin_snap)
+ if (dp->dp_origin_snap != NULL)
dsl_dataset_rele(dp->dp_origin_snap, dp);
- if (dp->dp_mos_dir)
+ if (dp->dp_mos_dir != NULL)
dsl_dir_rele(dp->dp_mos_dir, dp);
- if (dp->dp_free_dir)
+ if (dp->dp_free_dir != NULL)
dsl_dir_rele(dp->dp_free_dir, dp);
- if (dp->dp_leak_dir)
+ if (dp->dp_leak_dir != NULL)
dsl_dir_rele(dp->dp_leak_dir, dp);
- if (dp->dp_root_dir)
+ if (dp->dp_root_dir != NULL)
dsl_dir_rele(dp->dp_root_dir, dp);
bpobj_close(&dp->dp_free_bpobj);
+ bpobj_close(&dp->dp_obsolete_bpobj);
/* undo the dmu_objset_open_impl(mos) from dsl_pool_open() */
- if (dp->dp_meta_objset)
+ if (dp->dp_meta_objset != NULL)
dmu_objset_evict(dp->dp_meta_objset);
txg_list_destroy(&dp->dp_dirty_datasets);
txg_list_destroy(&dp->dp_dirty_zilogs);
txg_list_destroy(&dp->dp_sync_tasks);
txg_list_destroy(&dp->dp_dirty_dirs);
taskq_destroy(dp->dp_zil_clean_taskq);
taskq_destroy(dp->dp_sync_taskq);
/*
* We can't set retry to TRUE since we're explicitly specifying
* a spa to flush. This is good enough; any missed buffers for
* this spa won't cause trouble, and they'll eventually fall
* out of the ARC just like any other unused buffer.
*/
arc_flush(dp->dp_spa, FALSE);
mmp_fini(dp->dp_spa);
txg_fini(dp);
dsl_scan_fini(dp);
dmu_buf_user_evict_wait();
rrw_destroy(&dp->dp_config_rwlock);
mutex_destroy(&dp->dp_lock);
cv_destroy(&dp->dp_spaceavail_cv);
taskq_destroy(dp->dp_iput_taskq);
- if (dp->dp_blkstats) {
+ if (dp->dp_blkstats != NULL) {
mutex_destroy(&dp->dp_blkstats->zab_lock);
vmem_free(dp->dp_blkstats, sizeof (zfs_all_blkstats_t));
}
kmem_free(dp, sizeof (dsl_pool_t));
}
+void
+dsl_pool_create_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
+{
+ uint64_t obj;
+ /*
+ * Currently, we only create the obsolete_bpobj where there are
+ * indirect vdevs with referenced mappings.
+ */
+ ASSERT(spa_feature_is_active(dp->dp_spa, SPA_FEATURE_DEVICE_REMOVAL));
+ /* create and open the obsolete_bpobj */
+ obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
+ VERIFY0(bpobj_open(&dp->dp_obsolete_bpobj, dp->dp_meta_objset, obj));
+ VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_OBSOLETE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
+ spa_feature_incr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+}
+
+void
+dsl_pool_destroy_obsolete_bpobj(dsl_pool_t *dp, dmu_tx_t *tx)
+{
+ spa_feature_decr(dp->dp_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+ VERIFY0(zap_remove(dp->dp_meta_objset,
+ DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_OBSOLETE_BPOBJ, tx));
+ bpobj_free(dp->dp_meta_objset,
+ dp->dp_obsolete_bpobj.bpo_object, tx);
+ bpobj_close(&dp->dp_obsolete_bpobj);
+}
+
dsl_pool_t *
dsl_pool_create(spa_t *spa, nvlist_t *zplprops, dsl_crypto_params_t *dcp,
uint64_t txg)
{
int err;
dsl_pool_t *dp = dsl_pool_open_impl(spa, txg);
dmu_tx_t *tx = dmu_tx_create_assigned(dp, txg);
dsl_dataset_t *ds;
uint64_t obj;
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
/* create and open the MOS (meta-objset) */
dp->dp_meta_objset = dmu_objset_create_impl(spa,
NULL, &dp->dp_meta_rootbp, DMU_OST_META, tx);
spa->spa_meta_objset = dp->dp_meta_objset;
/* create the pool directory */
err = zap_create_claim(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_OT_OBJECT_DIRECTORY, DMU_OT_NONE, 0, tx);
ASSERT0(err);
/* Initialize scan structures */
VERIFY0(dsl_scan_init(dp, txg));
/* create and open the root dir */
dp->dp_root_dir_obj = dsl_dir_create_sync(dp, NULL, NULL, tx);
VERIFY0(dsl_dir_hold_obj(dp, dp->dp_root_dir_obj,
NULL, dp, &dp->dp_root_dir));
/* create and open the meta-objset dir */
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, MOS_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
MOS_DIR_NAME, &dp->dp_mos_dir));
if (spa_version(spa) >= SPA_VERSION_DEADLISTS) {
/* create and open the free dir */
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
FREE_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
FREE_DIR_NAME, &dp->dp_free_dir));
/* create and open the free_bplist */
obj = bpobj_alloc(dp->dp_meta_objset, SPA_OLD_MAXBLOCKSIZE, tx);
VERIFY(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx) == 0);
VERIFY0(bpobj_open(&dp->dp_free_bpobj,
dp->dp_meta_objset, obj));
}
if (spa_version(spa) >= SPA_VERSION_DSL_SCRUB)
dsl_pool_create_origin(dp, tx);
/*
* Some features may be needed when creating the root dataset, so we
* create the feature objects here.
*/
if (spa_version(spa) >= SPA_VERSION_FEATURES)
spa_feature_create_zap_objects(spa, tx);
if (dcp != NULL && dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT)
spa_feature_enable(spa, SPA_FEATURE_ENCRYPTION, tx);
/* create the root dataset */
obj = dsl_dataset_create_sync_dd(dp->dp_root_dir, NULL, dcp, 0, tx);
/* create the root objset */
VERIFY0(dsl_dataset_hold_obj(dp, obj, FTAG, &ds));
#ifdef _KERNEL
{
objset_t *os;
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
os = dmu_objset_create_impl(dp->dp_spa, ds,
dsl_dataset_get_blkptr(ds), DMU_OST_ZFS, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
zfs_create_fs(os, kcred, zplprops, tx);
}
#endif
dsl_dataset_rele(ds, FTAG);
dmu_tx_commit(tx);
rrw_exit(&dp->dp_config_rwlock, FTAG);
return (dp);
}
/*
* Account for the meta-objset space in its placeholder dsl_dir.
*/
void
dsl_pool_mos_diduse_space(dsl_pool_t *dp,
int64_t used, int64_t comp, int64_t uncomp)
{
ASSERT3U(comp, ==, uncomp); /* it's all metadata */
mutex_enter(&dp->dp_lock);
dp->dp_mos_used_delta += used;
dp->dp_mos_compressed_delta += comp;
dp->dp_mos_uncompressed_delta += uncomp;
mutex_exit(&dp->dp_lock);
}
static void
dsl_pool_sync_mos(dsl_pool_t *dp, dmu_tx_t *tx)
{
zio_t *zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
dmu_objset_sync(dp->dp_meta_objset, zio, tx);
VERIFY0(zio_wait(zio));
dprintf_bp(&dp->dp_meta_rootbp, "meta objset rootbp is %s", "");
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
}
static void
dsl_pool_dirty_delta(dsl_pool_t *dp, int64_t delta)
{
ASSERT(MUTEX_HELD(&dp->dp_lock));
if (delta < 0)
ASSERT3U(-delta, <=, dp->dp_dirty_total);
dp->dp_dirty_total += delta;
/*
* Note: we signal even when increasing dp_dirty_total.
* This ensures forward progress -- each thread wakes the next waiter.
*/
if (dp->dp_dirty_total < zfs_dirty_data_max)
cv_signal(&dp->dp_spaceavail_cv);
}
void
dsl_pool_sync(dsl_pool_t *dp, uint64_t txg)
{
zio_t *zio;
dmu_tx_t *tx;
dsl_dir_t *dd;
dsl_dataset_t *ds;
objset_t *mos = dp->dp_meta_objset;
list_t synced_datasets;
list_create(&synced_datasets, sizeof (dsl_dataset_t),
offsetof(dsl_dataset_t, ds_synced_link));
tx = dmu_tx_create_assigned(dp, txg);
/*
* Write out all dirty blocks of dirty datasets.
*/
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
/*
* We must not sync any non-MOS datasets twice, because
* we may have taken a snapshot of them. However, we
* may sync newly-created datasets on pass 2.
*/
ASSERT(!list_link_active(&ds->ds_synced_link));
list_insert_tail(&synced_datasets, ds);
dsl_dataset_sync(ds, zio, tx);
}
VERIFY0(zio_wait(zio));
/*
* We have written all of the accounted dirty data, so our
* dp_space_towrite should now be zero. However, some seldom-used
* code paths do not adhere to this (e.g. dbuf_undirty() and
* rounding errors in dbuf_write_physdone()).
* Shore up the accounting of any dirtied space now.
*/
dsl_pool_undirty_space(dp, dp->dp_dirty_pertxg[txg & TXG_MASK], txg);
/*
* Update the long range free counter after
* we're done syncing user data
*/
mutex_enter(&dp->dp_lock);
ASSERT(spa_sync_pass(dp->dp_spa) == 1 ||
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] == 0);
dp->dp_long_free_dirty_pertxg[txg & TXG_MASK] = 0;
mutex_exit(&dp->dp_lock);
/*
* After the data blocks have been written (ensured by the zio_wait()
* above), update the user/group/project space accounting. This happens
* in tasks dispatched to dp_sync_taskq, so wait for them before
* continuing.
*/
for (ds = list_head(&synced_datasets); ds != NULL;
ds = list_next(&synced_datasets, ds)) {
dmu_objset_do_userquota_updates(ds->ds_objset, tx);
}
taskq_wait(dp->dp_sync_taskq);
/*
* Sync the datasets again to push out the changes due to
* userspace updates. This must be done before we process the
* sync tasks, so that any snapshots will have the correct
* user accounting information (and we won't get confused
* about which blocks are part of the snapshot).
*/
zio = zio_root(dp->dp_spa, NULL, NULL, ZIO_FLAG_MUSTSUCCEED);
while ((ds = txg_list_remove(&dp->dp_dirty_datasets, txg)) != NULL) {
ASSERT(list_link_active(&ds->ds_synced_link));
dmu_buf_rele(ds->ds_dbuf, ds);
dsl_dataset_sync(ds, zio, tx);
}
VERIFY0(zio_wait(zio));
/*
* Now that the datasets have been completely synced, we can
* clean up our in-memory structures accumulated while syncing:
*
* - move dead blocks from the pending deadlist to the on-disk deadlist
* - release hold from dsl_dataset_dirty()
*/
while ((ds = list_remove_head(&synced_datasets)) != NULL) {
dsl_dataset_sync_done(ds, tx);
}
while ((dd = txg_list_remove(&dp->dp_dirty_dirs, txg)) != NULL) {
dsl_dir_sync(dd, tx);
}
/*
* The MOS's space is accounted for in the pool/$MOS
* (dp_mos_dir). We can't modify the mos while we're syncing
* it, so we remember the deltas and apply them here.
*/
if (dp->dp_mos_used_delta != 0 || dp->dp_mos_compressed_delta != 0 ||
dp->dp_mos_uncompressed_delta != 0) {
dsl_dir_diduse_space(dp->dp_mos_dir, DD_USED_HEAD,
dp->dp_mos_used_delta,
dp->dp_mos_compressed_delta,
dp->dp_mos_uncompressed_delta, tx);
dp->dp_mos_used_delta = 0;
dp->dp_mos_compressed_delta = 0;
dp->dp_mos_uncompressed_delta = 0;
}
if (!multilist_is_empty(mos->os_dirty_dnodes[txg & TXG_MASK])) {
dsl_pool_sync_mos(dp, tx);
}
/*
* If we modify a dataset in the same txg that we want to destroy it,
* its dsl_dir's dd_dbuf will be dirty, and thus have a hold on it.
* dsl_dir_destroy_check() will fail if there are unexpected holds.
* Therefore, we want to sync the MOS (thus syncing the dd_dbuf
* and clearing the hold on it) before we process the sync_tasks.
* The MOS data dirtied by the sync_tasks will be synced on the next
* pass.
*/
if (!txg_list_empty(&dp->dp_sync_tasks, txg)) {
dsl_sync_task_t *dst;
/*
* No more sync tasks should have been added while we
* were syncing.
*/
ASSERT3U(spa_sync_pass(dp->dp_spa), ==, 1);
while ((dst = txg_list_remove(&dp->dp_sync_tasks, txg)) != NULL)
dsl_sync_task_sync(dst, tx);
}
dmu_tx_commit(tx);
DTRACE_PROBE2(dsl_pool_sync__done, dsl_pool_t *dp, dp, uint64_t, txg);
}
void
dsl_pool_sync_done(dsl_pool_t *dp, uint64_t txg)
{
zilog_t *zilog;
while ((zilog = txg_list_head(&dp->dp_dirty_zilogs, txg))) {
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
/*
* We don't remove the zilog from the dp_dirty_zilogs
* list until after we've cleaned it. This ensures that
* callers of zilog_is_dirty() receive an accurate
* answer when they are racing with the spa sync thread.
*/
zil_clean(zilog, txg);
(void) txg_list_remove_this(&dp->dp_dirty_zilogs, zilog, txg);
ASSERT(!dmu_objset_is_dirty(zilog->zl_os, txg));
dmu_buf_rele(ds->ds_dbuf, zilog);
}
ASSERT(!dmu_objset_is_dirty(dp->dp_meta_objset, txg));
}
/*
* TRUE if the current thread is the tx_sync_thread or if we
* are being called from SPA context during pool initialization.
*/
int
dsl_pool_sync_context(dsl_pool_t *dp)
{
return (curthread == dp->dp_tx.tx_sync_thread ||
spa_is_initializing(dp->dp_spa) ||
taskq_member(dp->dp_sync_taskq, curthread));
}
uint64_t
dsl_pool_adjustedsize(dsl_pool_t *dp, boolean_t netfree)
{
uint64_t space, resv;
/*
* If we're trying to assess whether it's OK to do a free,
* cut the reservation in half to allow forward progress
* (e.g. make it possible to rm(1) files from a full pool).
*/
space = spa_get_dspace(dp->dp_spa);
resv = spa_get_slop_space(dp->dp_spa);
if (netfree)
resv >>= 1;
return (space - resv);
}
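/*
* Example: if spa_get_slop_space() reserves, say, 100G of a pool,
* ordinary allocations see the pool as 100G smaller than its dspace,
* while net-free operations (netfree == B_TRUE) only subtract 50G,
* leaving room to delete files from an otherwise full pool.
*/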
boolean_t
dsl_pool_need_dirty_delay(dsl_pool_t *dp)
{
uint64_t delay_min_bytes =
zfs_dirty_data_max * zfs_delay_min_dirty_percent / 100;
boolean_t rv;
mutex_enter(&dp->dp_lock);
if (dp->dp_dirty_total > zfs_dirty_data_sync)
txg_kick(dp);
rv = (dp->dp_dirty_total > delay_min_bytes);
mutex_exit(&dp->dp_lock);
return (rv);
}
void
dsl_pool_dirty_space(dsl_pool_t *dp, int64_t space, dmu_tx_t *tx)
{
if (space > 0) {
mutex_enter(&dp->dp_lock);
dp->dp_dirty_pertxg[tx->tx_txg & TXG_MASK] += space;
dsl_pool_dirty_delta(dp, space);
mutex_exit(&dp->dp_lock);
}
}
void
dsl_pool_undirty_space(dsl_pool_t *dp, int64_t space, uint64_t txg)
{
ASSERT3S(space, >=, 0);
if (space == 0)
return;
mutex_enter(&dp->dp_lock);
if (dp->dp_dirty_pertxg[txg & TXG_MASK] < space) {
/* XXX writing something we didn't dirty? */
space = dp->dp_dirty_pertxg[txg & TXG_MASK];
}
ASSERT3U(dp->dp_dirty_pertxg[txg & TXG_MASK], >=, space);
dp->dp_dirty_pertxg[txg & TXG_MASK] -= space;
ASSERT3U(dp->dp_dirty_total, >=, space);
dsl_pool_dirty_delta(dp, -space);
mutex_exit(&dp->dp_lock);
}
/* ARGSUSED */
static int
upgrade_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
dmu_tx_t *tx = arg;
dsl_dataset_t *ds, *prev = NULL;
int err;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err) {
dsl_dataset_rele(ds, FTAG);
return (err);
}
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object)
break;
dsl_dataset_rele(ds, FTAG);
ds = prev;
prev = NULL;
}
if (prev == NULL) {
prev = dp->dp_origin_snap;
/*
* The $ORIGIN can't have any data, or the accounting
* will be wrong.
*/
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
ASSERT0(dsl_dataset_phys(prev)->ds_bp.blk_birth);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
/* The origin doesn't get attached to itself */
if (ds->ds_object == prev->ds_object) {
dsl_dataset_rele(ds, FTAG);
return (0);
}
dmu_buf_will_dirty(ds->ds_dbuf, tx);
dsl_dataset_phys(ds)->ds_prev_snap_obj = prev->ds_object;
dsl_dataset_phys(ds)->ds_prev_snap_txg =
dsl_dataset_phys(prev)->ds_creation_txg;
dmu_buf_will_dirty(ds->ds_dir->dd_dbuf, tx);
dsl_dir_phys(ds->ds_dir)->dd_origin_obj = prev->ds_object;
dmu_buf_will_dirty(prev->ds_dbuf, tx);
dsl_dataset_phys(prev)->ds_num_children++;
if (dsl_dataset_phys(ds)->ds_next_snap_obj == 0) {
ASSERT(ds->ds_prev == NULL);
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj,
ds, &ds->ds_prev));
}
}
ASSERT3U(dsl_dir_phys(ds->ds_dir)->dd_origin_obj, ==, prev->ds_object);
ASSERT3U(dsl_dataset_phys(ds)->ds_prev_snap_obj, ==, prev->ds_object);
if (dsl_dataset_phys(prev)->ds_next_clones_obj == 0) {
dmu_buf_will_dirty(prev->ds_dbuf, tx);
dsl_dataset_phys(prev)->ds_next_clones_obj =
zap_create(dp->dp_meta_objset,
DMU_OT_NEXT_CLONES, DMU_OT_NONE, 0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dataset_phys(prev)->ds_next_clones_obj, ds->ds_object, tx));
dsl_dataset_rele(ds, FTAG);
if (prev != dp->dp_origin_snap)
dsl_dataset_rele(prev, FTAG);
return (0);
}
void
dsl_pool_upgrade_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dp->dp_origin_snap != NULL);
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj, upgrade_clones_cb,
tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}
/* ARGSUSED */
static int
upgrade_dir_clones_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
dmu_tx_t *tx = arg;
objset_t *mos = dp->dp_meta_objset;
if (dsl_dir_phys(ds->ds_dir)->dd_origin_obj != 0) {
dsl_dataset_t *origin;
VERIFY0(dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &origin));
if (dsl_dir_phys(origin->ds_dir)->dd_clones == 0) {
dmu_buf_will_dirty(origin->ds_dir->dd_dbuf, tx);
dsl_dir_phys(origin->ds_dir)->dd_clones =
zap_create(mos, DMU_OT_DSL_CLONES, DMU_OT_NONE,
0, tx);
}
VERIFY0(zap_add_int(dp->dp_meta_objset,
dsl_dir_phys(origin->ds_dir)->dd_clones,
ds->ds_object, tx));
dsl_dataset_rele(origin, FTAG);
}
return (0);
}
void
dsl_pool_upgrade_dir_clones(dsl_pool_t *dp, dmu_tx_t *tx)
{
uint64_t obj;
ASSERT(dmu_tx_is_syncing(tx));
(void) dsl_dir_create_sync(dp, dp->dp_root_dir, FREE_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
FREE_DIR_NAME, &dp->dp_free_dir));
/*
* We can't use bpobj_alloc(), because spa_version() still
* returns the old version, and we need a new-version bpobj with
* subobj support. So call dmu_object_alloc() directly.
*/
obj = dmu_object_alloc(dp->dp_meta_objset, DMU_OT_BPOBJ,
SPA_OLD_MAXBLOCKSIZE, DMU_OT_BPOBJ_HDR, sizeof (bpobj_phys_t), tx);
VERIFY0(zap_add(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_FREE_BPOBJ, sizeof (uint64_t), 1, &obj, tx));
VERIFY0(bpobj_open(&dp->dp_free_bpobj, dp->dp_meta_objset, obj));
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
upgrade_dir_clones_cb, tx, DS_FIND_CHILDREN | DS_FIND_SERIALIZE));
}
void
dsl_pool_create_origin(dsl_pool_t *dp, dmu_tx_t *tx)
{
uint64_t dsobj;
dsl_dataset_t *ds;
ASSERT(dmu_tx_is_syncing(tx));
ASSERT(dp->dp_origin_snap == NULL);
ASSERT(rrw_held(&dp->dp_config_rwlock, RW_WRITER));
/* create the origin dir, ds, & snap-ds */
dsobj = dsl_dataset_create_sync(dp->dp_root_dir, ORIGIN_DIR_NAME,
NULL, 0, kcred, NULL, tx);
VERIFY0(dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
dsl_dataset_snapshot_sync_impl(ds, ORIGIN_DIR_NAME, tx);
VERIFY0(dsl_dataset_hold_obj(dp, dsl_dataset_phys(ds)->ds_prev_snap_obj,
dp, &dp->dp_origin_snap));
dsl_dataset_rele(ds, FTAG);
}
taskq_t *
dsl_pool_iput_taskq(dsl_pool_t *dp)
{
return (dp->dp_iput_taskq);
}
/*
* Walk through the pool-wide zap object of temporary snapshot user holds
* and release them.
*/
void
dsl_pool_clean_tmp_userrefs(dsl_pool_t *dp)
{
zap_attribute_t za;
zap_cursor_t zc;
objset_t *mos = dp->dp_meta_objset;
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
nvlist_t *holds;
if (zapobj == 0)
return;
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
holds = fnvlist_alloc();
for (zap_cursor_init(&zc, mos, zapobj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
char *htag;
nvlist_t *tags;
htag = strchr(za.za_name, '-');
*htag = '\0';
++htag;
if (nvlist_lookup_nvlist(holds, za.za_name, &tags) != 0) {
tags = fnvlist_alloc();
fnvlist_add_boolean(tags, htag);
fnvlist_add_nvlist(holds, za.za_name, tags);
fnvlist_free(tags);
} else {
fnvlist_add_boolean(tags, htag);
}
}
dsl_dataset_user_release_tmp(dp, holds);
fnvlist_free(holds);
zap_cursor_fini(&zc);
}
/*
* Create the pool-wide zap object for storing temporary snapshot holds.
*/
void
dsl_pool_user_hold_create_obj(dsl_pool_t *dp, dmu_tx_t *tx)
{
objset_t *mos = dp->dp_meta_objset;
ASSERT(dp->dp_tmp_userrefs_obj == 0);
ASSERT(dmu_tx_is_syncing(tx));
dp->dp_tmp_userrefs_obj = zap_create_link(mos, DMU_OT_USERREFS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_TMP_USERREFS, tx);
}
static int
dsl_pool_user_hold_rele_impl(dsl_pool_t *dp, uint64_t dsobj,
const char *tag, uint64_t now, dmu_tx_t *tx, boolean_t holding)
{
objset_t *mos = dp->dp_meta_objset;
uint64_t zapobj = dp->dp_tmp_userrefs_obj;
char *name;
int error;
ASSERT(spa_version(dp->dp_spa) >= SPA_VERSION_USERREFS);
ASSERT(dmu_tx_is_syncing(tx));
/*
* If the pool was created prior to SPA_VERSION_USERREFS, the
* zap object for temporary holds might not exist yet.
*/
if (zapobj == 0) {
if (holding) {
dsl_pool_user_hold_create_obj(dp, tx);
zapobj = dp->dp_tmp_userrefs_obj;
} else {
return (SET_ERROR(ENOENT));
}
}
name = kmem_asprintf("%llx-%s", (u_longlong_t)dsobj, tag);
if (holding)
error = zap_add(mos, zapobj, name, 8, 1, &now, tx);
else
error = zap_remove(mos, zapobj, name, tx);
strfree(name);
return (error);
}
/*
* Add a temporary hold for the given dataset object and tag.
*/
int
dsl_pool_user_hold(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
uint64_t now, dmu_tx_t *tx)
{
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, now, tx, B_TRUE));
}
/*
* Release a temporary hold for the given dataset object and tag.
*/
int
dsl_pool_user_release(dsl_pool_t *dp, uint64_t dsobj, const char *tag,
dmu_tx_t *tx)
{
return (dsl_pool_user_hold_rele_impl(dp, dsobj, tag, 0,
tx, B_FALSE));
}
/*
* DSL Pool Configuration Lock
*
* The dp_config_rwlock protects against changes to DSL state (e.g. dataset
* creation / destruction / rename / property setting). It must be held for
* read to hold a dataset or dsl_dir. I.e. you must call
* dsl_pool_config_enter() or dsl_pool_hold() before calling
* dsl_{dataset,dir}_hold{_obj}. In most circumstances, the dp_config_rwlock
* must be held continuously until all datasets and dsl_dirs are released.
*
* The only exception to this rule is that if a "long hold" is placed on
* a dataset, then the dp_config_rwlock may be dropped while the dataset
* is still held. The long hold will prevent the dataset from being
* destroyed -- the destroy will fail with EBUSY. A long hold can be
* obtained by calling dsl_dataset_long_hold(), or by "owning" a dataset
* (by calling dsl_{dataset,objset}_{try}own{_obj}).
*
* Legitimate long-holders (including owners) are long-running, cancelable
* tasks whose presence should cause "zfs destroy" to fail. This includes DMU
* consumers (i.e. a ZPL filesystem being mounted or a ZVOL being open),
* "zfs send", and "zfs diff". There are several other long-holders whose
* uses are suboptimal (e.g. "zfs promote", and zil_suspend()).
*
* The usual formula for long-holding would be:
* dsl_pool_hold()
* dsl_dataset_hold()
* ... perform checks ...
* dsl_dataset_long_hold()
* dsl_pool_rele()
* ... perform long-running task ...
* dsl_dataset_long_rele()
* dsl_dataset_rele()
*
* Note that when the long hold is released, the dataset is still held but
* the pool is not held. The dataset may change arbitrarily during this time
* (e.g. it could be destroyed). Therefore you shouldn't do anything to the
* dataset except release it.
*
* User-initiated operations (e.g. ioctls, zfs_ioc_*()) are either read-only
* or modifying operations.
*
* Modifying operations should generally use dsl_sync_task(). The synctask
* infrastructure enforces proper locking strategy with respect to the
* dp_config_rwlock. See the comment above dsl_sync_task() for details.
*
* Read-only operations will manually hold the pool, then the dataset, obtain
* information from the dataset, then release the pool and dataset.
* dmu_objset_{hold,rele}() are convenience routines that also do the pool
* hold/rele.
*/
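/*
 * Editor's note -- illustrative only, not part of this change: the
 * long-hold formula above written out as a hypothetical consumer.  The
 * dataset name, tag, and the elided checks/long-running task are
 * placeholders.
 */
#if 0
static int
example_long_hold(const char *dsname, void *tag)
{
	dsl_pool_t *dp;
	dsl_dataset_t *ds;
	int error;

	if ((error = dsl_pool_hold(dsname, tag, &dp)) != 0)
		return (error);
	if ((error = dsl_dataset_hold(dp, dsname, tag, &ds)) != 0) {
		dsl_pool_rele(dp, tag);
		return (error);
	}
	/* ... perform checks ... */
	dsl_dataset_long_hold(ds, tag);
	dsl_pool_rele(dp, tag);
	/* ... perform long-running task; the long hold blocks destroy ... */
	dsl_dataset_long_rele(ds, tag);
	dsl_dataset_rele(ds, tag);
	return (0);
}
#endif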
int
dsl_pool_hold(const char *name, void *tag, dsl_pool_t **dp)
{
spa_t *spa;
int error;
error = spa_open(name, &spa, tag);
if (error == 0) {
*dp = spa_get_dsl(spa);
dsl_pool_config_enter(*dp, tag);
}
return (error);
}
void
dsl_pool_rele(dsl_pool_t *dp, void *tag)
{
dsl_pool_config_exit(dp, tag);
spa_close(dp->dp_spa, tag);
}
void
dsl_pool_config_enter(dsl_pool_t *dp, void *tag)
{
/*
* We use a "reentrant" reader-writer lock, but not reentrantly.
*
* The rrwlock can (with the track_all flag) track all reading threads,
* which is very useful for debugging which code path failed to release
* the lock, and for verifying that the *current* thread does hold
* the lock.
*
* (Unlike a rwlock, which knows that N threads hold it for
* read, but not *which* threads, so rw_held(RW_READER) returns TRUE
* if any thread holds it for read, even if this thread doesn't).
*/
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
rrw_enter(&dp->dp_config_rwlock, RW_READER, tag);
}
void
dsl_pool_config_enter_prio(dsl_pool_t *dp, void *tag)
{
ASSERT(!rrw_held(&dp->dp_config_rwlock, RW_READER));
rrw_enter_read_prio(&dp->dp_config_rwlock, tag);
}
void
dsl_pool_config_exit(dsl_pool_t *dp, void *tag)
{
rrw_exit(&dp->dp_config_rwlock, tag);
}
boolean_t
dsl_pool_config_held(dsl_pool_t *dp)
{
return (RRW_LOCK_HELD(&dp->dp_config_rwlock));
}
boolean_t
dsl_pool_config_held_writer(dsl_pool_t *dp)
{
return (RRW_WRITE_HELD(&dp->dp_config_rwlock));
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(dsl_pool_config_enter);
EXPORT_SYMBOL(dsl_pool_config_exit);
/* BEGIN CSTYLED */
/* zfs_dirty_data_max_percent is only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_percent, "percent of RAM that may be dirty");
/* zfs_dirty_data_max_max_percent is only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max_percent, int, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max_percent,
"zfs_dirty_data_max upper bound as % of RAM");
module_param(zfs_delay_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_delay_min_dirty_percent, "transaction delay threshold");
module_param(zfs_dirty_data_max, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_max, "determines the dirty space limit");
/* zfs_dirty_data_max_max is only applied at module load in arc_init(). */
module_param(zfs_dirty_data_max_max, ulong, 0444);
MODULE_PARM_DESC(zfs_dirty_data_max_max,
"zfs_dirty_data_max upper bound in bytes");
module_param(zfs_dirty_data_sync, ulong, 0644);
MODULE_PARM_DESC(zfs_dirty_data_sync, "sync txg when this much dirty data");
module_param(zfs_delay_scale, ulong, 0644);
MODULE_PARM_DESC(zfs_delay_scale, "how quickly delay approaches infinity");
module_param(zfs_sync_taskq_batch_pct, int, 0644);
MODULE_PARM_DESC(zfs_sync_taskq_batch_pct,
"max percent of CPUs that are used to sync dirty data");
module_param(zfs_zil_clean_taskq_nthr_pct, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_nthr_pct,
"max percent of CPUs that are used per dp_sync_taskq");
module_param(zfs_zil_clean_taskq_minalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_minalloc,
"number of taskq entries that are pre-populated");
module_param(zfs_zil_clean_taskq_maxalloc, int, 0644);
MODULE_PARM_DESC(zfs_zil_clean_taskq_maxalloc,
"max number of taskq entries that are cached");
/* END CSTYLED */
#endif
diff --git a/module/zfs/dsl_scan.c b/module/zfs/dsl_scan.c
index 90534b4fa3b3..53953a6c5851 100644
--- a/module/zfs/dsl_scan.c
+++ b/module/zfs/dsl_scan.c
@@ -1,3921 +1,3962 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
- * Copyright (c) 2011, 2016 by Delphix. All rights reserved.
+ * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright 2016 Gary Mills
* Copyright (c) 2017 Datto Inc.
* Copyright 2017 Joyent, Inc.
*/
#include <sys/dsl_scan.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_synctask.h>
#include <sys/dnode.h>
#include <sys/dmu_tx.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/zap.h>
#include <sys/zio.h>
#include <sys/zfs_context.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_znode.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zil_impl.h>
#include <sys/zio_checksum.h>
#include <sys/ddt.h>
#include <sys/sa.h>
#include <sys/sa_impl.h>
#include <sys/zfeature.h>
#include <sys/abd.h>
#include <sys/range_tree.h>
#ifdef _KERNEL
#include <sys/zfs_vfsops.h>
#endif
/*
* Grand theory statement on scan queue sorting
*
* Scanning is implemented by recursively traversing all indirection levels
* in an object and reading all blocks referenced from said objects. This
* results in us approximately traversing the object from lowest logical
* offset to the highest. For best performance, we would want the logical
* blocks to be physically contiguous. However, this is frequently not the
* case with pools given the allocation patterns of copy-on-write filesystems.
* So instead, we put the I/Os into a reordering queue and issue them in a
* way that will most benefit physical disks (LBA-order).
*
* Queue management:
*
* Ideally, we would want to scan all metadata and queue up all block I/O
* prior to starting to issue it, because that allows us to do an optimal
* sorting job. This can however consume large amounts of memory. Therefore
* we continuously monitor the size of the queues and constrain them to 5%
* (zfs_scan_mem_lim_fact) of physmem. If the queues grow larger than this
* limit, we clear out a few of the largest extents at the head of the queues
* to make room for more scanning. Hopefully, these extents will be fairly
* large and contiguous, allowing us to approach sequential I/O throughput
* even without a fully sorted tree.
*
* Metadata scanning takes place in dsl_scan_visit(), which is called from
* dsl_scan_sync() every spa_sync(). If we have either fully scanned all
* metadata on the pool, or we need to make room in memory because our
* queues are too large, dsl_scan_visit() is postponed and
* scan_io_queues_run() is called from dsl_scan_sync() instead. This implies
* that metadata scanning and queued I/O issuing are mutually exclusive. This
* allows us to provide maximum sequential I/O throughput for the majority of
* I/Os issued, since sequential I/O performance degrades significantly when
* it is interleaved with random I/O.
*
* Implementation Notes
*
* One side effect of the queued scanning algorithm is that the scanning code
* needs to be notified whenever a block is freed. This is needed to allow
* the scanning code to remove these I/Os from the issuing queue. Additionally,
* we do not attempt to queue gang blocks to be issued sequentially since this
* is very hard to do and would have an extremely limited performance benefit.
* Instead, we simply issue gang I/Os as soon as we find them using the legacy
* algorithm.
*
* Backwards compatibility
*
* This new algorithm is backwards compatible with the legacy on-disk data
* structures (and therefore does not require a new feature flag).
* Periodically during scanning (see zfs_scan_checkpoint_intval), the scan
* will stop scanning metadata (in logical order) and wait for all outstanding
* sorted I/O to complete. Once this is done, we write out a checkpoint
* bookmark, indicating that we have scanned everything logically before it.
* If the pool is imported on a machine without the new sorting algorithm,
* the scan simply resumes from the last checkpoint using the legacy algorithm.
*/
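/*
 * Editor's note -- illustrative only, not part of this change: a sketch of
 * the checkpoint cadence described above, using the scn_last_checkpoint
 * timestamp and the zfs_scan_checkpoint_intval tunable defined in this
 * file; the SEC_TO_TICK() conversion is assumed to be available.
 */
#if 0
static boolean_t
example_time_to_checkpoint(dsl_scan_t *scn)
{
	/*
	 * Once zfs_scan_checkpoint_intval seconds have elapsed since the
	 * last checkpoint, stop visiting new metadata and drain the sorted
	 * queues so a consistent bookmark can be written out.
	 */
	return (ddi_get_lbolt() - scn->scn_last_checkpoint >
	    SEC_TO_TICK(zfs_scan_checkpoint_intval));
}
#endif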
typedef int (scan_cb_t)(dsl_pool_t *, const blkptr_t *,
const zbookmark_phys_t *);
static scan_cb_t dsl_scan_scrub_cb;
static int scan_ds_queue_compare(const void *a, const void *b);
static int scan_prefetch_queue_compare(const void *a, const void *b);
static void scan_ds_queue_clear(dsl_scan_t *scn);
static boolean_t scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj,
uint64_t *txg);
static void scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg);
static void scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj);
static void scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx);
static uint64_t dsl_scan_count_leaves(vdev_t *vd);
extern int zfs_vdev_async_write_active_min_dirty_percent;
/*
* By default zfs will check to ensure it is not over the hard memory
* limit before each txg. If finer-grained control of this is needed
* this value can be set to 1 to enable checking before scanning each
* block.
*/
int zfs_scan_strict_mem_lim = B_FALSE;
/*
* Maximum number of concurrently in-flight scan bytes per leaf vdev. We
* attempt to strike a balance here between keeping the vdev queues full of
* I/Os at all times and not overflowing the queues, which would cause
* excessive latency and long txg sync times. No matter what, we will not
* overload the drives with I/O, since that is protected by
* zfs_vdev_scrub_max_active.
*/
unsigned long zfs_scan_vdev_limit = 4 << 20;
int zfs_scan_issue_strategy = 0;
int zfs_scan_legacy = B_FALSE; /* don't queue & sort zios, go direct */
unsigned long zfs_scan_max_ext_gap = 2 << 20; /* in bytes */
/*
* fill_weight is non-tunable at runtime, so we copy it at module init from
* zfs_scan_fill_weight. Runtime adjustments to zfs_scan_fill_weight would
* break queue sorting.
*/
int zfs_scan_fill_weight = 3;
static uint64_t fill_weight;
/* See dsl_scan_should_clear() for details on the memory limit tunables */
uint64_t zfs_scan_mem_lim_min = 16 << 20; /* bytes */
uint64_t zfs_scan_mem_lim_soft_max = 128 << 20; /* bytes */
int zfs_scan_mem_lim_fact = 20; /* fraction of physmem */
int zfs_scan_mem_lim_soft_fact = 20; /* fraction of mem lim above */
int zfs_scrub_min_time_ms = 1000; /* min millisecs to scrub per txg */
+int zfs_obsolete_min_time_ms = 500; /* min millisecs to obsolete per txg */
int zfs_free_min_time_ms = 1000; /* min millisecs to free per txg */
int zfs_resilver_min_time_ms = 3000; /* min millisecs to resilver per txg */
int zfs_scan_checkpoint_intval = 7200; /* in seconds */
int zfs_no_scrub_io = B_FALSE; /* set to disable scrub i/o */
int zfs_no_scrub_prefetch = B_FALSE; /* set to disable scrub prefetch */
enum ddt_class zfs_scrub_ddt_class_max = DDT_CLASS_DUPLICATE;
/* max number of blocks to free in a single TXG */
-unsigned long zfs_free_max_blocks = 100000;
+unsigned long zfs_async_block_max_blocks = 100000;
/*
* We wait a few txgs after importing a pool to begin scanning so that
* the import / mounting code isn't held up by scrub / resilver IO.
* Unfortunately, it is a bit difficult to determine exactly how long
* this will take since userspace will trigger fs mounts asynchronously
* and the kernel will create zvol minors asynchronously. As a result,
* the value provided here is a bit arbitrary, but represents a
* reasonable estimate of how many txgs it will take to finish fully
* importing a pool.
*/
#define SCAN_IMPORT_WAIT_TXGS 5
#define DSL_SCAN_IS_SCRUB_RESILVER(scn) \
((scn)->scn_phys.scn_func == POOL_SCAN_SCRUB || \
(scn)->scn_phys.scn_func == POOL_SCAN_RESILVER)
/*
* Enable/disable the processing of the free_bpobj object.
*/
int zfs_free_bpobj_enabled = 1;
/* the order has to match pool_scan_type */
static scan_cb_t *scan_funcs[POOL_SCAN_FUNCS] = {
NULL,
dsl_scan_scrub_cb, /* POOL_SCAN_SCRUB */
dsl_scan_scrub_cb, /* POOL_SCAN_RESILVER */
};
/* In core node for the scn->scn_queue. Represents a dataset to be scanned */
typedef struct {
uint64_t sds_dsobj;
uint64_t sds_txg;
avl_node_t sds_node;
} scan_ds_t;
/*
* This controls what conditions are placed on dsl_scan_sync_state():
* SYNC_OPTIONAL) write out scn_phys iff scn_bytes_pending == 0
* SYNC_MANDATORY) write out scn_phys always. scn_bytes_pending must be 0.
* SYNC_CACHED) if scn_bytes_pending == 0, write out scn_phys. Otherwise
* write out the scn_phys_cached version.
* See dsl_scan_sync_state for details.
*/
typedef enum {
SYNC_OPTIONAL,
SYNC_MANDATORY,
SYNC_CACHED
} state_sync_type_t;
/*
* This struct represents the minimum information needed to reconstruct a
* zio for sequential scanning. This is useful because many of these will
* accumulate in the sequential IO queues before being issued, so saving
* memory matters here.
*/
typedef struct scan_io {
/* fields from blkptr_t */
uint64_t sio_offset;
uint64_t sio_blk_prop;
uint64_t sio_phys_birth;
uint64_t sio_birth;
zio_cksum_t sio_cksum;
uint32_t sio_asize;
/* fields from zio_t */
int sio_flags;
zbookmark_phys_t sio_zb;
/* members for queue sorting */
union {
avl_node_t sio_addr_node; /* link into issuing queue */
list_node_t sio_list_node; /* link for issuing to disk */
} sio_nodes;
} scan_io_t;
struct dsl_scan_io_queue {
dsl_scan_t *q_scn; /* associated dsl_scan_t */
vdev_t *q_vd; /* top-level vdev that this queue represents */
/* trees used for sorting I/Os and extents of I/Os */
range_tree_t *q_exts_by_addr;
avl_tree_t q_exts_by_size;
avl_tree_t q_sios_by_addr;
/* members for zio rate limiting */
uint64_t q_maxinflight_bytes;
uint64_t q_inflight_bytes;
kcondvar_t q_zio_cv; /* used under vd->vdev_scan_io_queue_lock */
/* per txg statistics */
uint64_t q_total_seg_size_this_txg;
uint64_t q_segs_this_txg;
uint64_t q_total_zio_size_this_txg;
uint64_t q_zios_this_txg;
};
/* private data for dsl_scan_prefetch_cb() */
typedef struct scan_prefetch_ctx {
refcount_t spc_refcnt; /* refcount for memory management */
dsl_scan_t *spc_scn; /* dsl_scan_t for the pool */
boolean_t spc_root; /* is this prefetch for an objset? */
uint8_t spc_indblkshift; /* dn_indblkshift of current dnode */
uint16_t spc_datablkszsec; /* dn_datablkszsec of current dnode */
} scan_prefetch_ctx_t;
/* private data for dsl_scan_prefetch() */
typedef struct scan_prefetch_issue_ctx {
avl_node_t spic_avl_node; /* link into scn->scn_prefetch_queue */
scan_prefetch_ctx_t *spic_spc; /* spc for the callback */
blkptr_t spic_bp; /* bp to prefetch */
zbookmark_phys_t spic_zb; /* bookmark to prefetch */
} scan_prefetch_issue_ctx_t;
static void scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue);
static void scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue,
scan_io_t *sio);
static dsl_scan_io_queue_t *scan_io_queue_create(vdev_t *vd);
static void scan_io_queues_destroy(dsl_scan_t *scn);
static kmem_cache_t *sio_cache;
void
scan_init(void)
{
/*
* This is used in ext_size_compare() to weight segments
* based on how sparse they are. This cannot be changed
* mid-scan and the tree comparison functions don't currently
* have a mechanism for passing additional context to the
* compare functions. Thus we store this value globally and
* we only allow it to be set at module initialization time.
*/
fill_weight = zfs_scan_fill_weight;
sio_cache = kmem_cache_create("sio_cache",
sizeof (scan_io_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
scan_fini(void)
{
kmem_cache_destroy(sio_cache);
}
static inline boolean_t
dsl_scan_is_running(const dsl_scan_t *scn)
{
return (scn->scn_phys.scn_state == DSS_SCANNING);
}
boolean_t
dsl_scan_resilvering(dsl_pool_t *dp)
{
return (dsl_scan_is_running(dp->dp_scan) &&
dp->dp_scan->scn_phys.scn_func == POOL_SCAN_RESILVER);
}
static inline void
sio2bp(const scan_io_t *sio, blkptr_t *bp, uint64_t vdev_id)
{
bzero(bp, sizeof (*bp));
DVA_SET_ASIZE(&bp->blk_dva[0], sio->sio_asize);
DVA_SET_VDEV(&bp->blk_dva[0], vdev_id);
DVA_SET_OFFSET(&bp->blk_dva[0], sio->sio_offset);
bp->blk_prop = sio->sio_blk_prop;
bp->blk_phys_birth = sio->sio_phys_birth;
bp->blk_birth = sio->sio_birth;
bp->blk_fill = 1; /* we always only work with data pointers */
bp->blk_cksum = sio->sio_cksum;
}
static inline void
bp2sio(const blkptr_t *bp, scan_io_t *sio, int dva_i)
{
/* we discard the vdev id, since we can deduce it from the queue */
sio->sio_offset = DVA_GET_OFFSET(&bp->blk_dva[dva_i]);
sio->sio_asize = DVA_GET_ASIZE(&bp->blk_dva[dva_i]);
sio->sio_blk_prop = bp->blk_prop;
sio->sio_phys_birth = bp->blk_phys_birth;
sio->sio_birth = bp->blk_birth;
sio->sio_cksum = bp->blk_cksum;
}
int
dsl_scan_init(dsl_pool_t *dp, uint64_t txg)
{
int err;
dsl_scan_t *scn;
spa_t *spa = dp->dp_spa;
uint64_t f;
scn = dp->dp_scan = kmem_zalloc(sizeof (dsl_scan_t), KM_SLEEP);
scn->scn_dp = dp;
/*
* It's possible that we're resuming a scan after a reboot so
* make sure that the scan_async_destroying flag is initialized
* appropriately.
*/
ASSERT(!scn->scn_async_destroying);
scn->scn_async_destroying = spa_feature_is_active(dp->dp_spa,
SPA_FEATURE_ASYNC_DESTROY);
/*
* Calculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB). Limits for the issuing
* phase are done per top-level vdev and are handled separately.
*/
scn->scn_maxinflight_bytes = MAX(zfs_scan_vdev_limit *
dsl_scan_count_leaves(spa->spa_root_vdev), 1ULL << 20);
bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
avl_create(&scn->scn_queue, scan_ds_queue_compare, sizeof (scan_ds_t),
offsetof(scan_ds_t, sds_node));
avl_create(&scn->scn_prefetch_queue, scan_prefetch_queue_compare,
sizeof (scan_prefetch_issue_ctx_t),
offsetof(scan_prefetch_issue_ctx_t, spic_avl_node));
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_func", sizeof (uint64_t), 1, &f);
if (err == 0) {
/*
* There was an old-style scrub in progress. Restart a
* new-style scrub from the beginning.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("old-style scrub was in progress; "
"restarting new-style scrub in txg %llu",
(longlong_t)scn->scn_restart_txg);
/*
* Load the queue obj from the old location so that it
* can be freed by dsl_scan_done().
*/
(void) zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
"scrub_queue", sizeof (uint64_t), 1,
&scn->scn_phys.scn_queue_obj);
} else {
err = zap_lookup(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys);
/*
* Detect if the pool contains the signature of #2094. If it
* does, properly update the scn->scn_phys structure and notify
* the administrator by setting an errata for the pool.
*/
if (err == EOVERFLOW) {
uint64_t zaptmp[SCAN_PHYS_NUMINTS + 1];
VERIFY3S(SCAN_PHYS_NUMINTS, ==, 24);
VERIFY3S(offsetof(dsl_scan_phys_t, scn_flags), ==,
(23 * sizeof (uint64_t)));
err = zap_lookup(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SCAN,
sizeof (uint64_t), SCAN_PHYS_NUMINTS + 1, &zaptmp);
if (err == 0) {
uint64_t overflow = zaptmp[SCAN_PHYS_NUMINTS];
if (overflow & ~DSL_SCAN_FLAGS_MASK ||
scn->scn_async_destroying) {
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_ASYNC_DESTROY;
return (EOVERFLOW);
}
bcopy(zaptmp, &scn->scn_phys,
SCAN_PHYS_NUMINTS * sizeof (uint64_t));
scn->scn_phys.scn_flags = overflow;
/* Required scrub already in progress. */
if (scn->scn_phys.scn_state == DSS_FINISHED ||
scn->scn_phys.scn_state == DSS_CANCELED)
spa->spa_errata =
ZPOOL_ERRATA_ZOL_2094_SCRUB;
}
}
if (err == ENOENT)
return (0);
else if (err)
return (err);
/*
* We might be restarting after a reboot, so jump the issued
* counter to how far we've scanned. We know we're consistent
* up to here.
*/
scn->scn_issued_before_pass = scn->scn_phys.scn_examined;
if (dsl_scan_is_running(scn) &&
spa_prev_software_version(dp->dp_spa) < SPA_VERSION_SCAN) {
/*
* A new-type scrub was in progress on an old
* pool, and the pool was accessed by old
* software. Restart from the beginning, since
* the old software may have changed the pool in
* the meantime.
*/
scn->scn_restart_txg = txg;
zfs_dbgmsg("new-style scrub was modified "
"by old software; restarting in txg %llu",
(longlong_t)scn->scn_restart_txg);
}
}
/* reload the queue into the in-core state */
if (scn->scn_phys.scn_queue_obj != 0) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
za.za_first_integer);
}
zap_cursor_fini(&zc);
}
spa_scan_stat_init(spa);
return (0);
}
void
dsl_scan_fini(dsl_pool_t *dp)
{
if (dp->dp_scan != NULL) {
dsl_scan_t *scn = dp->dp_scan;
if (scn->scn_taskq != NULL)
taskq_destroy(scn->scn_taskq);
scan_ds_queue_clear(scn);
avl_destroy(&scn->scn_queue);
avl_destroy(&scn->scn_prefetch_queue);
kmem_free(dp->dp_scan, sizeof (dsl_scan_t));
dp->dp_scan = NULL;
}
}
static boolean_t
dsl_scan_restarting(dsl_scan_t *scn, dmu_tx_t *tx)
{
return (scn->scn_restart_txg != 0 &&
scn->scn_restart_txg <= tx->tx_txg);
}
boolean_t
dsl_scan_scrubbing(const dsl_pool_t *dp)
{
dsl_scan_phys_t *scn_phys = &dp->dp_scan->scn_phys;
return (scn_phys->scn_state == DSS_SCANNING &&
scn_phys->scn_func == POOL_SCAN_SCRUB);
}
boolean_t
dsl_scan_is_paused_scrub(const dsl_scan_t *scn)
{
return (dsl_scan_scrubbing(scn->scn_dp) &&
scn->scn_phys.scn_flags & DSF_SCRUB_PAUSED);
}
/*
* Writes out a persistent dsl_scan_phys_t record to the pool directory.
* Because we can be running in the block sorting algorithm, we do not always
* want to write out the record, only when it is "safe" to do so. This safety
* condition is achieved by making sure that the sorting queues are empty
* (scn_bytes_pending == 0). When this condition is not true, the sync'd state
* is inconsistent with how much actual scanning progress has been made. The
* kind of sync to be performed is specified by the sync_type argument. If the
* sync is optional, we only sync if the queues are empty. If the sync is
* mandatory, we do a hard ASSERT to make sure that the queues are empty. The
* third possible state is a "cached" sync. This is done in response to:
* 1) The dataset that was in the last sync'd dsl_scan_phys_t having been
* destroyed, so we wouldn't be able to restart scanning from it.
* 2) The snapshot that was in the last sync'd dsl_scan_phys_t having been
* superseded by a newer snapshot.
* 3) The dataset that was in the last sync'd dsl_scan_phys_t having been
* swapped with its clone.
* In all cases, a cached sync simply rewrites the last record we've written,
* just slightly modified. For the modifications that are performed to the
* last written dsl_scan_phys_t, see dsl_scan_ds_destroyed,
* dsl_scan_ds_snapshotted and dsl_scan_ds_clone_swapped.
*/
static void
dsl_scan_sync_state(dsl_scan_t *scn, dmu_tx_t *tx, state_sync_type_t sync_type)
{
int i;
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(sync_type != SYNC_MANDATORY || scn->scn_bytes_pending == 0);
if (scn->scn_bytes_pending == 0) {
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *q = vd->vdev_scan_io_queue;
if (q == NULL)
continue;
mutex_enter(&vd->vdev_scan_io_queue_lock);
ASSERT3P(avl_first(&q->q_sios_by_addr), ==, NULL);
ASSERT3P(avl_first(&q->q_exts_by_size), ==, NULL);
ASSERT3P(range_tree_first(q->q_exts_by_addr), ==, NULL);
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
if (scn->scn_phys.scn_queue_obj != 0)
scan_ds_queue_sync(scn, tx);
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys, tx));
bcopy(&scn->scn_phys, &scn->scn_phys_cached,
sizeof (scn->scn_phys));
if (scn->scn_checkpointing)
zfs_dbgmsg("finish scan checkpoint");
scn->scn_checkpointing = B_FALSE;
scn->scn_last_checkpoint = ddi_get_lbolt();
} else if (sync_type == SYNC_CACHED) {
VERIFY0(zap_update(scn->scn_dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_SCAN, sizeof (uint64_t), SCAN_PHYS_NUMINTS,
&scn->scn_phys_cached, tx));
}
}
/* ARGSUSED */
static int
dsl_scan_setup_check(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (dsl_scan_is_running(scn))
return (SET_ERROR(EBUSY));
return (0);
}
static void
dsl_scan_setup_sync(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
pool_scan_func_t *funcp = arg;
dmu_object_type_t ot = 0;
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
ASSERT(!dsl_scan_is_running(scn));
ASSERT(*funcp > POOL_SCAN_NONE && *funcp < POOL_SCAN_FUNCS);
bzero(&scn->scn_phys, sizeof (scn->scn_phys));
scn->scn_phys.scn_func = *funcp;
scn->scn_phys.scn_state = DSS_SCANNING;
scn->scn_phys.scn_min_txg = 0;
scn->scn_phys.scn_max_txg = tx->tx_txg;
scn->scn_phys.scn_ddt_class_max = DDT_CLASSES - 1; /* the entire DDT */
scn->scn_phys.scn_start_time = gethrestime_sec();
scn->scn_phys.scn_errors = 0;
scn->scn_phys.scn_to_examine = spa->spa_root_vdev->vdev_stat.vs_alloc;
scn->scn_issued_before_pass = 0;
scn->scn_restart_txg = 0;
scn->scn_done_txg = 0;
scn->scn_last_checkpoint = 0;
scn->scn_checkpointing = B_FALSE;
spa_scan_stat_init(spa);
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
scn->scn_phys.scn_ddt_class_max = zfs_scrub_ddt_class_max;
/* rewrite all disk labels */
vdev_config_dirty(spa->spa_root_vdev);
if (vdev_resilver_needed(spa->spa_root_vdev,
&scn->scn_phys.scn_min_txg, &scn->scn_phys.scn_max_txg)) {
spa_event_notify(spa, NULL, NULL,
ESC_ZFS_RESILVER_START);
} else {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_START);
}
spa->spa_scrub_started = B_TRUE;
/*
* If this is an incremental scrub, limit the DDT scrub phase
* to just the auto-ditto class (for correctness); the rest
* of the scrub should go faster using top-down pruning.
*/
if (scn->scn_phys.scn_min_txg > TXG_INITIAL)
scn->scn_phys.scn_ddt_class_max = DDT_CLASS_DITTO;
}
/* back to the generic stuff */
if (dp->dp_blkstats == NULL) {
dp->dp_blkstats =
vmem_alloc(sizeof (zfs_all_blkstats_t), KM_SLEEP);
mutex_init(&dp->dp_blkstats->zab_lock, NULL,
MUTEX_DEFAULT, NULL);
}
bzero(&dp->dp_blkstats->zab_type, sizeof (dp->dp_blkstats->zab_type));
if (spa_version(spa) < SPA_VERSION_DSL_SCRUB)
ot = DMU_OT_ZAP_OTHER;
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset,
ot ? ot : DMU_OT_SCAN_QUEUE, DMU_OT_NONE, 0, tx);
bcopy(&scn->scn_phys, &scn->scn_phys_cached, sizeof (scn->scn_phys));
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_history_log_internal(spa, "scan setup", tx,
"func=%u mintxg=%llu maxtxg=%llu",
*funcp, scn->scn_phys.scn_min_txg, scn->scn_phys.scn_max_txg);
}
/*
* Called by the ZFS_IOC_POOL_SCAN ioctl to start a scrub or resilver.
* Can also be called to resume a paused scrub.
*/
int
dsl_scan(dsl_pool_t *dp, pool_scan_func_t func)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
/*
* Purge all vdev caches and probe all devices. We do this here
* rather than in sync context because this requires a writer lock
* on the spa_config lock, which we can't do from sync context. The
* spa_scrub_reopen flag indicates that vdev_open() should not
* attempt to start another scrub.
*/
spa_vdev_state_enter(spa, SCL_NONE);
spa->spa_scrub_reopen = B_TRUE;
vdev_reopen(spa->spa_root_vdev);
spa->spa_scrub_reopen = B_FALSE;
(void) spa_vdev_state_exit(spa, NULL, 0);
if (func == POOL_SCAN_SCRUB && dsl_scan_is_paused_scrub(scn)) {
/* got scrub start cmd, resume paused scrub */
int err = dsl_scrub_set_pause_resume(scn->scn_dp,
POOL_SCRUB_NORMAL);
if (err == 0) {
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_RESUME);
return (ECANCELED);
}
return (SET_ERROR(err));
}
return (dsl_sync_task(spa_name(spa), dsl_scan_setup_check,
dsl_scan_setup_sync, &func, 0, ZFS_SPACE_CHECK_NONE));
}
/* ARGSUSED */
static void
dsl_scan_done(dsl_scan_t *scn, boolean_t complete, dmu_tx_t *tx)
{
static const char *old_names[] = {
"scrub_bookmark",
"scrub_ddt_bookmark",
"scrub_ddt_class_max",
"scrub_queue",
"scrub_min_txg",
"scrub_max_txg",
"scrub_func",
"scrub_errors",
NULL
};
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
int i;
/* Remove any remnants of an old-style scrub. */
for (i = 0; old_names[i]; i++) {
(void) zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, old_names[i], tx);
}
if (scn->scn_phys.scn_queue_obj != 0) {
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = 0;
}
scan_ds_queue_clear(scn);
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
/*
* If we were "restarted" from a stopped state, don't bother
* with anything else.
*/
if (!dsl_scan_is_running(scn)) {
ASSERT(!scn->scn_is_sorted);
return;
}
if (scn->scn_is_sorted) {
scan_io_queues_destroy(scn);
scn->scn_is_sorted = B_FALSE;
if (scn->scn_taskq != NULL) {
taskq_destroy(scn->scn_taskq);
scn->scn_taskq = NULL;
}
}
scn->scn_phys.scn_state = complete ? DSS_FINISHED : DSS_CANCELED;
if (dsl_scan_restarting(scn, tx))
spa_history_log_internal(spa, "scan aborted, restarting", tx,
"errors=%llu", spa_get_errlog_size(spa));
else if (!complete)
spa_history_log_internal(spa, "scan cancelled", tx,
"errors=%llu", spa_get_errlog_size(spa));
else
spa_history_log_internal(spa, "scan done", tx,
"errors=%llu", spa_get_errlog_size(spa));
if (DSL_SCAN_IS_SCRUB_RESILVER(scn)) {
spa->spa_scrub_started = B_FALSE;
spa->spa_scrub_active = B_FALSE;
/*
* If the scrub/resilver completed, update all DTLs to
* reflect this. Whether it succeeded or not, vacate
* all temporary scrub DTLs.
*/
vdev_dtl_reassess(spa->spa_root_vdev, tx->tx_txg,
complete ? scn->scn_phys.scn_max_txg : 0, B_TRUE);
if (complete) {
spa_event_notify(spa, NULL, NULL,
scn->scn_phys.scn_min_txg ?
ESC_ZFS_RESILVER_FINISH : ESC_ZFS_SCRUB_FINISH);
}
spa_errlog_rotate(spa);
/*
* We may have finished replacing a device.
* Let the async thread assess this and handle the detach.
*/
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
}
scn->scn_phys.scn_end_time = gethrestime_sec();
if (spa->spa_errata == ZPOOL_ERRATA_ZOL_2094_SCRUB)
spa->spa_errata = 0;
ASSERT(!dsl_scan_is_running(scn));
}
/* ARGSUSED */
static int
dsl_scan_cancel_check(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
if (!dsl_scan_is_running(scn))
return (SET_ERROR(ENOENT));
return (0);
}
/* ARGSUSED */
static void
dsl_scan_cancel_sync(void *arg, dmu_tx_t *tx)
{
dsl_scan_t *scn = dmu_tx_pool(tx)->dp_scan;
dsl_scan_done(scn, B_FALSE, tx);
dsl_scan_sync_state(scn, tx, SYNC_MANDATORY);
spa_event_notify(scn->scn_dp->dp_spa, NULL, NULL, ESC_ZFS_SCRUB_ABORT);
}
int
dsl_scan_cancel(dsl_pool_t *dp)
{
return (dsl_sync_task(spa_name(dp->dp_spa), dsl_scan_cancel_check,
dsl_scan_cancel_sync, NULL, 3, ZFS_SPACE_CHECK_RESERVED));
}
static int
dsl_scrub_pause_resume_check(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* can't pause a scrub when there is no in-progress scrub */
if (!dsl_scan_scrubbing(dp))
return (SET_ERROR(ENOENT));
/* can't pause a paused scrub */
if (dsl_scan_is_paused_scrub(scn))
return (SET_ERROR(EBUSY));
} else if (*cmd != POOL_SCRUB_NORMAL) {
return (SET_ERROR(ENOTSUP));
}
return (0);
}
static void
dsl_scrub_pause_resume_sync(void *arg, dmu_tx_t *tx)
{
pool_scrub_cmd_t *cmd = arg;
dsl_pool_t *dp = dmu_tx_pool(tx);
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
if (*cmd == POOL_SCRUB_PAUSE) {
/* record when the pause started so the scrub rate can be adjusted */
spa->spa_scan_pass_scrub_pause = gethrestime_sec();
scn->scn_phys.scn_flags |= DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_SCRUB_PAUSED);
} else {
ASSERT3U(*cmd, ==, POOL_SCRUB_NORMAL);
if (dsl_scan_is_paused_scrub(scn)) {
/*
* We need to keep track of how much time we spend
* paused per pass so that we can adjust the scrub rate
* shown in the output of 'zpool status'.
*/
spa->spa_scan_pass_scrub_spent_paused +=
gethrestime_sec() - spa->spa_scan_pass_scrub_pause;
spa->spa_scan_pass_scrub_pause = 0;
scn->scn_phys.scn_flags &= ~DSF_SCRUB_PAUSED;
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
}
}
/*
* Set scrub pause/resume state if it makes sense to do so
*/
int
dsl_scrub_set_pause_resume(const dsl_pool_t *dp, pool_scrub_cmd_t cmd)
{
return (dsl_sync_task(spa_name(dp->dp_spa),
dsl_scrub_pause_resume_check, dsl_scrub_pause_resume_sync, &cmd, 3,
ZFS_SPACE_CHECK_RESERVED));
}
/* start a new scan, or restart an existing one. */
void
dsl_resilver_restart(dsl_pool_t *dp, uint64_t txg)
{
if (txg == 0) {
dmu_tx_t *tx;
tx = dmu_tx_create_dd(dp->dp_mos_dir);
VERIFY(0 == dmu_tx_assign(tx, TXG_WAIT));
txg = dmu_tx_get_txg(tx);
dp->dp_scan->scn_restart_txg = txg;
dmu_tx_commit(tx);
} else {
dp->dp_scan->scn_restart_txg = txg;
}
zfs_dbgmsg("restarting resilver txg=%llu", (longlong_t)txg);
}
void
dsl_free(dsl_pool_t *dp, uint64_t txg, const blkptr_t *bp)
{
zio_free(dp->dp_spa, txg, bp);
}
void
dsl_free_sync(zio_t *pio, dsl_pool_t *dp, uint64_t txg, const blkptr_t *bpp)
{
ASSERT(dsl_pool_sync_context(dp));
zio_nowait(zio_free_sync(pio, dp->dp_spa, txg, bpp, pio->io_flags));
}
static int
scan_ds_queue_compare(const void *a, const void *b)
{
const scan_ds_t *sds_a = a, *sds_b = b;
if (sds_a->sds_dsobj < sds_b->sds_dsobj)
return (-1);
if (sds_a->sds_dsobj == sds_b->sds_dsobj)
return (0);
return (1);
}
static void
scan_ds_queue_clear(dsl_scan_t *scn)
{
void *cookie = NULL;
scan_ds_t *sds;
while ((sds = avl_destroy_nodes(&scn->scn_queue, &cookie)) != NULL) {
kmem_free(sds, sizeof (*sds));
}
}
static boolean_t
scan_ds_queue_contains(dsl_scan_t *scn, uint64_t dsobj, uint64_t *txg)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
if (sds != NULL && txg != NULL)
*txg = sds->sds_txg;
return (sds != NULL);
}
static void
scan_ds_queue_insert(dsl_scan_t *scn, uint64_t dsobj, uint64_t txg)
{
scan_ds_t *sds;
avl_index_t where;
sds = kmem_zalloc(sizeof (*sds), KM_SLEEP);
sds->sds_dsobj = dsobj;
sds->sds_txg = txg;
VERIFY3P(avl_find(&scn->scn_queue, sds, &where), ==, NULL);
avl_insert(&scn->scn_queue, sds, where);
}
static void
scan_ds_queue_remove(dsl_scan_t *scn, uint64_t dsobj)
{
scan_ds_t srch, *sds;
srch.sds_dsobj = dsobj;
sds = avl_find(&scn->scn_queue, &srch, NULL);
VERIFY(sds != NULL);
avl_remove(&scn->scn_queue, sds);
kmem_free(sds, sizeof (*sds));
}
static void
scan_ds_queue_sync(dsl_scan_t *scn, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
spa_t *spa = dp->dp_spa;
dmu_object_type_t ot = (spa_version(spa) >= SPA_VERSION_DSL_SCRUB) ?
DMU_OT_SCAN_QUEUE : DMU_OT_ZAP_OTHER;
ASSERT0(scn->scn_bytes_pending);
ASSERT(scn->scn_phys.scn_queue_obj != 0);
VERIFY0(dmu_object_free(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, tx));
scn->scn_phys.scn_queue_obj = zap_create(dp->dp_meta_objset, ot,
DMU_OT_NONE, 0, tx);
for (scan_ds_t *sds = avl_first(&scn->scn_queue);
sds != NULL; sds = AVL_NEXT(&scn->scn_queue, sds)) {
VERIFY0(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, sds->sds_dsobj,
sds->sds_txg, tx));
}
}
/*
* Computes the memory limit state that we're currently in. A sorted scan
* needs quite a bit of memory to hold the sorting queue, so we constrain
* its size within reasonable bounds so it doesn't impact overall system
* performance. We compute two limits:
* 1) Hard memory limit: if the amount of memory used by the sorting
* queues on a pool gets above this value, we stop the metadata
* scanning portion and start issuing the queued up and sorted
* I/Os to reduce memory usage.
* This limit is calculated as a fraction of physmem (by default 5%).
* We constrain the lower bound of the hard limit to an absolute
* minimum of zfs_scan_mem_lim_min (default: 16 MiB). We also constrain
* the upper bound to 5% of the total pool size - no chance we'll
* ever need that much memory, but just to keep the value in check.
* 2) Soft memory limit: once we hit the hard memory limit, we start
* issuing I/O to reduce queue memory usage, but we don't want to
* completely empty out the queues, since we might be able to find I/Os
* that will fill in the gaps of our non-sequential IOs at some point
* in the future. So we stop the issuing of I/Os once the amount of
* memory used drops below the soft limit (at which point we stop issuing
* I/O and start scanning metadata again).
*
* This limit is calculated by subtracting a fraction of the hard
* limit from the hard limit. By default this fraction is 5%, so
* the soft limit is 95% of the hard limit. We cap the size of the
* difference between the hard and soft limits at an absolute
* maximum of zfs_scan_mem_lim_soft_max (default: 128 MiB) - this is
* sufficient to not cause too frequent switching between the
* metadata scan and I/O issue (even at 2k recordsize, 128 MiB's
* worth of queues is about 1.2 GiB of on-pool data, so scanning
* that should take at least a decent fraction of a second).
*/
static boolean_t
dsl_scan_should_clear(dsl_scan_t *scn)
{
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
uint64_t mlim_hard, mlim_soft, mused;
uint64_t alloc = metaslab_class_get_alloc(spa_normal_class(
scn->scn_dp->dp_spa));
mlim_hard = MAX((physmem / zfs_scan_mem_lim_fact) * PAGESIZE,
zfs_scan_mem_lim_min);
mlim_hard = MIN(mlim_hard, alloc / 20);
mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
zfs_scan_mem_lim_soft_max);
mused = 0;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
dsl_scan_io_queue_t *queue;
mutex_enter(&tvd->vdev_scan_io_queue_lock);
queue = tvd->vdev_scan_io_queue;
if (queue != NULL) {
/* #extents in exts_by_size = # in exts_by_addr */
mused += avl_numnodes(&queue->q_exts_by_size) *
sizeof (range_seg_t) +
avl_numnodes(&queue->q_sios_by_addr) *
sizeof (scan_io_t);
}
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
dprintf("current scan memory usage: %llu bytes\n", (longlong_t)mused);
if (mused == 0)
ASSERT0(scn->scn_bytes_pending);
/*
* If we are above our hard limit, we need to clear out memory.
* If we are below our soft limit, we need to accumulate sequential IOs.
* Otherwise, we should keep doing whatever we are currently doing.
*/
if (mused >= mlim_hard)
return (B_TRUE);
else if (mused < mlim_soft)
return (B_FALSE);
else
return (scn->scn_clearing);
}
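/*
 * Editor's note -- illustrative only, not part of this change: a worked
 * example of the limit computation above, assuming a machine with 8 GiB
 * of physical memory and the default tunables.  The real code scales the
 * physmem page count by PAGESIZE and additionally caps the hard limit at
 * 5% of allocated pool space; both are omitted here for brevity.
 */
#if 0
static void
example_scan_mem_limits(void)
{
	uint64_t phys_bytes = 8ULL << 30;	/* assumed 8 GiB of RAM */
	uint64_t mlim_hard, mlim_soft;

	/* hard limit: 8 GiB / 20 = ~410 MiB, floored at 16 MiB */
	mlim_hard = MAX(phys_bytes / zfs_scan_mem_lim_fact,
	    zfs_scan_mem_lim_min);

	/*
	 * soft limit: hard - MIN(hard / 20, 128 MiB)
	 *	     = ~410 MiB - ~20 MiB = ~390 MiB
	 */
	mlim_soft = mlim_hard - MIN(mlim_hard / zfs_scan_mem_lim_soft_fact,
	    zfs_scan_mem_lim_soft_max);

	(void) mlim_soft;
}
#endif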
static boolean_t
dsl_scan_check_suspend(dsl_scan_t *scn, const zbookmark_phys_t *zb)
{
/* we never skip user/group accounting objects */
if (zb && (int64_t)zb->zb_object < 0)
return (B_FALSE);
if (scn->scn_suspending)
return (B_TRUE); /* we're already suspending */
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark))
return (B_FALSE); /* we're resuming */
/* We only know how to resume from level-0 blocks. */
if (zb && zb->zb_level != 0)
return (B_FALSE);
/*
* We suspend if:
* - we have scanned for at least the minimum time (default 1 sec
* for scrub, 3 sec for resilver), and either we have sufficient
* dirty data that we are starting to write more quickly
* (default 30%), someone is explicitly waiting for this txg
* to complete, or we have used up all of the time in the txg
* timeout (default 5 sec).
* or
* - the spa is shutting down because this pool is being exported
* or the machine is rebooting.
* or
* - the scan queue has reached its memory use limit
*/
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
if ((NSEC2MSEC(scan_time_ns) > mintime &&
(dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa) ||
(zfs_scan_strict_mem_lim && dsl_scan_should_clear(scn))) {
if (zb) {
dprintf("suspending at bookmark %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
scn->scn_phys.scn_bookmark = *zb;
} else {
#ifdef ZFS_DEBUG
dsl_scan_phys_t *scnp = &scn->scn_phys;
dprintf("suspending at at DDT bookmark "
"%llx/%llx/%llx/%llx\n",
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
#endif
}
scn->scn_suspending = B_TRUE;
return (B_TRUE);
}
return (B_FALSE);
}
typedef struct zil_scan_arg {
dsl_pool_t *zsa_dp;
zil_header_t *zsa_zh;
} zil_scan_arg_t;
/* ARGSUSED */
static int
dsl_scan_zil_block(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
zbookmark_phys_t zb;
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* One block ("stubby") can be allocated a long time ago; we
* want to visit that one because it has been allocated
* (on-disk) even if it hasn't been claimed (even though for
* scrub there's nothing to do to it).
*/
if (claim_txg == 0 && bp->blk_birth >= spa_first_txg(dp->dp_spa))
return (0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
return (0);
}
/* ARGSUSED */
static int
dsl_scan_zil_record(zilog_t *zilog, lr_t *lrc, void *arg, uint64_t claim_txg)
{
if (lrc->lrc_txtype == TX_WRITE) {
zil_scan_arg_t *zsa = arg;
dsl_pool_t *dp = zsa->zsa_dp;
dsl_scan_t *scn = dp->dp_scan;
zil_header_t *zh = zsa->zsa_zh;
lr_write_t *lr = (lr_write_t *)lrc;
blkptr_t *bp = &lr->lr_blkptr;
zbookmark_phys_t zb;
if (BP_IS_HOLE(bp) ||
bp->blk_birth <= scn->scn_phys.scn_cur_min_txg)
return (0);
/*
* birth can be < claim_txg if this record's txg is
* already txg sync'ed (but this log block contains
* other records that are not synced)
*/
if (claim_txg == 0 || bp->blk_birth < claim_txg)
return (0);
SET_BOOKMARK(&zb, zh->zh_log.blk_cksum.zc_word[ZIL_ZC_OBJSET],
lr->lr_foid, ZB_ZIL_LEVEL,
lr->lr_offset / BP_GET_LSIZE(bp));
VERIFY(0 == scan_funcs[scn->scn_phys.scn_func](dp, bp, &zb));
}
return (0);
}
static void
dsl_scan_zil(dsl_pool_t *dp, zil_header_t *zh)
{
uint64_t claim_txg = zh->zh_claim_txg;
zil_scan_arg_t zsa = { dp, zh };
zilog_t *zilog;
/*
* We only want to visit blocks that have been claimed but not yet
* replayed (or, in read-only mode, blocks that *would* be claimed).
*/
if (claim_txg == 0 && spa_writeable(dp->dp_spa))
return;
zilog = zil_alloc(dp->dp_meta_objset, zh);
(void) zil_parse(zilog, dsl_scan_zil_block, dsl_scan_zil_record, &zsa,
claim_txg, B_FALSE);
zil_free(zilog);
}
/*
* We compare scan_prefetch_issue_ctx_t's based on their bookmarks. The idea
* here is to sort the AVL tree by the order each block will be needed.
*/
static int
scan_prefetch_queue_compare(const void *a, const void *b)
{
const scan_prefetch_issue_ctx_t *spic_a = a, *spic_b = b;
const scan_prefetch_ctx_t *spc_a = spic_a->spic_spc;
const scan_prefetch_ctx_t *spc_b = spic_b->spic_spc;
return (zbookmark_compare(spc_a->spc_datablkszsec,
spc_a->spc_indblkshift, spc_b->spc_datablkszsec,
spc_b->spc_indblkshift, &spic_a->spic_zb, &spic_b->spic_zb));
}
static void
scan_prefetch_ctx_rele(scan_prefetch_ctx_t *spc, void *tag)
{
if (refcount_remove(&spc->spc_refcnt, tag) == 0) {
refcount_destroy(&spc->spc_refcnt);
kmem_free(spc, sizeof (scan_prefetch_ctx_t));
}
}
static scan_prefetch_ctx_t *
scan_prefetch_ctx_create(dsl_scan_t *scn, dnode_phys_t *dnp, void *tag)
{
scan_prefetch_ctx_t *spc;
spc = kmem_alloc(sizeof (scan_prefetch_ctx_t), KM_SLEEP);
refcount_create(&spc->spc_refcnt);
refcount_add(&spc->spc_refcnt, tag);
spc->spc_scn = scn;
if (dnp != NULL) {
spc->spc_datablkszsec = dnp->dn_datablkszsec;
spc->spc_indblkshift = dnp->dn_indblkshift;
spc->spc_root = B_FALSE;
} else {
spc->spc_datablkszsec = 0;
spc->spc_indblkshift = 0;
spc->spc_root = B_TRUE;
}
return (spc);
}
static void
scan_prefetch_ctx_add_ref(scan_prefetch_ctx_t *spc, void *tag)
{
refcount_add(&spc->spc_refcnt, tag);
}
static boolean_t
dsl_scan_check_prefetch_resume(scan_prefetch_ctx_t *spc,
const zbookmark_phys_t *zb)
{
zbookmark_phys_t *last_zb = &spc->spc_scn->scn_prefetch_bookmark;
dnode_phys_t tmp_dnp;
dnode_phys_t *dnp = (spc->spc_root) ? NULL : &tmp_dnp;
if (zb->zb_objset != last_zb->zb_objset)
return (B_TRUE);
if ((int64_t)zb->zb_object < 0)
return (B_FALSE);
tmp_dnp.dn_datablkszsec = spc->spc_datablkszsec;
tmp_dnp.dn_indblkshift = spc->spc_indblkshift;
if (zbookmark_subtree_completed(dnp, zb, last_zb))
return (B_TRUE);
return (B_FALSE);
}
static void
dsl_scan_prefetch(scan_prefetch_ctx_t *spc, blkptr_t *bp, zbookmark_phys_t *zb)
{
avl_index_t idx;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
if (zfs_no_scrub_prefetch)
return;
if (BP_IS_HOLE(bp) || bp->blk_birth <= scn->scn_phys.scn_cur_min_txg ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_DNODE &&
BP_GET_TYPE(bp) != DMU_OT_OBJSET))
return;
if (dsl_scan_check_prefetch_resume(spc, zb))
return;
scan_prefetch_ctx_add_ref(spc, scn);
spic = kmem_alloc(sizeof (scan_prefetch_issue_ctx_t), KM_SLEEP);
spic->spic_spc = spc;
spic->spic_bp = *bp;
spic->spic_zb = *zb;
/*
* Add the IO to the queue of blocks to prefetch. This allows us to
* prioritize blocks that we will need first for the main traversal
* thread.
*/
mutex_enter(&spa->spa_scrub_lock);
if (avl_find(&scn->scn_prefetch_queue, spic, &idx) != NULL) {
/* this block is already queued for prefetch */
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
scan_prefetch_ctx_rele(spc, scn);
mutex_exit(&spa->spa_scrub_lock);
return;
}
avl_insert(&scn->scn_prefetch_queue, spic, idx);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
static void
dsl_scan_prefetch_dnode(dsl_scan_t *scn, dnode_phys_t *dnp,
uint64_t objset, uint64_t object)
{
int i;
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
if (dnp->dn_nblkptr == 0 && !(dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR))
return;
SET_BOOKMARK(&zb, objset, object, 0, 0);
spc = scan_prefetch_ctx_create(scn, dnp, FTAG);
for (i = 0; i < dnp->dn_nblkptr; i++) {
zb.zb_level = BP_GET_LEVEL(&dnp->dn_blkptr[i]);
zb.zb_blkid = i;
dsl_scan_prefetch(spc, &dnp->dn_blkptr[i], &zb);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zb.zb_level = 0;
zb.zb_blkid = DMU_SPILL_BLKID;
dsl_scan_prefetch(spc, DN_SPILL_BLKPTR(dnp), &zb);
}
scan_prefetch_ctx_rele(spc, FTAG);
}
void
dsl_scan_prefetch_cb(zio_t *zio, const zbookmark_phys_t *zb, const blkptr_t *bp,
arc_buf_t *buf, void *private)
{
scan_prefetch_ctx_t *spc = private;
dsl_scan_t *scn = spc->spc_scn;
spa_t *spa = scn->scn_dp->dp_spa;
/* broadcast that the IO has completed for rate limiting purposes */
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
/* if there was an error or we are done prefetching, just cleanup */
if (buf == NULL || scn->scn_prefetch_stop)
goto out;
if (BP_GET_LEVEL(bp) > 0) {
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
zbookmark_phys_t czb;
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1, zb->zb_blkid * epb + i);
dsl_scan_prefetch(spc, cbp, &czb);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_prefetch_dnode(scn, cdnp,
zb->zb_objset, zb->zb_blkid * epb + i);
}
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
objset_phys_t *osp = buf->b_data;
dsl_scan_prefetch_dnode(scn, &osp->os_meta_dnode,
zb->zb_objset, DMU_META_DNODE_OBJECT);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
dsl_scan_prefetch_dnode(scn,
&osp->os_groupused_dnode, zb->zb_objset,
DMU_GROUPUSED_OBJECT);
dsl_scan_prefetch_dnode(scn,
&osp->os_userused_dnode, zb->zb_objset,
DMU_USERUSED_OBJECT);
}
}
out:
if (buf != NULL)
arc_buf_destroy(buf, private);
scan_prefetch_ctx_rele(spc, scn);
}
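/*
 * Dedicated prefetch thread, dispatched from dsl_scan_sync(). It pulls
 * entries off scn_prefetch_queue and issues asynchronous arc reads,
 * throttled so that no more than scn_maxinflight_bytes are in flight at
 * once. Protected (encrypted) dnode/objset blocks are read raw.
 */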
/* ARGSUSED */
static void
dsl_scan_prefetch_thread(void *arg)
{
dsl_scan_t *scn = arg;
spa_t *spa = scn->scn_dp->dp_spa;
scan_prefetch_issue_ctx_t *spic;
/* loop until we are told to stop */
while (!scn->scn_prefetch_stop) {
arc_flags_t flags = ARC_FLAG_NOWAIT |
ARC_FLAG_PRESCIENT_PREFETCH | ARC_FLAG_PREFETCH;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
mutex_enter(&spa->spa_scrub_lock);
/*
* Wait until we have an IO to issue and are not above our
* maximum in flight limit.
*/
while (!scn->scn_prefetch_stop &&
(avl_numnodes(&scn->scn_prefetch_queue) == 0 ||
spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)) {
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
}
/* recheck if we should stop since we waited for the cv */
if (scn->scn_prefetch_stop) {
mutex_exit(&spa->spa_scrub_lock);
break;
}
/* remove the prefetch IO from the tree */
spic = avl_first(&scn->scn_prefetch_queue);
spa->spa_scrub_inflight += BP_GET_PSIZE(&spic->spic_bp);
avl_remove(&scn->scn_prefetch_queue, spic);
mutex_exit(&spa->spa_scrub_lock);
if (BP_IS_PROTECTED(&spic->spic_bp)) {
ASSERT(BP_GET_TYPE(&spic->spic_bp) == DMU_OT_DNODE ||
BP_GET_TYPE(&spic->spic_bp) == DMU_OT_OBJSET);
ASSERT3U(BP_GET_LEVEL(&spic->spic_bp), ==, 0);
zio_flags |= ZIO_FLAG_RAW;
}
/* issue the prefetch asynchronously */
(void) arc_read(scn->scn_zio_root, scn->scn_dp->dp_spa,
&spic->spic_bp, dsl_scan_prefetch_cb, spic->spic_spc,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, &spic->spic_zb);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT(scn->scn_prefetch_stop);
/* free any prefetches we didn't get to complete */
mutex_enter(&spa->spa_scrub_lock);
while ((spic = avl_first(&scn->scn_prefetch_queue)) != NULL) {
avl_remove(&scn->scn_prefetch_queue, spic);
scan_prefetch_ctx_rele(spic->spic_spc, scn);
kmem_free(spic, sizeof (scan_prefetch_issue_ctx_t));
}
ASSERT0(avl_numnodes(&scn->scn_prefetch_queue));
mutex_exit(&spa->spa_scrub_lock);
}
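/*
 * Returns B_TRUE if the given block (and everything beneath it) was
 * already visited in a prior txg, according to the saved resume bookmark,
 * and can therefore be skipped on this pass.
 */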
static boolean_t
dsl_scan_check_resume(dsl_scan_t *scn, const dnode_phys_t *dnp,
const zbookmark_phys_t *zb)
{
/*
* We never skip over user/group/project accounting objects (obj < 0).
*/
if (!ZB_IS_ZERO(&scn->scn_phys.scn_bookmark) &&
(int64_t)zb->zb_object >= 0) {
/*
* If we already visited this bp & everything below (in
* a prior txg sync), don't bother doing it again.
*/
if (zbookmark_subtree_completed(dnp, zb,
&scn->scn_phys.scn_bookmark))
return (B_TRUE);
/*
* If we found the block we're trying to resume from, or
* we went past it to a different object, zero it out to
* indicate that it's OK to start checking for suspending
* again.
*/
if (bcmp(zb, &scn->scn_phys.scn_bookmark, sizeof (*zb)) == 0 ||
zb->zb_object > scn->scn_phys.scn_bookmark.zb_object) {
dprintf("resuming at %llx/%llx/%llx/%llx\n",
(longlong_t)zb->zb_objset,
(longlong_t)zb->zb_object,
(longlong_t)zb->zb_level,
(longlong_t)zb->zb_blkid);
bzero(&scn->scn_phys.scn_bookmark, sizeof (*zb));
}
}
return (B_FALSE);
}
static void dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx);
inline __attribute__((always_inline)) static void dsl_scan_visitdnode(
dsl_scan_t *, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, uint64_t object, dmu_tx_t *tx);
/*
* Return nonzero on i/o error; zero otherwise.
*/
inline __attribute__((always_inline)) static int
dsl_scan_recurse(dsl_scan_t *scn, dsl_dataset_t *ds, dmu_objset_type_t ostype,
dnode_phys_t *dnp, const blkptr_t *bp,
const zbookmark_phys_t *zb, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
int zio_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_SCAN_THREAD;
int err;
if (BP_GET_LEVEL(bp) > 0) {
arc_flags_t flags = ARC_FLAG_WAIT;
int i;
blkptr_t *cbp;
int epb = BP_GET_LSIZE(bp) >> SPA_BLKPTRSHIFT;
arc_buf_t *buf;
err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cbp = buf->b_data; i < epb; i++, cbp++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, zb->zb_objset, zb->zb_object,
zb->zb_level - 1,
zb->zb_blkid * epb + i);
dsl_scan_visitbp(cbp, &czb, dnp,
ds, scn, ostype, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_DNODE) {
arc_flags_t flags = ARC_FLAG_WAIT;
dnode_phys_t *cdnp;
int i;
int epb = BP_GET_LSIZE(bp) >> DNODE_SHIFT;
arc_buf_t *buf;
if (BP_IS_PROTECTED(bp)) {
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
zio_flags |= ZIO_FLAG_RAW;
}
err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
for (i = 0, cdnp = buf->b_data; i < epb;
i += cdnp->dn_extra_slots + 1,
cdnp += cdnp->dn_extra_slots + 1) {
dsl_scan_visitdnode(scn, ds, ostype,
cdnp, zb->zb_blkid * epb + i, tx);
}
arc_buf_destroy(buf, &buf);
} else if (BP_GET_TYPE(bp) == DMU_OT_OBJSET) {
arc_flags_t flags = ARC_FLAG_WAIT;
objset_phys_t *osp;
arc_buf_t *buf;
err = arc_read(NULL, dp->dp_spa, bp, arc_getbuf_func, &buf,
ZIO_PRIORITY_SCRUB, zio_flags, &flags, zb);
if (err) {
scn->scn_phys.scn_errors++;
return (err);
}
osp = buf->b_data;
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_meta_dnode, DMU_META_DNODE_OBJECT, tx);
if (OBJSET_BUF_HAS_USERUSED(buf)) {
/*
* We also always visit user/group/project accounting
* objects, and never skip them, even if we are
* suspending. This is necessary so that the
* space deltas from this txg get integrated.
*/
if (OBJSET_BUF_HAS_PROJECTUSED(buf))
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_projectused_dnode,
DMU_PROJECTUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_groupused_dnode,
DMU_GROUPUSED_OBJECT, tx);
dsl_scan_visitdnode(scn, ds, osp->os_type,
&osp->os_userused_dnode,
DMU_USERUSED_OBJECT, tx);
}
arc_buf_destroy(buf, &buf);
}
return (0);
}
inline __attribute__((always_inline)) static void
dsl_scan_visitdnode(dsl_scan_t *scn, dsl_dataset_t *ds,
dmu_objset_type_t ostype, dnode_phys_t *dnp,
uint64_t object, dmu_tx_t *tx)
{
int j;
for (j = 0; j < dnp->dn_nblkptr; j++) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
dnp->dn_nlevels - 1, j);
dsl_scan_visitbp(&dnp->dn_blkptr[j],
&czb, dnp, ds, scn, ostype, tx);
}
if (dnp->dn_flags & DNODE_FLAG_SPILL_BLKPTR) {
zbookmark_phys_t czb;
SET_BOOKMARK(&czb, ds ? ds->ds_object : 0, object,
0, DMU_SPILL_BLKID);
dsl_scan_visitbp(DN_SPILL_BLKPTR(dnp),
&czb, dnp, ds, scn, ostype, tx);
}
}
/*
* The arguments are in this order because mdb can only print the
* first 5; we want them to be useful.
*/
static void
dsl_scan_visitbp(blkptr_t *bp, const zbookmark_phys_t *zb,
dnode_phys_t *dnp, dsl_dataset_t *ds, dsl_scan_t *scn,
dmu_objset_type_t ostype, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
blkptr_t *bp_toread = NULL;
if (dsl_scan_check_suspend(scn, zb))
return;
if (dsl_scan_check_resume(scn, dnp, zb))
return;
scn->scn_visited_this_txg++;
/*
* This debugging is commented out to conserve stack space. This
* function is called recursively and the debugging adds several
* bytes to the stack for each call. It can be commented back in
* if required to debug an issue in dsl_scan_visitbp().
*
* dprintf_bp(bp,
* "visiting ds=%p/%llu zb=%llx/%llx/%llx/%llx bp=%p",
* ds, ds ? ds->ds_object : 0,
* zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
* bp);
*/
if (BP_IS_HOLE(bp)) {
scn->scn_holes_this_txg++;
return;
}
if (bp->blk_birth <= scn->scn_phys.scn_cur_min_txg) {
scn->scn_lt_min_this_txg++;
return;
}
bp_toread = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
*bp_toread = *bp;
if (dsl_scan_recurse(scn, ds, ostype, dnp, bp_toread, zb, tx) != 0)
goto out;
/*
* If dsl_scan_ddt() has already visited this block, it will have
* already done any translations or scrubbing, so don't call the
* callback again.
*/
if (ddt_class_contains(dp->dp_spa,
scn->scn_phys.scn_ddt_class_max, bp)) {
scn->scn_ddt_contained_this_txg++;
goto out;
}
/*
* If this block is from the future (after cur_max_txg), then we
* are doing this on behalf of a deleted snapshot, and we will
* revisit the future block on the next pass of this dataset.
* Don't scan it now unless we need to because something
* under it was modified.
*/
if (BP_PHYSICAL_BIRTH(bp) > scn->scn_phys.scn_cur_max_txg) {
scn->scn_gt_max_this_txg++;
goto out;
}
scan_funcs[scn->scn_phys.scn_func](dp, bp, zb);
out:
kmem_free(bp_toread, sizeof (blkptr_t));
}
static void
dsl_scan_visit_rootbp(dsl_scan_t *scn, dsl_dataset_t *ds, blkptr_t *bp,
dmu_tx_t *tx)
{
zbookmark_phys_t zb;
scan_prefetch_ctx_t *spc;
SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET,
ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID);
if (ZB_IS_ZERO(&scn->scn_phys.scn_bookmark)) {
SET_BOOKMARK(&scn->scn_prefetch_bookmark,
zb.zb_objset, 0, 0, 0);
} else {
scn->scn_prefetch_bookmark = scn->scn_phys.scn_bookmark;
}
scn->scn_objsets_visited_this_txg++;
spc = scan_prefetch_ctx_create(scn, NULL, FTAG);
dsl_scan_prefetch(spc, bp, &zb);
scan_prefetch_ctx_rele(spc, FTAG);
dsl_scan_visitbp(bp, &zb, NULL, ds, scn, DMU_OST_NONE, tx);
dprintf_ds(ds, "finished scan%s", "");
}
static void
ds_destroyed_scn_phys(dsl_dataset_t *ds, dsl_scan_phys_t *scn_phys)
{
if (scn_phys->scn_bookmark.zb_objset == ds->ds_object) {
if (ds->ds_is_snapshot) {
/*
* Note:
* - scn_cur_{min,max}_txg stays the same.
* - Setting the flag is not really necessary if
* scn_cur_max_txg == scn_max_txg, because there
* is nothing after this snapshot that we care
* about. However, we set it anyway and then
* ignore it when we retraverse it in
* dsl_scan_visitds().
*/
scn_phys->scn_bookmark.zb_objset =
dsl_dataset_phys(ds)->ds_next_snap_obj;
zfs_dbgmsg("destroying ds %llu; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
scn_phys->scn_flags |= DSF_VISIT_DS_AGAIN;
} else {
SET_BOOKMARK(&scn_phys->scn_bookmark,
ZB_DESTROYED_OBJSET, 0, 0, 0);
zfs_dbgmsg("destroying ds %llu; currently traversing; "
"reset bookmark to -1,0,0,0",
(u_longlong_t)ds->ds_object);
}
}
}
/*
* Invoked when a dataset is destroyed. We need to make sure that:
*
* 1) If it is the dataset that was currently being scanned, we write
* a new dsl_scan_phys_t and mark the objset reference in it
* as destroyed.
* 2) Remove it from the work queue, if it was present.
*
* If the dataset was actually a snapshot, instead of marking the dataset
* as destroyed, we instead substitute the next snapshot in line.
*/
void
dsl_scan_ds_destroyed(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ds_destroyed_scn_phys(ds, &scn->scn_phys);
ds_destroyed_scn_phys(ds, &scn->scn_phys_cached);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
if (ds->ds_is_snapshot)
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
ASSERT3U(dsl_dataset_phys(ds)->ds_num_children, <=, 1);
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
if (ds->ds_is_snapshot) {
/*
* We keep the same mintxg; it could be >
* ds_creation_txg if the previous snapshot was
* deleted too.
*/
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_next_snap_obj,
mintxg, tx) == 0);
zfs_dbgmsg("destroying ds %llu; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
(u_longlong_t)dsl_dataset_phys(ds)->
ds_next_snap_obj);
} else {
zfs_dbgmsg("destroying ds %llu; in queue; removing",
(u_longlong_t)ds->ds_object);
}
}
/*
* dsl_scan_sync() should be called after this, and should sync
* out our changed state, but just to be safe, do it here.
*/
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_snapshotted_bookmark(dsl_dataset_t *ds, zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds->ds_object) {
scn_bookmark->zb_objset =
dsl_dataset_phys(ds)->ds_prev_snap_obj;
zfs_dbgmsg("snapshotting ds %llu; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds->ds_object,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
}
/*
* Called when a dataset is snapshotted. If we were currently traversing
* this snapshot, we reset our bookmark to point at the newly created
* snapshot. We also modify our work queue to remove the old snapshot and
* replace with the new one.
*/
void
dsl_scan_ds_snapshotted(dsl_dataset_t *ds, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ASSERT(dsl_dataset_phys(ds)->ds_prev_snap_obj != 0);
ds_snapshotted_bookmark(ds, &scn->scn_phys.scn_bookmark);
ds_snapshotted_bookmark(ds, &scn->scn_phys_cached.scn_bookmark);
if (scan_ds_queue_contains(scn, ds->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds->ds_object);
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds->ds_object, &mintxg) == 0) {
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds->ds_object, tx));
VERIFY(zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
dsl_dataset_phys(ds)->ds_prev_snap_obj, mintxg, tx) == 0);
zfs_dbgmsg("snapshotting ds %llu; in queue; "
"replacing with %llu",
(u_longlong_t)ds->ds_object,
(u_longlong_t)dsl_dataset_phys(ds)->ds_prev_snap_obj);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
static void
ds_clone_swapped_bookmark(dsl_dataset_t *ds1, dsl_dataset_t *ds2,
zbookmark_phys_t *scn_bookmark)
{
if (scn_bookmark->zb_objset == ds1->ds_object) {
scn_bookmark->zb_objset = ds2->ds_object;
zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds1->ds_object,
(u_longlong_t)ds2->ds_object);
} else if (scn_bookmark->zb_objset == ds2->ds_object) {
scn_bookmark->zb_objset = ds1->ds_object;
zfs_dbgmsg("clone_swap ds %llu; currently traversing; "
"reset zb_objset to %llu",
(u_longlong_t)ds2->ds_object,
(u_longlong_t)ds1->ds_object);
}
}
/*
* Called when a parent dataset and its clone are swapped. If we were
* currently traversing the dataset, we need to switch to traversing the
* newly promoted parent.
*/
void
dsl_scan_ds_clone_swapped(dsl_dataset_t *ds1, dsl_dataset_t *ds2, dmu_tx_t *tx)
{
dsl_pool_t *dp = ds1->ds_dir->dd_pool;
dsl_scan_t *scn = dp->dp_scan;
uint64_t mintxg;
if (!dsl_scan_is_running(scn))
return;
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys.scn_bookmark);
ds_clone_swapped_bookmark(ds1, ds2, &scn->scn_phys_cached.scn_bookmark);
if (scan_ds_queue_contains(scn, ds1->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds1->ds_object);
scan_ds_queue_insert(scn, ds2->ds_object, mintxg);
}
if (scan_ds_queue_contains(scn, ds2->ds_object, &mintxg)) {
scan_ds_queue_remove(scn, ds2->ds_object);
scan_ds_queue_insert(scn, ds1->ds_object, mintxg);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds1->ds_object, &mintxg) == 0) {
int err;
ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, tx));
err = zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, mintxg, tx);
VERIFY(err == 0 || err == EEXIST);
if (err == EEXIST) {
/* Both were there to begin with */
VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj,
ds1->ds_object, mintxg, tx));
}
zfs_dbgmsg("clone_swap ds %llu; in queue; "
"replacing with %llu",
(u_longlong_t)ds1->ds_object,
(u_longlong_t)ds2->ds_object);
}
if (zap_lookup_int_key(dp->dp_meta_objset, scn->scn_phys.scn_queue_obj,
ds2->ds_object, &mintxg) == 0) {
ASSERT3U(mintxg, ==, dsl_dataset_phys(ds1)->ds_prev_snap_txg);
ASSERT3U(mintxg, ==, dsl_dataset_phys(ds2)->ds_prev_snap_txg);
VERIFY3U(0, ==, zap_remove_int(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds2->ds_object, tx));
VERIFY(0 == zap_add_int_key(dp->dp_meta_objset,
scn->scn_phys.scn_queue_obj, ds1->ds_object, mintxg, tx));
zfs_dbgmsg("clone_swap ds %llu; in queue; "
"replacing with %llu",
(u_longlong_t)ds2->ds_object,
(u_longlong_t)ds1->ds_object);
}
dsl_scan_sync_state(scn, tx, SYNC_CACHED);
}
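/*
 * dmu_objset_find_dp() callback: for each clone of the origin snapshot
 * passed in 'arg', walk back through the clone's snapshot chain to the
 * snapshot created directly from that origin and add it to the scan queue.
 */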
/* ARGSUSED */
static int
enqueue_clones_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
uint64_t originobj = *(uint64_t *)arg;
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
if (dsl_dir_phys(hds->ds_dir)->dd_origin_obj != originobj)
return (0);
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != originobj) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
dsl_dataset_rele(ds, FTAG);
if (err)
return (err);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
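/*
 * Scan a single dataset: traverse its root block pointer (and, for a head
 * dataset, its ZIL), then add the next snapshot and any clones to the work
 * queue so that later passes can pick them up.
 */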
static void
dsl_scan_visitds(dsl_scan_t *scn, uint64_t dsobj, dmu_tx_t *tx)
{
dsl_pool_t *dp = scn->scn_dp;
dsl_dataset_t *ds;
- objset_t *os;
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (scn->scn_phys.scn_cur_min_txg >=
scn->scn_phys.scn_max_txg) {
/*
* This can happen if this snapshot was created after the
* scan started, and we already completed a previous snapshot
* that was created after the scan started. This snapshot
* only references blocks with:
*
* birth < our ds_creation_txg
* cur_min_txg is no less than ds_creation_txg.
* We have already visited these blocks.
* or
* birth > scn_max_txg
* The scan requested not to visit these blocks.
*
* Subsequent snapshots (and clones) can reference our
* blocks, or blocks with even higher birth times.
* Therefore we do not need to visit them either,
* so we do not add them to the work queue.
*
* Note that checking for cur_min_txg >= cur_max_txg
* is not sufficient, because in that case we may need to
* visit subsequent snapshots. This happens when min_txg > 0,
* which raises cur_min_txg. In this case we will visit
* this dataset but skip all of its blocks, because the
* rootbp's birth time is < cur_min_txg. Then we will
* add the next snapshots/clones to the work queue.
*/
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanning dataset %llu (%s) is unnecessary because "
"cur_min_txg (%llu) >= max_txg (%llu)",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_max_txg);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
goto out;
}
- if (dmu_objset_from_ds(ds, &os))
- goto out;
-
/*
- * Only the ZIL in the head (non-snapshot) is valid. Even though
+ * Only the ZIL in the head (non-snapshot) is valid. Even though
* snapshots can have ZIL block pointers (which may be the same
- * BP as in the head), they must be ignored. So we traverse the
- * ZIL here, rather than in scan_recurse(), because the regular
- * snapshot block-sharing rules don't apply to it.
+ * BP as in the head), they must be ignored. In addition, $ORIGIN
+ * doesn't have an objset (i.e. its ds_bp is a hole) so we don't
+ * need to look for a ZIL in it either. So we traverse the ZIL here,
+ * rather than in scan_recurse(), because the regular snapshot
+ * block-sharing rules don't apply to it.
*/
- if (!ds->ds_is_snapshot)
+ if (!dsl_dataset_is_snapshot(ds) &&
+ ds->ds_dir != dp->dp_origin_snap->ds_dir) {
+ objset_t *os;
+ if (dmu_objset_from_ds(ds, &os) != 0) {
+ goto out;
+ }
dsl_scan_zil(dp, &os->os_zil_header);
+ }
/*
* Iterate over the bps in this ds.
*/
dmu_buf_will_dirty(ds->ds_dbuf, tx);
rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG);
dsl_scan_visit_rootbp(scn, ds, &dsl_dataset_phys(ds)->ds_bp, tx);
rrw_exit(&ds->ds_bp_rwlock, FTAG);
char *dsname = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN, KM_SLEEP);
dsl_dataset_name(ds, dsname);
zfs_dbgmsg("scanned dataset %llu (%s) with min=%llu max=%llu; "
"suspending=%u",
(longlong_t)dsobj, dsname,
(longlong_t)scn->scn_phys.scn_cur_min_txg,
(longlong_t)scn->scn_phys.scn_cur_max_txg,
(int)scn->scn_suspending);
kmem_free(dsname, ZFS_MAX_DATASET_NAME_LEN);
if (scn->scn_suspending)
goto out;
/*
* We've finished this pass over this dataset.
*/
/*
* If we did not completely visit this dataset, do another pass.
*/
if (scn->scn_phys.scn_flags & DSF_VISIT_DS_AGAIN) {
zfs_dbgmsg("incomplete pass; visiting again");
scn->scn_phys.scn_flags &= ~DSF_VISIT_DS_AGAIN;
scan_ds_queue_insert(scn, ds->ds_object,
scn->scn_phys.scn_cur_max_txg);
goto out;
}
/*
* Add descendant datasets to work queue.
*/
if (dsl_dataset_phys(ds)->ds_next_snap_obj != 0) {
scan_ds_queue_insert(scn,
dsl_dataset_phys(ds)->ds_next_snap_obj,
dsl_dataset_phys(ds)->ds_creation_txg);
}
if (dsl_dataset_phys(ds)->ds_num_children > 1) {
boolean_t usenext = B_FALSE;
if (dsl_dataset_phys(ds)->ds_next_clones_obj != 0) {
uint64_t count;
/*
* A bug in a previous version of the code could
* cause upgrade_clones_cb() to not set
* ds_next_snap_obj when it should, leading to a
* missing entry. Therefore we can only use the
* next_clones_obj when its count is correct.
*/
int err = zap_count(dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj, &count);
if (err == 0 &&
count == dsl_dataset_phys(ds)->ds_num_children - 1)
usenext = B_TRUE;
}
if (usenext) {
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, dp->dp_meta_objset,
dsl_dataset_phys(ds)->ds_next_clones_obj);
zap_cursor_retrieve(&zc, &za) == 0;
(void) zap_cursor_advance(&zc)) {
scan_ds_queue_insert(scn,
zfs_strtonum(za.za_name, NULL),
dsl_dataset_phys(ds)->ds_creation_txg);
}
zap_cursor_fini(&zc);
} else {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_clones_cb, &ds->ds_object,
DS_FIND_CHILDREN));
}
}
out:
dsl_dataset_rele(ds, FTAG);
}
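/*
 * dmu_objset_find_dp() callback used for old-style (pre-DSL-scrub) pools:
 * walk each head dataset back to its oldest snapshot and add that to the
 * scan queue; clones are skipped for now.
 */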
/* ARGSUSED */
static int
enqueue_cb(dsl_pool_t *dp, dsl_dataset_t *hds, void *arg)
{
dsl_dataset_t *ds;
int err;
dsl_scan_t *scn = dp->dp_scan;
err = dsl_dataset_hold_obj(dp, hds->ds_object, FTAG, &ds);
if (err)
return (err);
while (dsl_dataset_phys(ds)->ds_prev_snap_obj != 0) {
dsl_dataset_t *prev;
err = dsl_dataset_hold_obj(dp,
dsl_dataset_phys(ds)->ds_prev_snap_obj, FTAG, &prev);
if (err) {
dsl_dataset_rele(ds, FTAG);
return (err);
}
/*
* If this is a clone, we don't need to worry about it for now.
*/
if (dsl_dataset_phys(prev)->ds_next_snap_obj != ds->ds_object) {
dsl_dataset_rele(ds, FTAG);
dsl_dataset_rele(prev, FTAG);
return (0);
}
dsl_dataset_rele(ds, FTAG);
ds = prev;
}
scan_ds_queue_insert(scn, ds->ds_object,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
dsl_dataset_rele(ds, FTAG);
return (0);
}
/* ARGSUSED */
void
dsl_scan_ddt_entry(dsl_scan_t *scn, enum zio_checksum checksum,
ddt_entry_t *dde, dmu_tx_t *tx)
{
const ddt_key_t *ddk = &dde->dde_key;
ddt_phys_t *ddp = dde->dde_phys;
blkptr_t bp;
zbookmark_phys_t zb = { 0 };
int p;
if (!dsl_scan_is_running(scn))
return;
for (p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 ||
ddp->ddp_phys_birth > scn->scn_phys.scn_max_txg)
continue;
ddt_bp_create(checksum, ddk, ddp, &bp);
scn->scn_visited_this_txg++;
scan_funcs[scn->scn_phys.scn_func](scn->scn_dp, &bp, &zb);
}
}
/*
* Scrub/dedup interaction.
*
* If there are N references to a deduped block, we don't want to scrub it
* N times -- ideally, we should scrub it exactly once.
*
* We leverage the fact that the dde's replication class (enum ddt_class)
* is ordered from highest replication class (DDT_CLASS_DITTO) to lowest
* (DDT_CLASS_UNIQUE) so that we may walk the DDT in that order.
*
* To prevent excess scrubbing, the scrub begins by walking the DDT
* to find all blocks with refcnt > 1, and scrubs each of these once.
* Since there are two replication classes which contain blocks with
* refcnt > 1, we scrub the highest replication class (DDT_CLASS_DITTO) first.
* Finally the top-down scrub begins, only visiting blocks with refcnt == 1.
*
* There would be nothing more to say if a block's refcnt couldn't change
* during a scrub, but of course it can so we must account for changes
* in a block's replication class.
*
* Here's an example of what can occur:
*
* If a block has refcnt > 1 during the DDT scrub phase, but has refcnt == 1
* when visited during the top-down scrub phase, it will be scrubbed twice.
* This negates our scrub optimization, but is otherwise harmless.
*
* If a block has refcnt == 1 during the DDT scrub phase, but has refcnt > 1
* on each visit during the top-down scrub phase, it will never be scrubbed.
* To catch this, ddt_sync_entry() notifies the scrub code whenever a block's
* reference class transitions to a higher level (i.e. DDT_CLASS_UNIQUE to
* DDT_CLASS_DUPLICATE); if it transitions from refcnt == 1 to refcnt > 1
* while a scrub is in progress, it scrubs the block right then.
*/
static void
dsl_scan_ddt(dsl_scan_t *scn, dmu_tx_t *tx)
{
ddt_bookmark_t *ddb = &scn->scn_phys.scn_ddt_bookmark;
ddt_entry_t dde;
int error;
uint64_t n = 0;
bzero(&dde, sizeof (ddt_entry_t));
while ((error = ddt_walk(scn->scn_dp->dp_spa, ddb, &dde)) == 0) {
ddt_t *ddt;
if (ddb->ddb_class > scn->scn_phys.scn_ddt_class_max)
break;
dprintf("visiting ddb=%llu/%llu/%llu/%llx\n",
(longlong_t)ddb->ddb_class,
(longlong_t)ddb->ddb_type,
(longlong_t)ddb->ddb_checksum,
(longlong_t)ddb->ddb_cursor);
/* There should be no pending changes to the dedup table */
ddt = scn->scn_dp->dp_spa->spa_ddt[ddb->ddb_checksum];
ASSERT(avl_first(&ddt->ddt_tree) == NULL);
dsl_scan_ddt_entry(scn, ddb->ddb_checksum, &dde, tx);
n++;
if (dsl_scan_check_suspend(scn, NULL))
break;
}
zfs_dbgmsg("scanned %llu ddt entries with class_max = %u; "
"suspending=%u", (longlong_t)n,
(int)scn->scn_phys.scn_ddt_class_max, (int)scn->scn_suspending);
ASSERT(error == 0 || error == ENOENT);
ASSERT(error != ENOENT ||
ddb->ddb_class > scn->scn_phys.scn_ddt_class_max);
}
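/*
 * The maximum txg to scan for a dataset: scn_max_txg, capped at the
 * creation txg for snapshots.
 */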
static uint64_t
dsl_scan_ds_maxtxg(dsl_dataset_t *ds)
{
uint64_t smt = ds->ds_dir->dd_pool->dp_scan->scn_phys.scn_max_txg;
if (ds->ds_is_snapshot)
return (MIN(smt, dsl_dataset_phys(ds)->ds_creation_txg));
return (smt);
}
static void
dsl_scan_visit(dsl_scan_t *scn, dmu_tx_t *tx)
{
scan_ds_t *sds;
dsl_pool_t *dp = scn->scn_dp;
if (scn->scn_phys.scn_ddt_bookmark.ddb_class <=
scn->scn_phys.scn_ddt_class_max) {
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_ddt(scn, tx);
if (scn->scn_suspending)
return;
}
if (scn->scn_phys.scn_bookmark.zb_objset == DMU_META_OBJSET) {
/* First do the MOS & ORIGIN */
scn->scn_phys.scn_cur_min_txg = scn->scn_phys.scn_min_txg;
scn->scn_phys.scn_cur_max_txg = scn->scn_phys.scn_max_txg;
dsl_scan_visit_rootbp(scn, NULL,
&dp->dp_meta_rootbp, tx);
spa_set_rootblkptr(dp->dp_spa, &dp->dp_meta_rootbp);
if (scn->scn_suspending)
return;
if (spa_version(dp->dp_spa) < SPA_VERSION_DSL_SCRUB) {
VERIFY0(dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
enqueue_cb, NULL, DS_FIND_CHILDREN));
} else {
dsl_scan_visitds(scn,
dp->dp_origin_snap->ds_object, tx);
}
ASSERT(!scn->scn_suspending);
} else if (scn->scn_phys.scn_bookmark.zb_objset !=
ZB_DESTROYED_OBJSET) {
uint64_t dsobj = scn->scn_phys.scn_bookmark.zb_objset;
/*
* If we were suspended, continue from here. Note if the
* ds we were suspended on was deleted, the zb_objset may
* be -1, so we will skip this and find a new objset
* below.
*/
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/*
* In case we suspended right at the end of the ds, zero the
* bookmark so we don't think that we're still trying to resume.
*/
bzero(&scn->scn_phys.scn_bookmark, sizeof (zbookmark_phys_t));
/*
* Keep pulling things out of the dataset avl queue. Updates to the
* persistent zap-object-as-queue happen only at checkpoints.
*/
while ((sds = avl_first(&scn->scn_queue)) != NULL) {
dsl_dataset_t *ds;
uint64_t dsobj = sds->sds_dsobj;
uint64_t txg = sds->sds_txg;
/* dequeue and free the ds from the queue */
scan_ds_queue_remove(scn, dsobj);
sds = NULL;
/* set up min / max txg */
VERIFY3U(0, ==, dsl_dataset_hold_obj(dp, dsobj, FTAG, &ds));
if (txg != 0) {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg, txg);
} else {
scn->scn_phys.scn_cur_min_txg =
MAX(scn->scn_phys.scn_min_txg,
dsl_dataset_phys(ds)->ds_prev_snap_txg);
}
scn->scn_phys.scn_cur_max_txg = dsl_scan_ds_maxtxg(ds);
dsl_dataset_rele(ds, FTAG);
dsl_scan_visitds(scn, dsobj, tx);
if (scn->scn_suspending)
return;
}
/* No more objsets to fetch, we're done */
scn->scn_phys.scn_bookmark.zb_objset = ZB_DESTROYED_OBJSET;
ASSERT0(scn->scn_suspending);
}
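/*
 * Count the readable leaf vdevs in the main pool (log, spare, and L2ARC
 * devices are excluded). The result is used to scale the scan's in-flight
 * byte limits.
 */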
static uint64_t
dsl_scan_count_leaves(vdev_t *vd)
{
uint64_t i, leaves = 0;
/* we only count leaves that belong to the main pool and are readable */
if (vd->vdev_islog || vd->vdev_isspare ||
vd->vdev_isl2cache || !vdev_readable(vd))
return (0);
if (vd->vdev_ops->vdev_op_leaf)
return (1);
for (i = 0; i < vd->vdev_children; i++) {
leaves += dsl_scan_count_leaves(vd->vdev_child[i]);
}
return (leaves);
}
static void
scan_io_queues_update_zio_stats(dsl_scan_io_queue_t *q, const blkptr_t *bp)
{
int i;
uint64_t cur_size = 0;
for (i = 0; i < BP_GET_NDVAS(bp); i++) {
cur_size += DVA_GET_ASIZE(&bp->blk_dva[i]);
}
q->q_total_zio_size_this_txg += cur_size;
q->q_zios_this_txg++;
}
static void
scan_io_queues_update_seg_stats(dsl_scan_io_queue_t *q, uint64_t start,
uint64_t end)
{
q->q_total_seg_size_this_txg += end - start;
q->q_segs_this_txg++;
}
static boolean_t
scan_io_queue_check_suspend(dsl_scan_t *scn)
{
/* See comment in dsl_scan_check_suspend() */
uint64_t curr_time_ns = gethrtime();
uint64_t scan_time_ns = curr_time_ns - scn->scn_sync_start_time;
uint64_t sync_time_ns = curr_time_ns -
scn->scn_dp->dp_spa->spa_sync_starttime;
int dirty_pct = scn->scn_dp->dp_dirty_total * 100 / zfs_dirty_data_max;
int mintime = (scn->scn_phys.scn_func == POOL_SCAN_RESILVER) ?
zfs_resilver_min_time_ms : zfs_scrub_min_time_ms;
return ((NSEC2MSEC(scan_time_ns) > mintime &&
(dirty_pct >= zfs_vdev_async_write_active_min_dirty_percent ||
txg_sync_waiting(scn->scn_dp) ||
NSEC2SEC(sync_time_ns) >= zfs_txg_timeout)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
/*
* Given a list of scan_io_t's in io_list, this issues the I/Os out to
* disk. This consumes the io_list and frees the scan_io_t's. This is
* called when emptying queues, either when we're up against the memory
* limit or when we have finished scanning. Returns B_TRUE if we stopped
* processing the list before we finished. Any sios that were not issued
* will remain in the io_list.
*/
static boolean_t
scan_io_queue_issue(dsl_scan_io_queue_t *queue, list_t *io_list)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
int64_t bytes_issued = 0;
boolean_t suspended = B_FALSE;
while ((sio = list_head(io_list)) != NULL) {
blkptr_t bp;
if (scan_io_queue_check_suspend(scn)) {
suspended = B_TRUE;
break;
}
sio2bp(sio, &bp, queue->q_vd->vdev_id);
bytes_issued += sio->sio_asize;
scan_exec_io(scn->scn_dp, &bp, sio->sio_flags,
&sio->sio_zb, queue);
(void) list_remove_head(io_list);
scan_io_queues_update_zio_stats(queue, &bp);
kmem_cache_free(sio_cache, sio);
}
atomic_add_64(&scn->scn_bytes_pending, -bytes_issued);
return (suspended);
}
/*
* This function removes sios from an IO queue which reside within a given
* range_seg_t and inserts them (in offset order) into a list. Note that
* we only ever return a maximum of 32 sios at once. If there are more sios
* to process within this segment that did not make it onto the list we
* return B_TRUE and otherwise B_FALSE.
*/
static boolean_t
scan_io_queue_gather(dsl_scan_io_queue_t *queue, range_seg_t *rs, list_t *list)
{
scan_io_t srch_sio, *sio, *next_sio;
avl_index_t idx;
uint_t num_sios = 0;
int64_t bytes_issued = 0;
ASSERT(rs != NULL);
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
srch_sio.sio_offset = rs->rs_start;
/*
* The exact start of the extent might not contain any matching zios,
* so if that's the case, examine the next one in the tree.
*/
sio = avl_find(&queue->q_sios_by_addr, &srch_sio, &idx);
if (sio == NULL)
sio = avl_nearest(&queue->q_sios_by_addr, idx, AVL_AFTER);
while (sio != NULL && sio->sio_offset < rs->rs_end && num_sios <= 32) {
ASSERT3U(sio->sio_offset, >=, rs->rs_start);
ASSERT3U(sio->sio_offset + sio->sio_asize, <=, rs->rs_end);
next_sio = AVL_NEXT(&queue->q_sios_by_addr, sio);
avl_remove(&queue->q_sios_by_addr, sio);
bytes_issued += sio->sio_asize;
num_sios++;
list_insert_tail(list, sio);
sio = next_sio;
}
/*
* We limit the number of sios we process at once to 32 to avoid
* biting off more than we can chew. If we didn't take everything
* in the segment we update it to reflect the work we were able to
* complete. Otherwise, we remove it from the range tree entirely.
*/
if (sio != NULL && sio->sio_offset < rs->rs_end) {
range_tree_adjust_fill(queue->q_exts_by_addr, rs,
-bytes_issued);
range_tree_resize_segment(queue->q_exts_by_addr, rs,
sio->sio_offset, rs->rs_end - sio->sio_offset);
return (B_TRUE);
} else {
range_tree_remove(queue->q_exts_by_addr, rs->rs_start,
rs->rs_end - rs->rs_start);
return (B_FALSE);
}
}
/*
* This is called from the queue emptying thread and selects the next
* extent from which we are to issue I/Os. The behavior of this function
* depends on the state of the scan, the current memory consumption and
* whether or not we are performing a scan shutdown.
* 1) We select extents in an elevator algorithm (LBA-order) if the scan
* needs to perform a checkpoint.
* 2) We select the largest available extent if we are up against the
* memory limit.
* 3) Otherwise we don't select any extents.
*/
static range_seg_t *
scan_io_queue_fetch_ext(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
ASSERT(scn->scn_is_sorted);
/* handle tunable overrides */
if (scn->scn_checkpointing || scn->scn_clearing) {
if (zfs_scan_issue_strategy == 1) {
return (range_tree_first(queue->q_exts_by_addr));
} else if (zfs_scan_issue_strategy == 2) {
return (avl_first(&queue->q_exts_by_size));
}
}
/*
* During normal clearing, we want to issue our largest segments
* first, keeping IO as sequential as possible, and leaving the
* smaller extents for later with the hope that they might eventually
* grow to larger sequential segments. However, when the scan is
* checkpointing, no new extents will be added to the sorting queue,
* so the way we are sorted now is as good as it will ever get.
* In this case, we instead switch to issuing extents in LBA order.
*/
if (scn->scn_checkpointing) {
return (range_tree_first(queue->q_exts_by_addr));
} else if (scn->scn_clearing) {
return (avl_first(&queue->q_exts_by_size));
} else {
return (NULL);
}
}
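/*
 * Taskq worker that drains one top-level vdev's scan I/O queue: it
 * repeatedly fetches the next extent, gathers that extent's sios into a
 * list, and issues them, until the queue is empty or we must suspend.
 * Any sios gathered but not issued are re-inserted into the queue.
 */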
static void
scan_io_queues_run_one(void *arg)
{
dsl_scan_io_queue_t *queue = arg;
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
boolean_t suspended = B_FALSE;
range_seg_t *rs = NULL;
scan_io_t *sio = NULL;
list_t sio_list;
uint64_t bytes_per_leaf = zfs_scan_vdev_limit;
uint64_t nr_leaves = dsl_scan_count_leaves(queue->q_vd);
ASSERT(queue->q_scn->scn_is_sorted);
list_create(&sio_list, sizeof (scan_io_t),
offsetof(scan_io_t, sio_nodes.sio_list_node));
mutex_enter(q_lock);
/* calculate maximum in-flight bytes for this txg (min 1MB) */
queue->q_maxinflight_bytes =
MAX(nr_leaves * bytes_per_leaf, 1ULL << 20);
/* reset per-queue scan statistics for this txg */
queue->q_total_seg_size_this_txg = 0;
queue->q_segs_this_txg = 0;
queue->q_total_zio_size_this_txg = 0;
queue->q_zios_this_txg = 0;
/* loop until we run out of time or sios */
while ((rs = scan_io_queue_fetch_ext(queue)) != NULL) {
uint64_t seg_start = 0, seg_end = 0;
boolean_t more_left = B_TRUE;
ASSERT(list_is_empty(&sio_list));
/* loop while we still have sios left to process in this rs */
while (more_left) {
scan_io_t *first_sio, *last_sio;
/*
* We have selected which extent needs to be
* processed next. Gather up the corresponding sios.
*/
more_left = scan_io_queue_gather(queue, rs, &sio_list);
ASSERT(!list_is_empty(&sio_list));
first_sio = list_head(&sio_list);
last_sio = list_tail(&sio_list);
seg_end = last_sio->sio_offset + last_sio->sio_asize;
if (seg_start == 0)
seg_start = first_sio->sio_offset;
/*
* Issuing sios can take a long time so drop the
* queue lock. The sio queue won't be updated by
* other threads since we're in syncing context so
* we can be sure that our trees will remain exactly
* as we left them.
*/
mutex_exit(q_lock);
suspended = scan_io_queue_issue(queue, &sio_list);
mutex_enter(q_lock);
if (suspended)
break;
}
/* update statistics for debugging purposes */
scan_io_queues_update_seg_stats(queue, seg_start, seg_end);
if (suspended)
break;
}
/*
* If we were suspended in the middle of processing,
* requeue any unfinished sios and exit.
*/
while ((sio = list_head(&sio_list)) != NULL) {
list_remove(&sio_list, sio);
scan_io_queue_insert_impl(queue, sio);
}
mutex_exit(q_lock);
list_destroy(&sio_list);
}
/*
* Performs an emptying run on all scan queues in the pool. This just
* punches out one thread per top-level vdev, each of which processes
* only that vdev's scan queue. We can parallelize the I/O here because
* we know that each queue's I/Os only affect its own top-level vdev.
*
* This function waits for the queue runs to complete, and must be
* called from dsl_scan_sync (or in general, syncing context).
*/
static void
scan_io_queues_run(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
ASSERT(scn->scn_is_sorted);
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (scn->scn_bytes_pending == 0)
return;
if (scn->scn_taskq == NULL) {
int nthreads = spa->spa_root_vdev->vdev_children;
/*
* We need to make this taskq *always* execute as many
* threads in parallel as we have top-level vdevs and no
* less, otherwise strange serialization of the calls to
* scan_io_queues_run_one can occur during spa_sync runs
* and that significantly impacts performance.
*/
scn->scn_taskq = taskq_create("dsl_scan_iss", nthreads,
minclsyspri, nthreads, nthreads, TASKQ_PREPOPULATE);
}
for (uint64_t i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
mutex_enter(&vd->vdev_scan_io_queue_lock);
if (vd->vdev_scan_io_queue != NULL) {
VERIFY(taskq_dispatch(scn->scn_taskq,
scan_io_queues_run_one, vd->vdev_scan_io_queue,
TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* Wait for the queues to finish issuing their IOs for this run
* before we return. There may still be IOs in flight at this
* point.
*/
taskq_wait(scn->scn_taskq);
}
static boolean_t
-dsl_scan_free_should_suspend(dsl_scan_t *scn)
+dsl_scan_async_block_should_pause(dsl_scan_t *scn)
{
uint64_t elapsed_nanosecs;
if (zfs_recover)
return (B_FALSE);
- if (scn->scn_visited_this_txg >= zfs_free_max_blocks)
+ if (scn->scn_visited_this_txg >= zfs_async_block_max_blocks)
return (B_TRUE);
elapsed_nanosecs = gethrtime() - scn->scn_sync_start_time;
return (elapsed_nanosecs / NANOSEC > zfs_txg_timeout ||
- (NSEC2MSEC(elapsed_nanosecs) > zfs_free_min_time_ms &&
+ (NSEC2MSEC(elapsed_nanosecs) > scn->scn_async_block_min_time_ms &&
txg_sync_waiting(scn->scn_dp)) ||
spa_shutting_down(scn->scn_dp->dp_spa));
}
static int
dsl_scan_free_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
dsl_scan_t *scn = arg;
if (!scn->scn_is_bptree ||
(BP_GET_LEVEL(bp) == 0 && BP_GET_TYPE(bp) != DMU_OT_OBJSET)) {
- if (dsl_scan_free_should_suspend(scn))
+ if (dsl_scan_async_block_should_pause(scn))
return (SET_ERROR(ERESTART));
}
zio_nowait(zio_free_sync(scn->scn_zio_root, scn->scn_dp->dp_spa,
dmu_tx_get_txg(tx), bp, 0));
dsl_dir_diduse_space(tx->tx_pool->dp_free_dir, DD_USED_HEAD,
-bp_get_dsize_sync(scn->scn_dp->dp_spa, bp),
-BP_GET_PSIZE(bp), -BP_GET_UCSIZE(bp), tx);
scn->scn_visited_this_txg++;
return (0);
}
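/*
 * Roll the per-queue segment and zio statistics for this txg up into
 * pool-wide totals and averages on the dsl_scan_t, for debugging output.
 */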
static void
dsl_scan_update_stats(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t i;
uint64_t seg_size_total = 0, zio_size_total = 0;
uint64_t seg_count_total = 0, zio_count_total = 0;
for (i = 0; i < spa->spa_root_vdev->vdev_children; i++) {
vdev_t *vd = spa->spa_root_vdev->vdev_child[i];
dsl_scan_io_queue_t *queue = vd->vdev_scan_io_queue;
if (queue == NULL)
continue;
seg_size_total += queue->q_total_seg_size_this_txg;
zio_size_total += queue->q_total_zio_size_this_txg;
seg_count_total += queue->q_segs_this_txg;
zio_count_total += queue->q_zios_this_txg;
}
if (seg_count_total == 0 || zio_count_total == 0) {
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_zios_this_txg = 0;
return;
}
scn->scn_avg_seg_size_this_txg = seg_size_total / seg_count_total;
scn->scn_avg_zio_size_this_txg = zio_size_total / zio_count_total;
scn->scn_segs_this_txg = seg_count_total;
scn->scn_zios_this_txg = zio_count_total;
}
+static int
+dsl_scan_obsolete_block_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
+{
+ dsl_scan_t *scn = arg;
+ const dva_t *dva = &bp->blk_dva[0];
+
+ if (dsl_scan_async_block_should_pause(scn))
+ return (SET_ERROR(ERESTART));
+
+ spa_vdev_indirect_mark_obsolete(scn->scn_dp->dp_spa,
+ DVA_GET_VDEV(dva), DVA_GET_OFFSET(dva),
+ DVA_GET_ASIZE(dva), tx);
+ scn->scn_visited_this_txg++;
+ return (0);
+}
+
boolean_t
dsl_scan_active(dsl_scan_t *scn)
{
spa_t *spa = scn->scn_dp->dp_spa;
uint64_t used = 0, comp, uncomp;
if (spa->spa_load_state != SPA_LOAD_NONE)
return (B_FALSE);
if (spa_shutting_down(spa))
return (B_FALSE);
if ((dsl_scan_is_running(scn) && !dsl_scan_is_paused_scrub(scn)) ||
(scn->scn_async_destroying && !scn->scn_async_stalled))
return (B_TRUE);
if (spa_version(scn->scn_dp->dp_spa) >= SPA_VERSION_DEADLISTS) {
(void) bpobj_space(&scn->scn_dp->dp_free_bpobj,
&used, &comp, &uncomp);
}
return (used != 0);
}
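/*
 * Decide whether a particular DVA needs resilver I/O. Gang blocks are
 * always resilvered; otherwise we consult the top-level vdev's DTLs to
 * see whether the birth txg and offset intersect a dirty region.
 */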
static boolean_t
dsl_scan_need_resilver(spa_t *spa, const dva_t *dva, size_t psize,
uint64_t phys_birth)
{
vdev_t *vd;
if (DVA_GET_GANG(dva)) {
/*
* Gang members may be spread across multiple
* vdevs, so the best estimate we have is the
* scrub range, which has already been checked.
* XXX -- it would be better to change our
* allocation policy to ensure that all
* gang members reside on the same vdev.
*/
return (B_TRUE);
}
vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
/*
* Check if the txg falls within the range which must be
* resilvered. DVAs outside this range can always be skipped.
*/
if (!vdev_dtl_contains(vd, DTL_PARTIAL, phys_birth, 1))
return (B_FALSE);
/*
* Check if the top-level vdev must resilver this offset.
* When the offset does not intersect with a dirty leaf DTL
* then it may be possible to skip the resilver IO. The psize
* is provided instead of asize to simplify the check for RAIDZ.
*/
if (!vdev_dtl_need_resilver(vd, DVA_GET_OFFSET(dva), psize))
return (B_FALSE);
return (B_TRUE);
}
/*
* This is the primary entry point for scans that is called from syncing
* context. Scans must happen entirely during syncing context so that we
* can guarantee that blocks we are currently scanning will not change out
* from under us. While a scan is active, this function controls how quickly
* transaction groups proceed, instead of the normal handling provided by
* txg_sync_thread().
*/
void
dsl_scan_sync(dsl_pool_t *dp, dmu_tx_t *tx)
{
int err = 0;
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
state_sync_type_t sync_type = SYNC_OPTIONAL;
/*
* Check for scn_restart_txg before checking spa_load_state, so
* that we can restart an old-style scan while the pool is being
* imported (see dsl_scan_init).
*/
if (dsl_scan_restarting(scn, tx)) {
pool_scan_func_t func = POOL_SCAN_SCRUB;
dsl_scan_done(scn, B_FALSE, tx);
if (vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL))
func = POOL_SCAN_RESILVER;
zfs_dbgmsg("restarting scan func=%u txg=%llu",
func, (longlong_t)tx->tx_txg);
dsl_scan_setup_sync(&func, tx);
}
/*
* Only process scans in sync pass 1.
*/
if (spa_sync_pass(spa) > 1)
return;
/*
* If the spa is shutting down, then stop scanning. This will
* ensure that the scan does not dirty any new data during the
* shutdown phase.
*/
if (spa_shutting_down(spa))
return;
/*
* If the scan is inactive due to a stalled async destroy, try again.
*/
if (!scn->scn_async_stalled && !dsl_scan_active(scn))
return;
/* reset scan statistics */
scn->scn_visited_this_txg = 0;
scn->scn_holes_this_txg = 0;
scn->scn_lt_min_this_txg = 0;
scn->scn_gt_max_this_txg = 0;
scn->scn_ddt_contained_this_txg = 0;
scn->scn_objsets_visited_this_txg = 0;
scn->scn_avg_seg_size_this_txg = 0;
scn->scn_segs_this_txg = 0;
scn->scn_avg_zio_size_this_txg = 0;
scn->scn_zios_this_txg = 0;
scn->scn_suspending = B_FALSE;
scn->scn_sync_start_time = gethrtime();
spa->spa_scrub_active = B_TRUE;
/*
* First process the async destroys. If we suspend, don't do
* any scrubbing or resilvering. This ensures that there are no
* async destroys while we are scanning, so the scan code doesn't
* have to worry about traversing it. It is also faster to free the
* blocks than to scrub them.
*/
if (zfs_free_bpobj_enabled &&
spa_version(spa) >= SPA_VERSION_DEADLISTS) {
scn->scn_is_bptree = B_FALSE;
+ scn->scn_async_block_min_time_ms = zfs_free_min_time_ms;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bpobj_iterate(&dp->dp_free_bpobj,
dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err != 0 && err != ERESTART)
zfs_panic_recover("error %u from bpobj_iterate()", err);
}
if (err == 0 && spa_feature_is_active(spa, SPA_FEATURE_ASYNC_DESTROY)) {
ASSERT(scn->scn_async_destroying);
scn->scn_is_bptree = B_TRUE;
scn->scn_zio_root = zio_root(spa, NULL,
NULL, ZIO_FLAG_MUSTSUCCEED);
err = bptree_iterate(dp->dp_meta_objset,
dp->dp_bptree_obj, B_TRUE, dsl_scan_free_block_cb, scn, tx);
VERIFY0(zio_wait(scn->scn_zio_root));
scn->scn_zio_root = NULL;
if (err == EIO || err == ECKSUM) {
err = 0;
} else if (err != 0 && err != ERESTART) {
zfs_panic_recover("error %u from "
"traverse_dataset_destroyed()", err);
}
if (bptree_is_empty(dp->dp_meta_objset, dp->dp_bptree_obj)) {
/* finished; deactivate async destroy feature */
spa_feature_decr(spa, SPA_FEATURE_ASYNC_DESTROY, tx);
ASSERT(!spa_feature_is_active(spa,
SPA_FEATURE_ASYNC_DESTROY));
VERIFY0(zap_remove(dp->dp_meta_objset,
DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_BPTREE_OBJ, tx));
VERIFY0(bptree_free(dp->dp_meta_objset,
dp->dp_bptree_obj, tx));
dp->dp_bptree_obj = 0;
scn->scn_async_destroying = B_FALSE;
scn->scn_async_stalled = B_FALSE;
} else {
/*
* If we didn't make progress, mark the async
* destroy as stalled, so that we will not initiate
* a spa_sync() on its behalf. Note that we only
* check this if we are not finished, because if the
* bptree had no blocks for us to visit, we can
* finish without "making progress".
*/
scn->scn_async_stalled =
(scn->scn_visited_this_txg == 0);
}
}
if (scn->scn_visited_this_txg) {
zfs_dbgmsg("freed %llu blocks in %llums from "
"free_bpobj/bptree txg %llu; err=%u",
(longlong_t)scn->scn_visited_this_txg,
(longlong_t)
NSEC2MSEC(gethrtime() - scn->scn_sync_start_time),
(longlong_t)tx->tx_txg, err);
scn->scn_visited_this_txg = 0;
/*
* Write out changes to the DDT that may be required as a
* result of the blocks freed. This ensures that the DDT
* is clean when a scrub/resilver runs.
*/
ddt_sync(spa, tx->tx_txg);
}
if (err != 0)
return;
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying &&
zfs_free_leak_on_eio &&
(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes != 0 ||
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes != 0)) {
/*
* We have finished background destroying, but there is still
* some space left in the dp_free_dir. Transfer this leaked
* space to the dp_leak_dir.
*/
if (dp->dp_leak_dir == NULL) {
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
(void) dsl_dir_create_sync(dp, dp->dp_root_dir,
LEAK_DIR_NAME, tx);
VERIFY0(dsl_pool_open_special_dir(dp,
LEAK_DIR_NAME, &dp->dp_leak_dir));
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
dsl_dir_diduse_space(dp->dp_leak_dir, DD_USED_HEAD,
dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
dsl_dir_diduse_space(dp->dp_free_dir, DD_USED_HEAD,
-dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes,
-dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes, tx);
}
+
if (dp->dp_free_dir != NULL && !scn->scn_async_destroying) {
/* finished; verify that space accounting went to zero */
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_used_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_compressed_bytes);
ASSERT0(dsl_dir_phys(dp->dp_free_dir)->dd_uncompressed_bytes);
}
+ EQUIV(bpobj_is_open(&dp->dp_obsolete_bpobj),
+ 0 == zap_contains(dp->dp_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_OBSOLETE_BPOBJ));
+ if (err == 0 && bpobj_is_open(&dp->dp_obsolete_bpobj)) {
+ ASSERT(spa_feature_is_active(dp->dp_spa,
+ SPA_FEATURE_OBSOLETE_COUNTS));
+
+ scn->scn_is_bptree = B_FALSE;
+ scn->scn_async_block_min_time_ms = zfs_obsolete_min_time_ms;
+ err = bpobj_iterate(&dp->dp_obsolete_bpobj,
+ dsl_scan_obsolete_block_cb, scn, tx);
+ if (err != 0 && err != ERESTART)
+ zfs_panic_recover("error %u from bpobj_iterate()", err);
+
+ if (bpobj_is_empty(&dp->dp_obsolete_bpobj))
+ dsl_pool_destroy_obsolete_bpobj(dp, tx);
+ }
+
if (!dsl_scan_is_running(scn) || dsl_scan_is_paused_scrub(scn))
return;
/*
* Wait a few txgs after importing to begin scanning so that
* we can get the pool imported quickly.
*/
if (spa->spa_syncing_txg < spa->spa_first_txg + SCAN_IMPORT_WAIT_TXGS)
return;
/*
* It is possible to switch from unsorted to sorted at any time,
* but afterwards the scan will remain sorted unless reloaded from
* a checkpoint after a reboot.
*/
if (!zfs_scan_legacy) {
scn->scn_is_sorted = B_TRUE;
if (scn->scn_last_checkpoint == 0)
scn->scn_last_checkpoint = ddi_get_lbolt();
}
/*
* For sorted scans, determine what kind of work we will be doing
* this txg based on our memory limitations and whether or not we
* need to perform a checkpoint.
*/
if (scn->scn_is_sorted) {
/*
* If we are over our checkpoint interval, set scn_clearing
* so that we can begin checkpointing immediately. The
* checkpoint allows us to save a consistent bookmark
* representing how much data we have scrubbed so far.
* Otherwise, use the memory limit to determine if we should
* scan for metadata or start issuing scrub IOs. We accumulate
* metadata until we hit our hard memory limit at which point
* we issue scrub IOs until we are at our soft memory limit.
*/
if (scn->scn_checkpointing ||
ddi_get_lbolt() - scn->scn_last_checkpoint >
SEC_TO_TICK(zfs_scan_checkpoint_intval)) {
if (!scn->scn_checkpointing)
zfs_dbgmsg("begin scan checkpoint");
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
} else {
boolean_t should_clear = dsl_scan_should_clear(scn);
if (should_clear && !scn->scn_clearing) {
zfs_dbgmsg("begin scan clearing");
scn->scn_clearing = B_TRUE;
} else if (!should_clear && scn->scn_clearing) {
zfs_dbgmsg("finish scan clearing");
scn->scn_clearing = B_FALSE;
}
}
} else {
ASSERT0(scn->scn_checkpointing);
ASSERT0(scn->scn_clearing);
}
if (!scn->scn_clearing && scn->scn_done_txg == 0) {
/* Need to scan metadata for more blocks to scrub */
dsl_scan_phys_t *scnp = &scn->scn_phys;
taskqid_t prefetch_tqid;
uint64_t bytes_per_leaf = zfs_scan_vdev_limit;
uint64_t nr_leaves = dsl_scan_count_leaves(spa->spa_root_vdev);
/*
* Recalculate the max number of in-flight bytes for pool-wide
* scanning operations (minimum 1MB). Limits for the issuing
* phase are done per top-level vdev and are handled separately.
*/
scn->scn_maxinflight_bytes =
MAX(nr_leaves * bytes_per_leaf, 1ULL << 20);
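/*
 * For example, assuming the default 4MB zfs_scan_vdev_limit, a pool with
 * 8 readable leaf vdevs would allow up to 32MB of scan metadata reads in
 * flight at once (illustrative only; the tunable may differ).
 */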
if (scnp->scn_ddt_bookmark.ddb_class <=
scnp->scn_ddt_class_max) {
ASSERT(ZB_IS_ZERO(&scnp->scn_bookmark));
zfs_dbgmsg("doing scan sync txg %llu; "
"ddt bm=%llu/%llu/%llu/%llx",
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_ddt_bookmark.ddb_class,
(longlong_t)scnp->scn_ddt_bookmark.ddb_type,
(longlong_t)scnp->scn_ddt_bookmark.ddb_checksum,
(longlong_t)scnp->scn_ddt_bookmark.ddb_cursor);
} else {
zfs_dbgmsg("doing scan sync txg %llu; "
"bm=%llu/%llu/%llu/%llu",
(longlong_t)tx->tx_txg,
(longlong_t)scnp->scn_bookmark.zb_objset,
(longlong_t)scnp->scn_bookmark.zb_object,
(longlong_t)scnp->scn_bookmark.zb_level,
(longlong_t)scnp->scn_bookmark.zb_blkid);
}
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scn->scn_prefetch_stop = B_FALSE;
prefetch_tqid = taskq_dispatch(dp->dp_sync_taskq,
dsl_scan_prefetch_thread, scn, TQ_SLEEP);
ASSERT(prefetch_tqid != TASKQID_INVALID);
dsl_pool_config_enter(dp, FTAG);
dsl_scan_visit(scn, tx);
dsl_pool_config_exit(dp, FTAG);
mutex_enter(&dp->dp_spa->spa_scrub_lock);
scn->scn_prefetch_stop = B_TRUE;
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&dp->dp_spa->spa_scrub_lock);
taskq_wait_id(dp->dp_sync_taskq, prefetch_tqid);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
zfs_dbgmsg("scan visited %llu blocks in %llums "
"(%llu os's, %llu holes, %llu < mintxg, "
"%llu in ddt, %llu > maxtxg)",
(longlong_t)scn->scn_visited_this_txg,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_objsets_visited_this_txg,
(longlong_t)scn->scn_holes_this_txg,
(longlong_t)scn->scn_lt_min_this_txg,
(longlong_t)scn->scn_ddt_contained_this_txg,
(longlong_t)scn->scn_gt_max_this_txg);
if (!scn->scn_suspending) {
ASSERT0(avl_numnodes(&scn->scn_queue));
scn->scn_done_txg = tx->tx_txg + 1;
if (scn->scn_is_sorted) {
scn->scn_checkpointing = B_TRUE;
scn->scn_clearing = B_TRUE;
}
zfs_dbgmsg("scan complete txg %llu",
(longlong_t)tx->tx_txg);
}
} else if (scn->scn_is_sorted && scn->scn_bytes_pending != 0) {
/* need to issue scrubbing IOs from per-vdev queues */
scn->scn_zio_root = zio_root(dp->dp_spa, NULL,
NULL, ZIO_FLAG_CANFAIL);
scan_io_queues_run(scn);
(void) zio_wait(scn->scn_zio_root);
scn->scn_zio_root = NULL;
/* calculate and dprintf the current memory usage */
(void) dsl_scan_should_clear(scn);
dsl_scan_update_stats(scn);
zfs_dbgmsg("scan issued %llu blocks (%llu segs) in %llums "
"(avg_block_size = %llu, avg_seg_size = %llu)",
(longlong_t)scn->scn_zios_this_txg,
(longlong_t)scn->scn_segs_this_txg,
(longlong_t)NSEC2MSEC(gethrtime() -
scn->scn_sync_start_time),
(longlong_t)scn->scn_avg_zio_size_this_txg,
(longlong_t)scn->scn_avg_seg_size_this_txg);
} else if (scn->scn_done_txg != 0 && scn->scn_done_txg <= tx->tx_txg) {
/* Finished with everything. Mark the scrub as complete */
zfs_dbgmsg("scan issuing complete txg %llu",
(longlong_t)tx->tx_txg);
ASSERT3U(scn->scn_done_txg, !=, 0);
ASSERT0(spa->spa_scrub_inflight);
ASSERT0(scn->scn_bytes_pending);
dsl_scan_done(scn, B_TRUE, tx);
sync_type = SYNC_MANDATORY;
}
dsl_scan_sync_state(scn, tx, sync_type);
}
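/*
 * Record a scanned block in the pool-wide issued-bytes counter and, when a
 * zfs_all_blkstats_t is available, in the per-level/per-type block
 * statistics, including the ditto-copies-on-same-vdev counts.
 */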
static void
count_block(dsl_scan_t *scn, zfs_all_blkstats_t *zab, const blkptr_t *bp)
{
int i;
/* update the spa's stats on how many bytes we have issued */
for (i = 0; i < BP_GET_NDVAS(bp); i++) {
atomic_add_64(&scn->scn_dp->dp_spa->spa_scan_pass_issued,
DVA_GET_ASIZE(&bp->blk_dva[i]));
}
/*
* If we resume after a reboot, zab will be NULL; don't record
* incomplete stats in that case.
*/
if (zab == NULL)
return;
mutex_enter(&zab->zab_lock);
for (i = 0; i < 4; i++) {
int l = (i < 2) ? BP_GET_LEVEL(bp) : DN_MAX_LEVELS;
int t = (i & 1) ? BP_GET_TYPE(bp) : DMU_OT_TOTAL;
if (t & DMU_OT_NEWTYPE)
t = DMU_OT_OTHER;
zfs_blkstat_t *zb = &zab->zab_type[l][t];
int equal;
zb->zb_count++;
zb->zb_asize += BP_GET_ASIZE(bp);
zb->zb_lsize += BP_GET_LSIZE(bp);
zb->zb_psize += BP_GET_PSIZE(bp);
zb->zb_gangs += BP_COUNT_GANG(bp);
switch (BP_GET_NDVAS(bp)) {
case 2:
if (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1]))
zb->zb_ditto_2_of_2_samevdev++;
break;
case 3:
equal = (DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[1])) +
(DVA_GET_VDEV(&bp->blk_dva[0]) ==
DVA_GET_VDEV(&bp->blk_dva[2])) +
(DVA_GET_VDEV(&bp->blk_dva[1]) ==
DVA_GET_VDEV(&bp->blk_dva[2]));
if (equal == 1)
zb->zb_ditto_2_of_3_samevdev++;
else if (equal == 3)
zb->zb_ditto_3_of_3_samevdev++;
break;
}
}
mutex_exit(&zab->zab_lock);
}
static void
scan_io_queue_insert_impl(dsl_scan_io_queue_t *queue, scan_io_t *sio)
{
avl_index_t idx;
int64_t asize = sio->sio_asize;
dsl_scan_t *scn = queue->q_scn;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
if (avl_find(&queue->q_sios_by_addr, sio, &idx) != NULL) {
/* block is already scheduled for reading */
atomic_add_64(&scn->scn_bytes_pending, -asize);
kmem_cache_free(sio_cache, sio);
return;
}
avl_insert(&queue->q_sios_by_addr, sio, idx);
range_tree_add(queue->q_exts_by_addr, sio->sio_offset, asize);
}
/*
* Given all the info we got from our metadata scanning process, we
* construct a scan_io_t and insert it into the scan sorting queue. The
* I/O must already be suitable for us to process. This is controlled
* by dsl_scan_enqueue().
*/
static void
scan_io_queue_insert(dsl_scan_io_queue_t *queue, const blkptr_t *bp, int dva_i,
int zio_flags, const zbookmark_phys_t *zb)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio = kmem_cache_alloc(sio_cache, KM_SLEEP);
ASSERT0(BP_IS_GANG(bp));
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
bp2sio(bp, sio, dva_i);
sio->sio_flags = zio_flags;
sio->sio_zb = *zb;
/*
* Increment the bytes pending counter now so that we can't
* get an integer underflow in case the worker processes the
* zio before we get to incrementing this counter.
*/
atomic_add_64(&scn->scn_bytes_pending, sio->sio_asize);
scan_io_queue_insert_impl(queue, sio);
}
/*
* Given a set of I/O parameters as discovered by the metadata traversal
* process, attempts to place the I/O into the sorted queues (if allowed),
* or immediately executes the I/O.
*/
static void
dsl_scan_enqueue(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb)
{
spa_t *spa = dp->dp_spa;
ASSERT(!BP_IS_EMBEDDED(bp));
/*
* Gang blocks are hard to issue sequentially, so we just issue them
* here immediately instead of queuing them.
*/
if (!dp->dp_scan->scn_is_sorted || BP_IS_GANG(bp)) {
scan_exec_io(dp, bp, zio_flags, zb, NULL);
return;
}
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
dva_t dva;
vdev_t *vdev;
dva = bp->blk_dva[i];
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&dva));
ASSERT(vdev != NULL);
mutex_enter(&vdev->vdev_scan_io_queue_lock);
if (vdev->vdev_scan_io_queue == NULL)
vdev->vdev_scan_io_queue = scan_io_queue_create(vdev);
ASSERT(dp->dp_scan != NULL);
scan_io_queue_insert(vdev->vdev_scan_io_queue, bp,
i, zio_flags, zb);
mutex_exit(&vdev->vdev_scan_io_queue_lock);
}
}
static int
dsl_scan_scrub_cb(dsl_pool_t *dp,
const blkptr_t *bp, const zbookmark_phys_t *zb)
{
dsl_scan_t *scn = dp->dp_scan;
spa_t *spa = dp->dp_spa;
uint64_t phys_birth = BP_PHYSICAL_BIRTH(bp);
size_t psize = BP_GET_PSIZE(bp);
boolean_t needs_io = B_FALSE;
int zio_flags = ZIO_FLAG_SCAN_THREAD | ZIO_FLAG_RAW | ZIO_FLAG_CANFAIL;
if (phys_birth <= scn->scn_phys.scn_min_txg ||
phys_birth >= scn->scn_phys.scn_max_txg)
return (0);
if (BP_IS_EMBEDDED(bp)) {
count_block(scn, dp->dp_blkstats, bp);
return (0);
}
ASSERT(DSL_SCAN_IS_SCRUB_RESILVER(scn));
if (scn->scn_phys.scn_func == POOL_SCAN_SCRUB) {
zio_flags |= ZIO_FLAG_SCRUB;
needs_io = B_TRUE;
} else {
ASSERT3U(scn->scn_phys.scn_func, ==, POOL_SCAN_RESILVER);
zio_flags |= ZIO_FLAG_RESILVER;
needs_io = B_FALSE;
}
/* If it's an intent log block, failure is expected. */
if (zb->zb_level == ZB_ZIL_LEVEL)
zio_flags |= ZIO_FLAG_SPECULATIVE;
for (int d = 0; d < BP_GET_NDVAS(bp); d++) {
const dva_t *dva = &bp->blk_dva[d];
/*
* Keep track of how much data we've examined so that
* zpool(1M) status can make useful progress reports.
*/
scn->scn_phys.scn_examined += DVA_GET_ASIZE(dva);
spa->spa_scan_pass_exam += DVA_GET_ASIZE(dva);
/* if it's a resilver, this may not be in the target range */
if (!needs_io)
needs_io = dsl_scan_need_resilver(spa, dva, psize,
phys_birth);
}
if (needs_io && !zfs_no_scrub_io) {
dsl_scan_enqueue(dp, bp, zio_flags, zb);
} else {
count_block(scn, dp->dp_blkstats, bp);
}
/* do not relocate this block */
return (0);
}
static void
dsl_scan_scrub_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
dsl_scan_io_queue_t *queue = zio->io_private;
abd_free(zio->io_abd);
if (queue == NULL) {
mutex_enter(&spa->spa_scrub_lock);
ASSERT3U(spa->spa_scrub_inflight, >=, BP_GET_PSIZE(bp));
spa->spa_scrub_inflight -= BP_GET_PSIZE(bp);
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
} else {
mutex_enter(&queue->q_vd->vdev_scan_io_queue_lock);
ASSERT3U(queue->q_inflight_bytes, >=, BP_GET_PSIZE(bp));
queue->q_inflight_bytes -= BP_GET_PSIZE(bp);
cv_broadcast(&queue->q_zio_cv);
mutex_exit(&queue->q_vd->vdev_scan_io_queue_lock);
}
if (zio->io_error && (zio->io_error != ECKSUM ||
!(zio->io_flags & ZIO_FLAG_SPECULATIVE))) {
atomic_inc_64(&spa->spa_dsl_pool->dp_scan->scn_phys.scn_errors);
}
}
/*
* Given a scanning zio's information, executes the zio. The zio need
* not necessarily be sortable; this function simply executes the
* zio, no matter what it is. The optional queue argument allows the
* caller to specify that they want per top level vdev IO rate limiting
* instead of the legacy global limiting.
*/
static void
scan_exec_io(dsl_pool_t *dp, const blkptr_t *bp, int zio_flags,
const zbookmark_phys_t *zb, dsl_scan_io_queue_t *queue)
{
spa_t *spa = dp->dp_spa;
dsl_scan_t *scn = dp->dp_scan;
size_t size = BP_GET_PSIZE(bp);
abd_t *data = abd_alloc_for_io(size, B_FALSE);
ASSERT3U(scn->scn_maxinflight_bytes, >, 0);
if (queue == NULL) {
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_scrub_inflight >= scn->scn_maxinflight_bytes)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_scrub_inflight += BP_GET_PSIZE(bp);
mutex_exit(&spa->spa_scrub_lock);
} else {
kmutex_t *q_lock = &queue->q_vd->vdev_scan_io_queue_lock;
mutex_enter(q_lock);
while (queue->q_inflight_bytes >= queue->q_maxinflight_bytes)
cv_wait(&queue->q_zio_cv, q_lock);
queue->q_inflight_bytes += BP_GET_PSIZE(bp);
mutex_exit(q_lock);
}
count_block(scn, dp->dp_blkstats, bp);
zio_nowait(zio_read(scn->scn_zio_root, spa, bp, data, size,
dsl_scan_scrub_done, queue, ZIO_PRIORITY_SCRUB, zio_flags, zb));
}
/*
* This is the primary extent sorting algorithm. We balance two parameters:
* 1) how many bytes of I/O are in an extent
* 2) how well the extent is filled with I/O (as a fraction of its total size)
* Since we allow extents to have gaps between their constituent I/Os, it's
* possible to have a fairly large extent that contains the same amount of
* I/O bytes as a much smaller extent, which just packs the I/O more tightly.
* The algorithm sorts based on a score calculated from the extent's size,
* the relative fill volume (in %) and a "fill weight" parameter that controls
* the split between whether we prefer larger extents or more well populated
* extents:
*
* SCORE = FILL_IN_BYTES + (FILL_IN_PERCENT * FILL_IN_BYTES * FILL_WEIGHT)
*
* Example:
* 1) assume extsz = 64 MiB
* 2) assume fill = 32 MiB (extent is half full)
* 3) assume fill_weight = 3
* 4) SCORE = 32M + (((32M * 100) / 64M) * 3 * 32M) / 100
* SCORE = 32M + (50 * 3 * 32M) / 100
* SCORE = 32M + (4800M / 100)
* SCORE = 32M + 48M
* ^ ^
* | +--- final total relative fill-based score
* +--------- final total fill-based score
* SCORE = 80M
*
* As can be seen, at fill_weight=3, the algorithm is slightly biased towards
* extents that are more completely filled (in a 3:2 ratio) vs just larger.
* Note that as an optimization, we replace multiplication and division by
* 100 with bitshifting by 7 (which effectively multiplies and divides by 128).
*/
static int
ext_size_compare(const void *x, const void *y)
{
const range_seg_t *rsa = x, *rsb = y;
uint64_t sa = rsa->rs_end - rsa->rs_start,
sb = rsb->rs_end - rsb->rs_start;
uint64_t score_a, score_b;
score_a = rsa->rs_fill + ((((rsa->rs_fill << 7) / sa) *
fill_weight * rsa->rs_fill) >> 7);
score_b = rsb->rs_fill + ((((rsb->rs_fill << 7) / sb) *
fill_weight * rsb->rs_fill) >> 7);
if (score_a > score_b)
return (-1);
if (score_a == score_b) {
if (rsa->rs_start < rsb->rs_start)
return (-1);
if (rsa->rs_start == rsb->rs_start)
return (0);
return (1);
}
return (1);
}
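/*
 * Illustrative sketch (not part of this change): the worked example from
 * the comment above, redone with the shift-by-7 approximation that
 * ext_size_compare() actually uses. The helper name is hypothetical and
 * the block is never compiled; it only makes the arithmetic concrete.
 */
#if 0
static uint64_t
ext_score_example(void)
{
	uint64_t extsz = 64ULL << 20;	/* extent size: 64 MiB */
	uint64_t fill = 32ULL << 20;	/* queued I/O in the extent: 32 MiB */
	uint64_t weight = 3;		/* assumed fill_weight */

	/* (fill << 7) / extsz == 64, i.e. ~50% fill scaled by 128 */
	return (fill + ((((fill << 7) / extsz) * weight * fill) >> 7));
	/* = 32 MiB + (64 * 3 * 32 MiB) >> 7 = 32 MiB + 48 MiB = 80 MiB */
}
#endif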
/*
* Comparator for the q_sios_by_addr tree. Sorting is simply performed
* based on LBA-order (from lowest to highest).
*/
static int
sio_addr_compare(const void *x, const void *y)
{
const scan_io_t *a = x, *b = y;
if (a->sio_offset < b->sio_offset)
return (-1);
if (a->sio_offset == b->sio_offset)
return (0);
return (1);
}
/* IO queues are created on demand when they are needed. */
static dsl_scan_io_queue_t *
scan_io_queue_create(vdev_t *vd)
{
dsl_scan_t *scn = vd->vdev_spa->spa_dsl_pool->dp_scan;
dsl_scan_io_queue_t *q = kmem_zalloc(sizeof (*q), KM_SLEEP);
q->q_scn = scn;
q->q_vd = vd;
cv_init(&q->q_zio_cv, NULL, CV_DEFAULT, NULL);
q->q_exts_by_addr = range_tree_create_impl(&rt_avl_ops,
- &q->q_exts_by_size, ext_size_compare,
- &q->q_vd->vdev_scan_io_queue_lock, zfs_scan_max_ext_gap);
+ &q->q_exts_by_size, ext_size_compare, zfs_scan_max_ext_gap);
avl_create(&q->q_sios_by_addr, sio_addr_compare,
sizeof (scan_io_t), offsetof(scan_io_t, sio_nodes.sio_addr_node));
return (q);
}
/*
* Destroys a scan queue and all segments and scan_io_t's contained in it.
* No further execution of I/O occurs; anything pending in the queue is
* simply freed without being executed.
*/
void
dsl_scan_io_queue_destroy(dsl_scan_io_queue_t *queue)
{
dsl_scan_t *scn = queue->q_scn;
scan_io_t *sio;
void *cookie = NULL;
int64_t bytes_dequeued = 0;
ASSERT(MUTEX_HELD(&queue->q_vd->vdev_scan_io_queue_lock));
while ((sio = avl_destroy_nodes(&queue->q_sios_by_addr, &cookie)) !=
NULL) {
ASSERT(range_tree_contains(queue->q_exts_by_addr,
sio->sio_offset, sio->sio_asize));
bytes_dequeued += sio->sio_asize;
kmem_cache_free(sio_cache, sio);
}
atomic_add_64(&scn->scn_bytes_pending, -bytes_dequeued);
range_tree_vacate(queue->q_exts_by_addr, NULL, queue);
range_tree_destroy(queue->q_exts_by_addr);
avl_destroy(&queue->q_sios_by_addr);
cv_destroy(&queue->q_zio_cv);
kmem_free(queue, sizeof (*queue));
}
/*
* Properly transfers a dsl_scan_io_queue_t from `svd' to `tvd'. This is
* called on behalf of vdev_top_transfer when creating or destroying
* a mirror vdev due to zpool attach/detach.
*/
void
dsl_scan_io_queue_vdev_xfer(vdev_t *svd, vdev_t *tvd)
{
mutex_enter(&svd->vdev_scan_io_queue_lock);
mutex_enter(&tvd->vdev_scan_io_queue_lock);
VERIFY3P(tvd->vdev_scan_io_queue, ==, NULL);
tvd->vdev_scan_io_queue = svd->vdev_scan_io_queue;
svd->vdev_scan_io_queue = NULL;
- if (tvd->vdev_scan_io_queue != NULL) {
+ if (tvd->vdev_scan_io_queue != NULL)
tvd->vdev_scan_io_queue->q_vd = tvd;
- range_tree_set_lock(tvd->vdev_scan_io_queue->q_exts_by_addr,
- &tvd->vdev_scan_io_queue_lock);
- }
mutex_exit(&tvd->vdev_scan_io_queue_lock);
mutex_exit(&svd->vdev_scan_io_queue_lock);
}
static void
scan_io_queues_destroy(dsl_scan_t *scn)
{
vdev_t *rvd = scn->scn_dp->dp_spa->spa_root_vdev;
for (uint64_t i = 0; i < rvd->vdev_children; i++) {
vdev_t *tvd = rvd->vdev_child[i];
mutex_enter(&tvd->vdev_scan_io_queue_lock);
if (tvd->vdev_scan_io_queue != NULL)
dsl_scan_io_queue_destroy(tvd->vdev_scan_io_queue);
tvd->vdev_scan_io_queue = NULL;
mutex_exit(&tvd->vdev_scan_io_queue_lock);
}
}
static void
dsl_scan_freed_dva(spa_t *spa, const blkptr_t *bp, int dva_i)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
vdev_t *vdev;
kmutex_t *q_lock;
dsl_scan_io_queue_t *queue;
scan_io_t srch, *sio;
avl_index_t idx;
uint64_t start, size;
vdev = vdev_lookup_top(spa, DVA_GET_VDEV(&bp->blk_dva[dva_i]));
ASSERT(vdev != NULL);
q_lock = &vdev->vdev_scan_io_queue_lock;
queue = vdev->vdev_scan_io_queue;
mutex_enter(q_lock);
if (queue == NULL) {
mutex_exit(q_lock);
return;
}
bp2sio(bp, &srch, dva_i);
start = srch.sio_offset;
size = srch.sio_asize;
/*
* We can find the zio in two states:
* 1) Cold, just sitting in the queue of zio's to be issued at
* some point in the future. In this case, all we do is
* remove the zio from the q_sios_by_addr tree, decrement
* its data volume from the containing range_seg_t and
* resort the q_exts_by_size tree to reflect that the
* range_seg_t has lost some of its 'fill'. We don't shorten
* the range_seg_t - this is usually rare enough not to be
* worth the extra hassle of trying to keep track of precise
* extent boundaries.
* 2) Hot, where the zio is currently in-flight in
* dsl_scan_issue_ios. In this case, we can't simply
* reach in and stop the in-flight zio's, so we instead
* block the caller. Eventually, dsl_scan_issue_ios will
* be done with issuing the zio's it gathered and will
* signal us.
*/
sio = avl_find(&queue->q_sios_by_addr, &srch, &idx);
if (sio != NULL) {
int64_t asize = sio->sio_asize;
blkptr_t tmpbp;
/* Got it while it was cold in the queue */
ASSERT3U(start, ==, sio->sio_offset);
ASSERT3U(size, ==, asize);
avl_remove(&queue->q_sios_by_addr, sio);
ASSERT(range_tree_contains(queue->q_exts_by_addr, start, size));
range_tree_remove_fill(queue->q_exts_by_addr, start, size);
/*
* We only update scn_bytes_pending in the cold path,
* otherwise it will already have been accounted for as
* part of the zio's execution.
*/
atomic_add_64(&scn->scn_bytes_pending, -asize);
/* count the block as though we issued it */
sio2bp(sio, &tmpbp, dva_i);
count_block(scn, dp->dp_blkstats, &tmpbp);
kmem_cache_free(sio_cache, sio);
}
mutex_exit(q_lock);
}
/*
* Callback invoked when a zio_free() zio is executing. This needs to be
* intercepted to prevent the zio from deallocating a particular portion
* of disk space and it then getting reallocated and written to, while we
* still have it queued up for processing.
*/
void
dsl_scan_freed(spa_t *spa, const blkptr_t *bp)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
dsl_scan_t *scn = dp->dp_scan;
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(scn != NULL);
if (!dsl_scan_is_running(scn))
return;
for (int i = 0; i < BP_GET_NDVAS(bp); i++)
dsl_scan_freed_dva(spa, bp, i);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* CSTYLED */
module_param(zfs_scan_vdev_limit, ulong, 0644);
MODULE_PARM_DESC(zfs_scan_vdev_limit,
"Max bytes in flight per leaf vdev for scrubs and resilvers");
module_param(zfs_scrub_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_scrub_min_time_ms, "Min millisecs to scrub per txg");
+module_param(zfs_obsolete_min_time_ms, int, 0644);
+MODULE_PARM_DESC(zfs_obsolete_min_time_ms, "Min millisecs to obsolete per txg");
+
module_param(zfs_free_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_free_min_time_ms, "Min millisecs to free per txg");
module_param(zfs_resilver_min_time_ms, int, 0644);
MODULE_PARM_DESC(zfs_resilver_min_time_ms, "Min millisecs to resilver per txg");
module_param(zfs_no_scrub_io, int, 0644);
MODULE_PARM_DESC(zfs_no_scrub_io, "Set to disable scrub I/O");
module_param(zfs_no_scrub_prefetch, int, 0644);
MODULE_PARM_DESC(zfs_no_scrub_prefetch, "Set to disable scrub prefetching");
/* CSTYLED */
-module_param(zfs_free_max_blocks, ulong, 0644);
-MODULE_PARM_DESC(zfs_free_max_blocks, "Max number of blocks freed in one txg");
+module_param(zfs_async_block_max_blocks, ulong, 0644);
+MODULE_PARM_DESC(zfs_async_block_max_blocks,
+ "Max number of blocks freed in one txg");
module_param(zfs_free_bpobj_enabled, int, 0644);
MODULE_PARM_DESC(zfs_free_bpobj_enabled, "Enable processing of the free_bpobj");
module_param(zfs_scan_mem_lim_fact, int, 0644);
MODULE_PARM_DESC(zfs_scan_mem_lim_fact, "Fraction of RAM for scan hard limit");
module_param(zfs_scan_issue_strategy, int, 0644);
MODULE_PARM_DESC(zfs_scan_issue_strategy,
"IO issuing strategy during scrubbing. 0 = default, 1 = LBA, 2 = size");
module_param(zfs_scan_legacy, int, 0644);
MODULE_PARM_DESC(zfs_scan_legacy, "Scrub using legacy non-sequential method");
module_param(zfs_scan_checkpoint_intval, int, 0644);
MODULE_PARM_DESC(zfs_scan_checkpoint_intval,
"Scan progress on-disk checkpointing interval");
/* CSTYLED */
module_param(zfs_scan_max_ext_gap, ulong, 0644);
MODULE_PARM_DESC(zfs_scan_max_ext_gap,
"Max gap in bytes between sequential scrub / resilver I/Os");
module_param(zfs_scan_mem_lim_soft_fact, int, 0644);
MODULE_PARM_DESC(zfs_scan_mem_lim_soft_fact,
"Fraction of hard limit used as soft limit");
module_param(zfs_scan_strict_mem_lim, int, 0644);
MODULE_PARM_DESC(zfs_scan_strict_mem_lim,
"Tunable to attempt to reduce lock contention");
module_param(zfs_scan_fill_weight, int, 0644);
MODULE_PARM_DESC(zfs_scan_fill_weight,
"Tunable to adjust bias towards more filled segments during scans");
#endif
diff --git a/module/zfs/metaslab.c b/module/zfs/metaslab.c
index 6320fd388ff2..25090f089be5 100644
--- a/module/zfs/metaslab.c
+++ b/module/zfs/metaslab.c
@@ -1,3505 +1,3906 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2016 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/space_map.h>
#include <sys/metaslab_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/spa_impl.h>
#include <sys/zfeature.h>
+#include <sys/vdev_indirect_mapping.h>
#define WITH_DF_BLOCK_ALLOCATOR
#define GANG_ALLOCATION(flags) \
((flags) & (METASLAB_GANG_CHILD | METASLAB_GANG_HEADER))
/*
* Metaslab granularity, in bytes. This is roughly similar to what would be
* referred to as the "stripe size" in traditional RAID arrays. In normal
* operation, we will try to write this amount of data to a top-level vdev
* before moving on to the next one.
*/
unsigned long metaslab_aliquot = 512 << 10;
-uint64_t metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1; /* force gang blocks */
+/* force gang blocks */
+unsigned long metaslab_gang_bang = SPA_MAXBLOCKSIZE + 1;
/*
* The in-core space map representation is more compact than its on-disk form.
* The zfs_condense_pct determines how much more compact the in-core
* space map representation must be before we compact it on-disk.
* Values should be greater than or equal to 100.
*/
int zfs_condense_pct = 200;
/*
* Condensing a metaslab is not guaranteed to actually reduce the amount of
* space used on disk. In particular, a space map uses data in increments of
* MAX(1 << ashift, space_map_blksz), so a metaslab might use the
* same number of blocks after condensing. Since the goal of condensing is to
* reduce the number of IOPs required to read the space map, we only want to
* condense when we can be sure we will reduce the number of blocks used by the
* space map. Unfortunately, we cannot precisely compute whether or not this is
* the case in metaslab_should_condense since we are holding ms_lock. Instead,
* we apply the following heuristic: do not condense a spacemap unless the
* uncondensed size consumes greater than zfs_metaslab_condense_block_threshold
* blocks.
*/
int zfs_metaslab_condense_block_threshold = 4;
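/*
 * Illustrative sketch (not part of this change): a rough restatement of
 * the two conditions described above. The real decision is made by
 * metaslab_should_condense(); this hypothetical helper is never compiled
 * and only spells out how the tunables combine.
 */
#if 0
static boolean_t
should_condense_example(uint64_t incore_size, uint64_t ondisk_size,
    uint64_t sm_blksz)
{
	/* on-disk form must be >= zfs_condense_pct/100 times the in-core form */
	boolean_t wasteful = ondisk_size >=
	    incore_size * zfs_condense_pct / 100;
	/* and must span more than the block threshold, e.g. 4 * 4 KiB = 16 KiB */
	boolean_t large_enough = ondisk_size >
	    (uint64_t)zfs_metaslab_condense_block_threshold * sm_blksz;

	return (wasteful && large_enough);
}
#endif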
/*
* The zfs_mg_noalloc_threshold defines which metaslab groups should
* be eligible for allocation. The value is defined as a percentage of
* free space. Metaslab groups that have more free space than
* zfs_mg_noalloc_threshold are always eligible for allocations. Once
* a metaslab group's free space is less than or equal to the
* zfs_mg_noalloc_threshold the allocator will avoid allocating to that
* group unless all groups in the pool have reached zfs_mg_noalloc_threshold.
* Once all groups in the pool reach zfs_mg_noalloc_threshold then all
* groups are allowed to accept allocations. Gang blocks are always
* eligible to allocate on any metaslab group. The default value of 0 means
* no metaslab group will be excluded based on this criterion.
*/
int zfs_mg_noalloc_threshold = 0;
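/*
 * Illustrative example (not part of this change): if this were set to 30,
 * a metaslab group with 25% free space would be skipped for new
 * allocations as long as at least one other group in the pool still has
 * more than 30% free; once every group drops to 30% or below, all groups
 * become eligible again. At the default of 0 the check is effectively
 * disabled.
 */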
/*
* Metaslab groups are considered eligible for allocations if their
* fragmentation metric (measured as a percentage) is less than or equal to
* zfs_mg_fragmentation_threshold. If a metaslab group exceeds this threshold
* then it will be skipped unless all metaslab groups within the metaslab
* class have also crossed this threshold.
*/
int zfs_mg_fragmentation_threshold = 85;
/*
* Allow metaslabs to keep their active state as long as their fragmentation
* percentage is less than or equal to zfs_metaslab_fragmentation_threshold. An
* active metaslab that exceeds this threshold will no longer keep its active
* status allowing better metaslabs to be selected.
*/
int zfs_metaslab_fragmentation_threshold = 70;
/*
* When set will load all metaslabs when pool is first opened.
*/
int metaslab_debug_load = 0;
/*
* When set will prevent metaslabs from being unloaded.
*/
int metaslab_debug_unload = 0;
/*
* Minimum size which forces the dynamic allocator to change
* its allocation strategy. Once the space map cannot satisfy
* an allocation of this size then it switches to using a more
* aggressive strategy (i.e. search by size rather than offset).
*/
uint64_t metaslab_df_alloc_threshold = SPA_OLD_MAXBLOCKSIZE;
/*
* The minimum free space, in percent, which must be available
* in a space map to continue allocations in a first-fit fashion.
* Once the space map's free space drops below this level we dynamically
* switch to using best-fit allocations.
*/
int metaslab_df_free_pct = 4;
/*
* Percentage of all cpus that can be used by the metaslab taskq.
*/
int metaslab_load_pct = 50;
/*
* Determines how many txgs a metaslab may remain loaded without having any
* allocations from it. As long as a metaslab continues to be used we will
* keep it loaded.
*/
int metaslab_unload_delay = TXG_SIZE * 2;
/*
* Max number of metaslabs per group to preload.
*/
int metaslab_preload_limit = SPA_DVAS_PER_BP;
/*
* Enable/disable preloading of metaslabs.
*/
int metaslab_preload_enabled = B_TRUE;
/*
* Enable/disable fragmentation weighting on metaslabs.
*/
int metaslab_fragmentation_factor_enabled = B_TRUE;
/*
* Enable/disable lba weighting (i.e. outer tracks are given preference).
*/
int metaslab_lba_weighting_enabled = B_TRUE;
/*
* Enable/disable metaslab group biasing.
*/
int metaslab_bias_enabled = B_TRUE;
+/*
+ * Enable/disable remapping of indirect DVAs to their concrete vdevs.
+ */
+boolean_t zfs_remap_blkptr_enable = B_TRUE;
+
/*
* Enable/disable segment-based metaslab selection.
*/
int zfs_metaslab_segment_weight_enabled = B_TRUE;
/*
* When using segment-based metaslab selection, we will continue
* allocating from the active metaslab until we have exhausted
* zfs_metaslab_switch_threshold of its buckets.
*/
int zfs_metaslab_switch_threshold = 2;
/*
* Internal switch to enable/disable the metaslab allocation tracing
* facility.
*/
#ifdef _METASLAB_TRACING
boolean_t metaslab_trace_enabled = B_TRUE;
#endif
/*
* Maximum entries that the metaslab allocation tracing facility will keep
* in a given list when running in non-debug mode. We limit the number
* of entries in non-debug mode to prevent us from using up too much memory.
* The limit should be sufficiently large that we don't expect any allocation
* to ever exceed this value. In debug mode, the system will panic if this
* limit is ever reached, allowing for further investigation.
*/
#ifdef _METASLAB_TRACING
uint64_t metaslab_trace_max_entries = 5000;
#endif
static uint64_t metaslab_weight(metaslab_t *);
static void metaslab_set_fragmentation(metaslab_t *);
+static void metaslab_free_impl(vdev_t *, uint64_t, uint64_t, uint64_t);
+static void metaslab_check_free_impl(vdev_t *, uint64_t, uint64_t);
#ifdef _METASLAB_TRACING
kmem_cache_t *metaslab_alloc_trace_cache;
#endif
/*
* ==========================================================================
* Metaslab classes
* ==========================================================================
*/
metaslab_class_t *
metaslab_class_create(spa_t *spa, metaslab_ops_t *ops)
{
metaslab_class_t *mc;
mc = kmem_zalloc(sizeof (metaslab_class_t), KM_SLEEP);
mc->mc_spa = spa;
mc->mc_rotor = NULL;
mc->mc_ops = ops;
mutex_init(&mc->mc_lock, NULL, MUTEX_DEFAULT, NULL);
refcount_create_tracked(&mc->mc_alloc_slots);
return (mc);
}
void
metaslab_class_destroy(metaslab_class_t *mc)
{
ASSERT(mc->mc_rotor == NULL);
ASSERT(mc->mc_alloc == 0);
ASSERT(mc->mc_deferred == 0);
ASSERT(mc->mc_space == 0);
ASSERT(mc->mc_dspace == 0);
refcount_destroy(&mc->mc_alloc_slots);
mutex_destroy(&mc->mc_lock);
kmem_free(mc, sizeof (metaslab_class_t));
}
int
metaslab_class_validate(metaslab_class_t *mc)
{
metaslab_group_t *mg;
vdev_t *vd;
/*
* Must hold one of the spa_config locks.
*/
ASSERT(spa_config_held(mc->mc_spa, SCL_ALL, RW_READER) ||
spa_config_held(mc->mc_spa, SCL_ALL, RW_WRITER));
if ((mg = mc->mc_rotor) == NULL)
return (0);
do {
vd = mg->mg_vd;
ASSERT(vd->vdev_mg != NULL);
ASSERT3P(vd->vdev_top, ==, vd);
ASSERT3P(mg->mg_class, ==, mc);
ASSERT3P(vd->vdev_ops, !=, &vdev_hole_ops);
} while ((mg = mg->mg_next) != mc->mc_rotor);
return (0);
}
void
metaslab_class_space_update(metaslab_class_t *mc, int64_t alloc_delta,
int64_t defer_delta, int64_t space_delta, int64_t dspace_delta)
{
atomic_add_64(&mc->mc_alloc, alloc_delta);
atomic_add_64(&mc->mc_deferred, defer_delta);
atomic_add_64(&mc->mc_space, space_delta);
atomic_add_64(&mc->mc_dspace, dspace_delta);
}
uint64_t
metaslab_class_get_alloc(metaslab_class_t *mc)
{
return (mc->mc_alloc);
}
uint64_t
metaslab_class_get_deferred(metaslab_class_t *mc)
{
return (mc->mc_deferred);
}
uint64_t
metaslab_class_get_space(metaslab_class_t *mc)
{
return (mc->mc_space);
}
uint64_t
metaslab_class_get_dspace(metaslab_class_t *mc)
{
return (spa_deflate(mc->mc_spa) ? mc->mc_dspace : mc->mc_space);
}
void
metaslab_class_histogram_verify(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t *mc_hist;
int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mc_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
/*
* Skip any holes, uninitialized top-levels, or
* vdevs that are not in this metaslab class.
*/
- if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
+ if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
mc_hist[i] += mg->mg_histogram[i];
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
VERIFY3U(mc_hist[i], ==, mc->mc_histogram[i]);
kmem_free(mc_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
/*
* Calculate the metaslab class's fragmentation metric. The metric
* is weighted based on the space contribution of each metaslab group.
* The return value will be a number between 0 and 100 (inclusive), or
* ZFS_FRAG_INVALID if the metric has not been set. See comment above the
* zfs_frag_table for more information about the metric.
*/
uint64_t
metaslab_class_fragmentation(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t fragmentation = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
/*
- * Skip any holes, uninitialized top-levels, or
- * vdevs that are not in this metalab class.
+ * Skip any holes, uninitialized top-levels,
+ * or vdevs that are not in this metaslab class.
*/
- if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
+ if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* If a metaslab group does not contain a fragmentation
* metric then just bail out.
*/
if (mg->mg_fragmentation == ZFS_FRAG_INVALID) {
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (ZFS_FRAG_INVALID);
}
/*
* Determine how much this metaslab_group is contributing
* to the overall pool fragmentation metric.
*/
fragmentation += mg->mg_fragmentation *
metaslab_group_get_space(mg);
}
fragmentation /= metaslab_class_get_space(mc);
ASSERT3U(fragmentation, <=, 100);
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (fragmentation);
}
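/*
 * Illustrative example (not part of this change): for a pool with two
 * top-level vdevs, one contributing 1 TiB of metaslab space at 10%
 * fragmentation and the other 3 TiB at 50%, the loop above yields
 *
 *	(10 * 1T + 50 * 3T) / 4T = 40 (%)
 *
 * so larger metaslab groups dominate the class-wide metric.
 */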
/*
* Calculate the amount of expandable space that is available in
* this metaslab class. If a device is expanded then its expandable
* space will be the amount of allocatable space that is currently not
* part of this metaslab class.
*/
uint64_t
metaslab_class_expandable_space(metaslab_class_t *mc)
{
vdev_t *rvd = mc->mc_spa->spa_root_vdev;
uint64_t space = 0;
spa_config_enter(mc->mc_spa, SCL_VDEV, FTAG, RW_READER);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
- if (tvd->vdev_ishole || tvd->vdev_ms_shift == 0 ||
+ if (!vdev_is_concrete(tvd) || tvd->vdev_ms_shift == 0 ||
mg->mg_class != mc) {
continue;
}
/*
* Calculate if we have enough space to add additional
* metaslabs. We report the expandable space in terms
* of the metaslab size since that's the unit of expansion.
*/
space += P2ALIGN(tvd->vdev_max_asize - tvd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
spa_config_exit(mc->mc_spa, SCL_VDEV, FTAG);
return (space);
}
static int
metaslab_compare(const void *x1, const void *x2)
{
const metaslab_t *m1 = (const metaslab_t *)x1;
const metaslab_t *m2 = (const metaslab_t *)x2;
int cmp = AVL_CMP(m2->ms_weight, m1->ms_weight);
if (likely(cmp))
return (cmp);
IMPLY(AVL_CMP(m1->ms_start, m2->ms_start) == 0, m1 == m2);
return (AVL_CMP(m1->ms_start, m2->ms_start));
}
/*
* Verify that the space accounting on disk matches the in-core range_trees.
*/
void
metaslab_verify_space(metaslab_t *msp, uint64_t txg)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t allocated = 0;
uint64_t sm_free_space, msp_free_space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((zfs_flags & ZFS_DEBUG_METASLAB_VERIFY) == 0)
return;
/*
* We can only verify the metaslab space when we're called
* from syncing context with a loaded metaslab that has an allocated
* space map. Calling this in non-syncing context does not
* provide a consistent view of the metaslab since we're performing
* allocations in the future.
*/
if (txg != spa_syncing_txg(spa) || msp->ms_sm == NULL ||
!msp->ms_loaded)
return;
sm_free_space = msp->ms_size - space_map_allocated(msp->ms_sm) -
space_map_alloc_delta(msp->ms_sm);
/*
* Account for future allocations since we would have already
* deducted that space from the ms_freetree.
*/
for (int t = 0; t < TXG_CONCURRENT_STATES; t++) {
allocated +=
range_tree_space(msp->ms_alloctree[(txg + t) & TXG_MASK]);
}
msp_free_space = range_tree_space(msp->ms_tree) + allocated +
msp->ms_deferspace + range_tree_space(msp->ms_freedtree);
VERIFY3U(sm_free_space, ==, msp_free_space);
}
/*
* ==========================================================================
* Metaslab groups
* ==========================================================================
*/
/*
* Update the allocatable flag and the metaslab group's capacity.
* The allocatable flag is set to true if the capacity is below
* the zfs_mg_noalloc_threshold or has a fragmentation value that is
* greater than zfs_mg_fragmentation_threshold. If a metaslab group
* transitions from allocatable to non-allocatable or vice versa then the
* metaslab group's class is updated to reflect the transition.
*/
static void
metaslab_group_alloc_update(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
metaslab_class_t *mc = mg->mg_class;
vdev_stat_t *vs = &vd->vdev_stat;
boolean_t was_allocatable;
boolean_t was_initialized;
ASSERT(vd == vd->vdev_top);
+ ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_READER), ==,
+ SCL_ALLOC);
mutex_enter(&mg->mg_lock);
was_allocatable = mg->mg_allocatable;
was_initialized = mg->mg_initialized;
mg->mg_free_capacity = ((vs->vs_space - vs->vs_alloc) * 100) /
(vs->vs_space + 1);
mutex_enter(&mc->mc_lock);
/*
* If the metaslab group was just added then it won't
* have any space until we finish syncing out this txg.
* At that point we will consider it initialized and available
* for allocations. We also don't consider non-activated
* metaslab groups (e.g. vdevs that are in the middle of being removed)
* to be initialized, because they can't be used for allocation.
*/
mg->mg_initialized = metaslab_group_initialized(mg);
if (!was_initialized && mg->mg_initialized) {
mc->mc_groups++;
} else if (was_initialized && !mg->mg_initialized) {
ASSERT3U(mc->mc_groups, >, 0);
mc->mc_groups--;
}
if (mg->mg_initialized)
mg->mg_no_free_space = B_FALSE;
/*
* A metaslab group is considered allocatable if it has plenty
* of free space or is not heavily fragmented. We only take
* fragmentation into account if the metaslab group has a valid
* fragmentation metric (i.e. a value between 0 and 100).
*/
mg->mg_allocatable = (mg->mg_activation_count > 0 &&
mg->mg_free_capacity > zfs_mg_noalloc_threshold &&
(mg->mg_fragmentation == ZFS_FRAG_INVALID ||
mg->mg_fragmentation <= zfs_mg_fragmentation_threshold));
/*
* The mc_alloc_groups maintains a count of the number of
* groups in this metaslab class that are still above the
* zfs_mg_noalloc_threshold. This is used by the allocating
* threads to determine if they should avoid allocations to
* a given group. The allocator will avoid allocations to a group
* if that group has reached or is below the zfs_mg_noalloc_threshold
* and there are still other groups that are above the threshold.
* When a group transitions from allocatable to non-allocatable or
* vice versa we update the metaslab class to reflect that change.
* When the mc_alloc_groups value drops to 0 that means that all
* groups have reached the zfs_mg_noalloc_threshold making all groups
* eligible for allocations. This effectively means that all devices
* are balanced again.
*/
if (was_allocatable && !mg->mg_allocatable)
mc->mc_alloc_groups--;
else if (!was_allocatable && mg->mg_allocatable)
mc->mc_alloc_groups++;
mutex_exit(&mc->mc_lock);
mutex_exit(&mg->mg_lock);
}
metaslab_group_t *
metaslab_group_create(metaslab_class_t *mc, vdev_t *vd)
{
metaslab_group_t *mg;
mg = kmem_zalloc(sizeof (metaslab_group_t), KM_SLEEP);
mutex_init(&mg->mg_lock, NULL, MUTEX_DEFAULT, NULL);
avl_create(&mg->mg_metaslab_tree, metaslab_compare,
sizeof (metaslab_t), offsetof(struct metaslab, ms_group_node));
mg->mg_vd = vd;
mg->mg_class = mc;
mg->mg_activation_count = 0;
mg->mg_initialized = B_FALSE;
mg->mg_no_free_space = B_TRUE;
refcount_create_tracked(&mg->mg_alloc_queue_depth);
mg->mg_taskq = taskq_create("metaslab_group_taskq", metaslab_load_pct,
maxclsyspri, 10, INT_MAX, TASKQ_THREADS_CPU_PCT | TASKQ_DYNAMIC);
return (mg);
}
void
metaslab_group_destroy(metaslab_group_t *mg)
{
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
/*
* We may have gone below zero with the activation count
* either because we never activated in the first place or
* because we're done, and possibly removing the vdev.
*/
ASSERT(mg->mg_activation_count <= 0);
taskq_destroy(mg->mg_taskq);
avl_destroy(&mg->mg_metaslab_tree);
mutex_destroy(&mg->mg_lock);
refcount_destroy(&mg->mg_alloc_queue_depth);
kmem_free(mg, sizeof (metaslab_group_t));
}
void
metaslab_group_activate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
metaslab_group_t *mgprev, *mgnext;
- ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
+ ASSERT3U(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER), !=, 0);
ASSERT(mc->mc_rotor != mg);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count <= 0);
if (++mg->mg_activation_count <= 0)
return;
mg->mg_aliquot = metaslab_aliquot * MAX(1, mg->mg_vd->vdev_children);
metaslab_group_alloc_update(mg);
if ((mgprev = mc->mc_rotor) == NULL) {
mg->mg_prev = mg;
mg->mg_next = mg;
} else {
mgnext = mgprev->mg_next;
mg->mg_prev = mgprev;
mg->mg_next = mgnext;
mgprev->mg_next = mg;
mgnext->mg_prev = mg;
}
mc->mc_rotor = mg;
}
+/*
+ * Passivate a metaslab group and remove it from the allocation rotor.
+ * Callers must hold both the SCL_ALLOC and SCL_ZIO lock prior to passivating
+ * a metaslab group. This function will momentarily drop spa_config_locks
+ * that are lower than the SCL_ALLOC lock (see comment below).
+ */
void
metaslab_group_passivate(metaslab_group_t *mg)
{
metaslab_class_t *mc = mg->mg_class;
+ spa_t *spa = mc->mc_spa;
metaslab_group_t *mgprev, *mgnext;
+ int locks = spa_config_held(spa, SCL_ALL, RW_WRITER);
- ASSERT(spa_config_held(mc->mc_spa, SCL_ALLOC, RW_WRITER));
+ ASSERT3U(spa_config_held(spa, SCL_ALLOC | SCL_ZIO, RW_WRITER), ==,
+ (SCL_ALLOC | SCL_ZIO));
if (--mg->mg_activation_count != 0) {
ASSERT(mc->mc_rotor != mg);
ASSERT(mg->mg_prev == NULL);
ASSERT(mg->mg_next == NULL);
ASSERT(mg->mg_activation_count < 0);
return;
}
+ /*
+ * The spa_config_lock is an array of rwlocks, ordered as
+ * follows (from highest to lowest):
+ * SCL_CONFIG > SCL_STATE > SCL_L2ARC > SCL_ALLOC >
+ * SCL_ZIO > SCL_FREE > SCL_VDEV
+ * (For more information about the spa_config_lock see spa_misc.c)
+ * The higher the lock, the broader its coverage. When we passivate
+ * a metaslab group, we must hold both the SCL_ALLOC and the SCL_ZIO
+ * config locks. However, the metaslab group's taskq might be trying
+ * to preload metaslabs so we must drop the SCL_ZIO lock and any
+ * lower locks to allow the I/O to complete. At a minimum,
+ * we continue to hold the SCL_ALLOC lock, which prevents any future
+ * allocations from taking place and any changes to the vdev tree.
+ */
+ spa_config_exit(spa, locks & ~(SCL_ZIO - 1), spa);
taskq_wait_outstanding(mg->mg_taskq, 0);
+ spa_config_enter(spa, locks & ~(SCL_ZIO - 1), spa, RW_WRITER);
metaslab_group_alloc_update(mg);
mgprev = mg->mg_prev;
mgnext = mg->mg_next;
if (mg == mgnext) {
mc->mc_rotor = NULL;
} else {
mc->mc_rotor = mgnext;
mgprev->mg_next = mgnext;
mgnext->mg_prev = mgprev;
}
mg->mg_prev = NULL;
mg->mg_next = NULL;
}
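/*
 * Illustrative note (not part of this change): assuming the SCL_* bits
 * from spa.h (SCL_CONFIG = 0x01 through SCL_VDEV = 0x40), the mask used
 * in metaslab_group_passivate() above works out to
 *
 *	~(SCL_ZIO - 1)           == ~0x0f
 *	locks & ~(SCL_ZIO - 1)   == the held locks at SCL_ZIO and below
 *
 * so spa_config_exit() temporarily drops SCL_ZIO, SCL_FREE and SCL_VDEV
 * (if held) while SCL_ALLOC and the higher locks stay held across the
 * taskq_wait_outstanding() call.
 */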
boolean_t
metaslab_group_initialized(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
vdev_stat_t *vs = &vd->vdev_stat;
return (vs->vs_space != 0 && mg->mg_activation_count > 0);
}
uint64_t
metaslab_group_get_space(metaslab_group_t *mg)
{
return ((1ULL << mg->mg_vd->vdev_ms_shift) * mg->mg_vd->vdev_ms_count);
}
void
metaslab_group_histogram_verify(metaslab_group_t *mg)
{
uint64_t *mg_hist;
vdev_t *vd = mg->mg_vd;
uint64_t ashift = vd->vdev_ashift;
int i;
if ((zfs_flags & ZFS_DEBUG_HISTOGRAM_VERIFY) == 0)
return;
mg_hist = kmem_zalloc(sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE,
KM_SLEEP);
ASSERT3U(RANGE_TREE_HISTOGRAM_SIZE, >=,
SPACE_MAP_HISTOGRAM_SIZE + ashift);
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp->ms_sm == NULL)
continue;
for (i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++)
mg_hist[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i ++)
VERIFY3U(mg_hist[i], ==, mg->mg_histogram[i]);
kmem_free(mg_hist, sizeof (uint64_t) * RANGE_TREE_HISTOGRAM_SIZE);
}
static void
metaslab_group_histogram_add(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
mg->mg_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] +=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mg->mg_lock);
}
void
metaslab_group_histogram_remove(metaslab_group_t *mg, metaslab_t *msp)
{
metaslab_class_t *mc = mg->mg_class;
uint64_t ashift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
if (msp->ms_sm == NULL)
return;
mutex_enter(&mg->mg_lock);
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
ASSERT3U(mg->mg_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
ASSERT3U(mc->mc_histogram[i + ashift], >=,
msp->ms_sm->sm_phys->smp_histogram[i]);
mg->mg_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
mc->mc_histogram[i + ashift] -=
msp->ms_sm->sm_phys->smp_histogram[i];
}
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_add(metaslab_group_t *mg, metaslab_t *msp)
{
ASSERT(msp->ms_group == NULL);
mutex_enter(&mg->mg_lock);
msp->ms_group = mg;
msp->ms_weight = 0;
avl_add(&mg->mg_metaslab_tree, msp);
mutex_exit(&mg->mg_lock);
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_add(mg, msp);
mutex_exit(&msp->ms_lock);
}
static void
metaslab_group_remove(metaslab_group_t *mg, metaslab_t *msp)
{
mutex_enter(&msp->ms_lock);
metaslab_group_histogram_remove(mg, msp);
mutex_exit(&msp->ms_lock);
mutex_enter(&mg->mg_lock);
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
msp->ms_group = NULL;
mutex_exit(&mg->mg_lock);
}
static void
metaslab_group_sort(metaslab_group_t *mg, metaslab_t *msp, uint64_t weight)
{
/*
* Although in principle the weight can be any value, in
* practice we do not use values in the range [1, 511].
*/
ASSERT(weight >= SPA_MINBLOCKSIZE || weight == 0);
ASSERT(MUTEX_HELD(&msp->ms_lock));
mutex_enter(&mg->mg_lock);
ASSERT(msp->ms_group == mg);
avl_remove(&mg->mg_metaslab_tree, msp);
msp->ms_weight = weight;
avl_add(&mg->mg_metaslab_tree, msp);
mutex_exit(&mg->mg_lock);
}
/*
* Calculate the fragmentation for a given metaslab group. We can use
* a simple average here since all metaslabs within the group must have
* the same size. The return value will be a value between 0 and 100
* (inclusive), or ZFS_FRAG_INVALID if less than half of the metaslabs in this
* group have a fragmentation metric.
*/
uint64_t
metaslab_group_fragmentation(metaslab_group_t *mg)
{
vdev_t *vd = mg->mg_vd;
uint64_t fragmentation = 0;
uint64_t valid_ms = 0;
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp->ms_fragmentation == ZFS_FRAG_INVALID)
continue;
valid_ms++;
fragmentation += msp->ms_fragmentation;
}
if (valid_ms <= vd->vdev_ms_count / 2)
return (ZFS_FRAG_INVALID);
fragmentation /= valid_ms;
ASSERT3U(fragmentation, <=, 100);
return (fragmentation);
}
/*
* Determine if a given metaslab group should skip allocations. A metaslab
* group should avoid allocations if its free capacity is less than the
* zfs_mg_noalloc_threshold or its fragmentation metric is greater than
* zfs_mg_fragmentation_threshold and there is at least one metaslab group
* that can still handle allocations. If the allocation throttle is enabled
* then we skip allocations to devices that have reached their maximum
* allocation queue depth unless the selected metaslab group is the only
* eligible group remaining.
*/
static boolean_t
metaslab_group_allocatable(metaslab_group_t *mg, metaslab_group_t *rotor,
uint64_t psize)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_class_t *mc = mg->mg_class;
/*
* We can only consider skipping this metaslab group if it's
* in the normal metaslab class and there are other metaslab
* groups to select from. Otherwise, we always consider it eligible
* for allocations.
*/
if (mc != spa_normal_class(spa) || mc->mc_groups <= 1)
return (B_TRUE);
/*
* If the metaslab group's mg_allocatable flag is set (see comments
* in metaslab_group_alloc_update() for more information) and
* the allocation throttle is disabled then allow allocations to this
* device. However, if the allocation throttle is enabled then
* check if we have reached our allocation limit (mg_alloc_queue_depth)
* to determine if we should allow allocations to this metaslab group.
* If all metaslab groups are no longer considered allocatable
* (mc_alloc_groups == 0) or we're trying to allocate the smallest
* gang block size then we allow allocations on this metaslab group
* regardless of the mg_allocatable or throttle settings.
*/
if (mg->mg_allocatable) {
metaslab_group_t *mgp;
int64_t qdepth;
uint64_t qmax = mg->mg_max_alloc_queue_depth;
if (!mc->mc_alloc_throttle_enabled)
return (B_TRUE);
/*
* If this metaslab group does not have any free space, then
* there is no point in looking further.
*/
if (mg->mg_no_free_space)
return (B_FALSE);
qdepth = refcount_count(&mg->mg_alloc_queue_depth);
/*
* If this metaslab group is below its qmax or it's
* the only allocatable metaslab group, then attempt
* to allocate from it.
*/
if (qdepth < qmax || mc->mc_alloc_groups == 1)
return (B_TRUE);
ASSERT3U(mc->mc_alloc_groups, >, 1);
/*
* Since this metaslab group is at or over its qmax, we
* need to determine if there are metaslab groups after this
* one that might be able to handle this allocation. This is
* racy since we can't hold the locks for all metaslab
* groups at the same time when we make this check.
*/
for (mgp = mg->mg_next; mgp != rotor; mgp = mgp->mg_next) {
qmax = mgp->mg_max_alloc_queue_depth;
qdepth = refcount_count(&mgp->mg_alloc_queue_depth);
/*
* If there is another metaslab group that
* might be able to handle the allocation, then
* we return false so that we skip this group.
*/
if (qdepth < qmax && !mgp->mg_no_free_space)
return (B_FALSE);
}
/*
* We didn't find another group to handle the allocation
* so we can't skip this metaslab group even though
* we are at or over our qmax.
*/
return (B_TRUE);
} else if (mc->mc_alloc_groups == 0 || psize == SPA_MINBLOCKSIZE) {
return (B_TRUE);
}
return (B_FALSE);
}
/*
* ==========================================================================
* Range tree callbacks
* ==========================================================================
*/
/*
* Comparison function for the private size-ordered tree. Tree is sorted
* by size, larger sizes at the end of the tree.
*/
static int
metaslab_rangesize_compare(const void *x1, const void *x2)
{
const range_seg_t *r1 = x1;
const range_seg_t *r2 = x2;
uint64_t rs_size1 = r1->rs_end - r1->rs_start;
uint64_t rs_size2 = r2->rs_end - r2->rs_start;
int cmp = AVL_CMP(rs_size1, rs_size2);
if (likely(cmp))
return (cmp);
return (AVL_CMP(r1->rs_start, r2->rs_start));
}
/*
* ==========================================================================
* Common allocator routines
* ==========================================================================
*/
/*
* Return the maximum contiguous segment within the metaslab.
*/
uint64_t
metaslab_block_maxsize(metaslab_t *msp)
{
avl_tree_t *t = &msp->ms_size_tree;
range_seg_t *rs;
if (t == NULL || (rs = avl_last(t)) == NULL)
return (0ULL);
return (rs->rs_end - rs->rs_start);
}
static range_seg_t *
metaslab_block_find(avl_tree_t *t, uint64_t start, uint64_t size)
{
range_seg_t *rs, rsearch;
avl_index_t where;
rsearch.rs_start = start;
rsearch.rs_end = start + size;
rs = avl_find(t, &rsearch, &where);
if (rs == NULL) {
rs = avl_nearest(t, where, AVL_AFTER);
}
return (rs);
}
#if defined(WITH_FF_BLOCK_ALLOCATOR) || \
defined(WITH_DF_BLOCK_ALLOCATOR) || \
defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* This is a helper function that can be used by the allocator to find
* a suitable block to allocate. This will search the specified AVL
* tree looking for a block that matches the specified criteria.
*/
static uint64_t
metaslab_block_picker(avl_tree_t *t, uint64_t *cursor, uint64_t size,
uint64_t align)
{
range_seg_t *rs = metaslab_block_find(t, *cursor, size);
while (rs != NULL) {
uint64_t offset = P2ROUNDUP(rs->rs_start, align);
if (offset + size <= rs->rs_end) {
*cursor = offset + size;
return (offset);
}
rs = AVL_NEXT(t, rs);
}
/*
* If we know we've searched the whole map (*cursor == 0), give up.
* Otherwise, reset the cursor to the beginning and try again.
*/
if (*cursor == 0)
return (-1ULL);
*cursor = 0;
return (metaslab_block_picker(t, cursor, size, align));
}
#endif /* WITH_FF/DF/CF_BLOCK_ALLOCATOR */
#if defined(WITH_FF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* The first-fit block allocator
* ==========================================================================
*/
static uint64_t
metaslab_ff_alloc(metaslab_t *msp, uint64_t size)
{
/*
* Find the largest power of 2 block size that evenly divides the
* requested size. This is used to try to allocate blocks with similar
* alignment from the same area of the metaslab (i.e. same cursor
* bucket), but it does not guarantee that allocations of other sizes
* will not exist in the same region.
*/
uint64_t align = size & -size;
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
avl_tree_t *t = &msp->ms_tree->rt_root;
return (metaslab_block_picker(t, cursor, size, align));
}
static metaslab_ops_t metaslab_ff_ops = {
metaslab_ff_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_ff_ops;
#endif /* WITH_FF_BLOCK_ALLOCATOR */
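/*
 * Illustrative example (not part of this change): "size & -size" isolates
 * the lowest set bit of the size, i.e. the largest power of two that
 * evenly divides it, which both the first-fit and dynamic allocators use
 * to pick an alignment-specific cursor. For instance:
 *
 *	size = 0x3000 (12 KiB)  ->  size & -size = 0x1000 (4 KiB alignment)
 *	size = 0x0a00 (2.5 KiB) ->  size & -size = 0x0200 (512 B alignment)
 *
 * highbit64(align) - 1 then selects a distinct ms_lbas[] cursor bucket
 * per alignment class.
 */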
#if defined(WITH_DF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Dynamic block allocator -
* Uses the first fit allocation scheme until space gets low and then
* adjusts to a best fit allocation method. Uses metaslab_df_alloc_threshold
* and metaslab_df_free_pct to determine when to switch the allocation scheme.
* ==========================================================================
*/
static uint64_t
metaslab_df_alloc(metaslab_t *msp, uint64_t size)
{
/*
* Find the largest power of 2 block size that evenly divides the
* requested size. This is used to try to allocate blocks with similar
* alignment from the same area of the metaslab (i.e. same cursor
* bucket), but it does not guarantee that allocations of other sizes
* will not exist in the same region.
*/
uint64_t align = size & -size;
uint64_t *cursor = &msp->ms_lbas[highbit64(align) - 1];
range_tree_t *rt = msp->ms_tree;
avl_tree_t *t = &rt->rt_root;
uint64_t max_size = metaslab_block_maxsize(msp);
int free_pct = range_tree_space(rt) * 100 / msp->ms_size;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
if (max_size < size)
return (-1ULL);
/*
* If we're running low on space switch to using the size
* sorted AVL tree (best-fit).
*/
if (max_size < metaslab_df_alloc_threshold ||
free_pct < metaslab_df_free_pct) {
t = &msp->ms_size_tree;
*cursor = 0;
}
return (metaslab_block_picker(t, cursor, size, 1ULL));
}
static metaslab_ops_t metaslab_df_ops = {
metaslab_df_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_df_ops;
#endif /* WITH_DF_BLOCK_ALLOCATOR */
#if defined(WITH_CF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* Cursor fit block allocator -
* Select the largest region in the metaslab, set the cursor to the beginning
* of the range and the cursor_end to the end of the range. As allocations
* are made, advance the cursor. Continue allocating from the cursor until
* the range is exhausted and then find a new range.
* ==========================================================================
*/
static uint64_t
metaslab_cf_alloc(metaslab_t *msp, uint64_t size)
{
range_tree_t *rt = msp->ms_tree;
avl_tree_t *t = &msp->ms_size_tree;
uint64_t *cursor = &msp->ms_lbas[0];
uint64_t *cursor_end = &msp->ms_lbas[1];
uint64_t offset = 0;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&rt->rt_root));
ASSERT3U(*cursor_end, >=, *cursor);
if ((*cursor + size) > *cursor_end) {
range_seg_t *rs;
rs = avl_last(&msp->ms_size_tree);
if (rs == NULL || (rs->rs_end - rs->rs_start) < size)
return (-1ULL);
*cursor = rs->rs_start;
*cursor_end = rs->rs_end;
}
offset = *cursor;
*cursor += size;
return (offset);
}
static metaslab_ops_t metaslab_cf_ops = {
metaslab_cf_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_cf_ops;
#endif /* WITH_CF_BLOCK_ALLOCATOR */
#if defined(WITH_NDF_BLOCK_ALLOCATOR)
/*
* ==========================================================================
* New dynamic fit allocator -
* Select a region that is large enough to allocate 2^metaslab_ndf_clump_shift
* contiguous blocks. If no region is found then just use the largest segment
* that remains.
* ==========================================================================
*/
/*
* Determines desired number of contiguous blocks (2^metaslab_ndf_clump_shift)
* to request from the allocator.
*/
uint64_t metaslab_ndf_clump_shift = 4;
static uint64_t
metaslab_ndf_alloc(metaslab_t *msp, uint64_t size)
{
avl_tree_t *t = &msp->ms_tree->rt_root;
avl_index_t where;
range_seg_t *rs, rsearch;
uint64_t hbit = highbit64(size);
uint64_t *cursor = &msp->ms_lbas[hbit - 1];
uint64_t max_size = metaslab_block_maxsize(msp);
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(avl_numnodes(t), ==, avl_numnodes(&msp->ms_size_tree));
if (max_size < size)
return (-1ULL);
rsearch.rs_start = *cursor;
rsearch.rs_end = *cursor + size;
rs = avl_find(t, &rsearch, &where);
if (rs == NULL || (rs->rs_end - rs->rs_start) < size) {
t = &msp->ms_size_tree;
rsearch.rs_start = 0;
rsearch.rs_end = MIN(max_size,
1ULL << (hbit + metaslab_ndf_clump_shift));
rs = avl_find(t, &rsearch, &where);
if (rs == NULL)
rs = avl_nearest(t, where, AVL_AFTER);
ASSERT(rs != NULL);
}
if ((rs->rs_end - rs->rs_start) >= size) {
*cursor = rs->rs_start + size;
return (rs->rs_start);
}
return (-1ULL);
}
static metaslab_ops_t metaslab_ndf_ops = {
metaslab_ndf_alloc
};
metaslab_ops_t *zfs_metaslab_ops = &metaslab_ndf_ops;
#endif /* WITH_NDF_BLOCK_ALLOCATOR */
/*
* ==========================================================================
* Metaslabs
* ==========================================================================
*/
/*
* Wait for any in-progress metaslab loads to complete.
*/
void
metaslab_load_wait(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
while (msp->ms_loading) {
ASSERT(!msp->ms_loaded);
cv_wait(&msp->ms_load_cv, &msp->ms_lock);
}
}
int
metaslab_load(metaslab_t *msp)
{
int error = 0;
boolean_t success = B_FALSE;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(!msp->ms_loaded);
ASSERT(!msp->ms_loading);
msp->ms_loading = B_TRUE;
+ /*
+ * Nobody else can manipulate a loading metaslab, so it's now safe
+ * to drop the lock. This way we don't have to hold the lock while
+ * reading the spacemap from disk.
+ */
+ mutex_exit(&msp->ms_lock);
/*
* If the space map has not been allocated yet, then treat
* all the space in the metaslab as free and add it to the
* ms_tree.
*/
if (msp->ms_sm != NULL)
error = space_map_load(msp->ms_sm, msp->ms_tree, SM_FREE);
else
range_tree_add(msp->ms_tree, msp->ms_start, msp->ms_size);
success = (error == 0);
+
+ mutex_enter(&msp->ms_lock);
msp->ms_loading = B_FALSE;
if (success) {
ASSERT3P(msp->ms_group, !=, NULL);
msp->ms_loaded = B_TRUE;
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defertree[t],
range_tree_remove, msp->ms_tree);
}
msp->ms_max_size = metaslab_block_maxsize(msp);
}
cv_broadcast(&msp->ms_load_cv);
return (error);
}
void
metaslab_unload(metaslab_t *msp)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
range_tree_vacate(msp->ms_tree, NULL, NULL);
msp->ms_loaded = B_FALSE;
msp->ms_weight &= ~METASLAB_ACTIVE_MASK;
msp->ms_max_size = 0;
}
int
metaslab_init(metaslab_group_t *mg, uint64_t id, uint64_t object, uint64_t txg,
metaslab_t **msp)
{
vdev_t *vd = mg->mg_vd;
objset_t *mos = vd->vdev_spa->spa_meta_objset;
metaslab_t *ms;
int error;
ms = kmem_zalloc(sizeof (metaslab_t), KM_SLEEP);
mutex_init(&ms->ms_lock, NULL, MUTEX_DEFAULT, NULL);
+ mutex_init(&ms->ms_sync_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&ms->ms_load_cv, NULL, CV_DEFAULT, NULL);
ms->ms_id = id;
ms->ms_start = id << vd->vdev_ms_shift;
ms->ms_size = 1ULL << vd->vdev_ms_shift;
/*
* We only open space map objects that already exist. All others
* will be opened when we finally allocate an object for it.
*/
if (object != 0) {
error = space_map_open(&ms->ms_sm, mos, object, ms->ms_start,
- ms->ms_size, vd->vdev_ashift, &ms->ms_lock);
+ ms->ms_size, vd->vdev_ashift);
if (error != 0) {
kmem_free(ms, sizeof (metaslab_t));
return (error);
}
ASSERT(ms->ms_sm != NULL);
}
/*
* We create the main range tree here, but we don't create the
* other range trees until metaslab_sync_done(). This serves
* two purposes: it allows metaslab_sync_done() to detect the
* addition of new space; and for debugging, it ensures that we'd
* data fault on any attempt to use this metaslab before it's ready.
*/
ms->ms_tree = range_tree_create_impl(&rt_avl_ops, &ms->ms_size_tree,
- metaslab_rangesize_compare, &ms->ms_lock, 0);
+ metaslab_rangesize_compare, 0);
metaslab_group_add(mg, ms);
metaslab_set_fragmentation(ms);
/*
* If we're opening an existing pool (txg == 0) or creating
* a new one (txg == TXG_INITIAL), all space is available now.
* If we're adding space to an existing pool, the new space
* does not become available until after this txg has synced.
* The metaslab's weight will also be initialized when we sync
* out this txg. This ensures that we don't attempt to allocate
* from it before we have initialized it completely.
*/
if (txg <= TXG_INITIAL)
metaslab_sync_done(ms, 0);
/*
* If metaslab_debug_load is set and we're initializing a metaslab
* that has an allocated space map object then load its space
* map so that we can verify frees.
*/
if (metaslab_debug_load && ms->ms_sm != NULL) {
mutex_enter(&ms->ms_lock);
VERIFY0(metaslab_load(ms));
mutex_exit(&ms->ms_lock);
}
if (txg != 0) {
vdev_dirty(vd, 0, NULL, txg);
vdev_dirty(vd, VDD_METASLAB, ms, txg);
}
*msp = ms;
return (0);
}
void
metaslab_fini(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
metaslab_group_remove(mg, msp);
mutex_enter(&msp->ms_lock);
VERIFY(msp->ms_group == NULL);
vdev_space_update(mg->mg_vd, -space_map_allocated(msp->ms_sm),
0, -msp->ms_size);
space_map_close(msp->ms_sm);
metaslab_unload(msp);
range_tree_destroy(msp->ms_tree);
range_tree_destroy(msp->ms_freeingtree);
range_tree_destroy(msp->ms_freedtree);
for (int t = 0; t < TXG_SIZE; t++) {
range_tree_destroy(msp->ms_alloctree[t]);
}
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_destroy(msp->ms_defertree[t]);
}
ASSERT0(msp->ms_deferspace);
mutex_exit(&msp->ms_lock);
cv_destroy(&msp->ms_load_cv);
mutex_destroy(&msp->ms_lock);
+ mutex_destroy(&msp->ms_sync_lock);
kmem_free(msp, sizeof (metaslab_t));
}
#define FRAGMENTATION_TABLE_SIZE 17
/*
* This table defines a segment size based fragmentation metric that will
* allow each metaslab to derive its own fragmentation value. This is done
* by calculating the space in each bucket of the spacemap histogram and
* multiplying that by the fragmentation metric in this table. Doing
* this for all buckets and dividing it by the total amount of free
* space in this metaslab (i.e. the total free space in all buckets) gives
* us the fragmentation metric. This means that a high fragmentation metric
* equates to most of the free space being comprised of small segments.
* Conversely, if the metric is low, then most of the free space is in
* large segments. A 10% change in fragmentation equates to approximately
* double the number of segments.
*
* This table defines 0% fragmented space using 16MB segments. Testing has
* shown that segments that are greater than or equal to 16MB do not suffer
* from drastic performance problems. Using this value, we derive the rest
* of the table. Since the fragmentation value is never stored on disk, it
* is possible to change these calculations in the future.
*/
int zfs_frag_table[FRAGMENTATION_TABLE_SIZE] = {
100, /* 512B */
100, /* 1K */
98, /* 2K */
95, /* 4K */
90, /* 8K */
80, /* 16K */
70, /* 32K */
60, /* 64K */
50, /* 128K */
40, /* 256K */
30, /* 512K */
20, /* 1M */
15, /* 2M */
10, /* 4M */
5, /* 8M */
0, /* 16M */
0 /* 32M */
};
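/*
 * Illustrative sketch, not part of this change: how metaslab_set_fragmentation()
 * below combines the space map histogram with zfs_frag_table. It assumes
 * sm_shift == SPA_MINBLOCKSHIFT (9), so that idx == i, and uses made-up
 * histogram counts.
 */
static uint64_t
zfs_frag_table_example(void)
{
/* 100 free segments of 8K (bucket 4) and 10 free segments of 1M (bucket 11) */
uint64_t space_8k = 100ULL << (4 + 9); /* 819,200 bytes */
uint64_t space_1m = 10ULL << (11 + 9); /* 10,485,760 bytes */
uint64_t total = space_8k + space_1m;
uint64_t frag = (space_8k * zfs_frag_table[4] +
space_1m * zfs_frag_table[11]) / total;
return (frag); /* 25 -- mostly large segments, so low fragmentation */
}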
/*
* Calculate the metaslab's fragmentation metric and record it in
* ms_fragmentation. A value of ZFS_FRAG_INVALID means that the metaslab
* has not been upgraded and does not support this metric. Otherwise, the
* value should be in the range [0, 100].
*/
static void
metaslab_set_fragmentation(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
uint64_t fragmentation = 0;
uint64_t total = 0;
boolean_t feature_enabled = spa_feature_is_enabled(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM);
if (!feature_enabled) {
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
/*
* A null space map means that the entire metaslab is free
* and thus is not fragmented.
*/
if (msp->ms_sm == NULL) {
msp->ms_fragmentation = 0;
return;
}
/*
* If this metaslab's space map has not been upgraded, flag it
* so that we upgrade next time we encounter it.
*/
if (msp->ms_sm->sm_dbuf->db_size != sizeof (space_map_phys_t)) {
uint64_t txg = spa_syncing_txg(spa);
vdev_t *vd = msp->ms_group->mg_vd;
/*
* If we've reached the final dirty txg, then we must
* be shutting down the pool. We don't want to dirty
* any data past this point so skip setting the condense
* flag. We can retry this action the next time the pool
* is imported.
*/
if (spa_writeable(spa) && txg < spa_final_dirty_txg(spa)) {
msp->ms_condense_wanted = B_TRUE;
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
spa_dbgmsg(spa, "txg %llu, requesting force condense: "
"ms_id %llu, vdev_id %llu", txg, msp->ms_id,
vd->vdev_id);
}
msp->ms_fragmentation = ZFS_FRAG_INVALID;
return;
}
for (int i = 0; i < SPACE_MAP_HISTOGRAM_SIZE; i++) {
uint64_t space = 0;
uint8_t shift = msp->ms_sm->sm_shift;
int idx = MIN(shift - SPA_MINBLOCKSHIFT + i,
FRAGMENTATION_TABLE_SIZE - 1);
if (msp->ms_sm->sm_phys->smp_histogram[i] == 0)
continue;
space = msp->ms_sm->sm_phys->smp_histogram[i] << (i + shift);
total += space;
ASSERT3U(idx, <, FRAGMENTATION_TABLE_SIZE);
fragmentation += space * zfs_frag_table[idx];
}
if (total > 0)
fragmentation /= total;
ASSERT3U(fragmentation, <=, 100);
msp->ms_fragmentation = fragmentation;
}
/*
* Compute a weight -- a selection preference value -- for the given metaslab.
* This is based on the amount of free space, the level of fragmentation,
* the LBA range, and whether the metaslab is loaded.
*/
static uint64_t
metaslab_space_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
uint64_t weight, space;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(!vd->vdev_removing);
/*
* The baseline weight is the metaslab's free space.
*/
space = msp->ms_size - space_map_allocated(msp->ms_sm);
if (metaslab_fragmentation_factor_enabled &&
msp->ms_fragmentation != ZFS_FRAG_INVALID) {
/*
* Use the fragmentation information to inversely scale
* down the baseline weight. We need to ensure that we
* don't exclude this metaslab completely when it's 100%
* fragmented. To avoid this we reduce the fragmented value
* by 1.
*/
space = (space * (100 - (msp->ms_fragmentation - 1))) / 100;
/*
* If space < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. The fragmentation metric may have
* decreased the space to something smaller than
* SPA_MINBLOCKSIZE, so reset the space to SPA_MINBLOCKSIZE
* so that we can consume any remaining space.
*/
if (space > 0 && space < SPA_MINBLOCKSIZE)
space = SPA_MINBLOCKSIZE;
}
weight = space;
/*
* Modern disks have uniform bit density and constant angular velocity.
* Therefore, the outer recording zones are faster (higher bandwidth)
* than the inner zones by the ratio of outer to inner track diameter,
* which is typically around 2:1. We account for this by assigning
* higher weight to lower metaslabs (multiplier ranging from 2x to 1x).
* In effect, this means that we'll select the metaslab with the most
* free bandwidth rather than simply the one with the most free space.
*/
if (!vd->vdev_nonrot && metaslab_lba_weighting_enabled) {
weight = 2 * weight - (msp->ms_id * weight) / vd->vdev_ms_count;
ASSERT(weight >= space && weight <= 2 * space);
}
/*
* If this metaslab is one we're actively using, adjust its
* weight to make it preferable to any inactive metaslab so
* we'll polish it off. If the fragmentation on this metaslab
* has exceeded our threshold, then don't mark it active.
*/
if (msp->ms_loaded && msp->ms_fragmentation != ZFS_FRAG_INVALID &&
msp->ms_fragmentation <= zfs_metaslab_fragmentation_threshold) {
weight |= (msp->ms_weight & METASLAB_ACTIVE_MASK);
}
WEIGHT_SET_SPACEBASED(weight);
return (weight);
}
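/*
 * Illustrative sketch, not part of this change: the two adjustments that
 * metaslab_space_weight() above applies to the raw free space. The inputs
 * (1GB free, 50% fragmentation, metaslab 0 of 100) are made up.
 */
static uint64_t
metaslab_space_weight_example(void)
{
uint64_t space = 1ULL << 30; /* 1GB of free space */
uint64_t ms_id = 0, ms_count = 100;
/* Fragmentation scaling: a 50% fragmented metaslab keeps 51% of its space. */
space = (space * (100 - (50 - 1))) / 100;
/* LBA weighting: metaslab 0 receives the full 2x outer-zone bonus. */
return (2 * space - (ms_id * space) / ms_count);
}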
/*
* Return the weight of the specified metaslab, according to the segment-based
* weighting algorithm. The metaslab must be loaded. This function can
* be called within a sync pass since it relies only on the metaslab's
* range tree which is always accurate when the metaslab is loaded.
*/
static uint64_t
metaslab_weight_from_range_tree(metaslab_t *msp)
{
uint64_t weight = 0;
uint32_t segments = 0;
ASSERT(msp->ms_loaded);
for (int i = RANGE_TREE_HISTOGRAM_SIZE - 1; i >= SPA_MINBLOCKSHIFT;
i--) {
uint8_t shift = msp->ms_group->mg_vd->vdev_ashift;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
segments <<= 1;
segments += msp->ms_tree->rt_histogram[i];
/*
* The range tree provides more precision than the space map
* and must be downgraded so that all values fit within the
* space map's histogram. This allows us to compare loaded
* vs. unloaded metaslabs to determine which metaslab is
* considered "best".
*/
if (i > max_idx)
continue;
if (segments != 0) {
WEIGHT_SET_COUNT(weight, segments);
WEIGHT_SET_INDEX(weight, i);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
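/*
 * Illustrative sketch, not part of this change: how range tree buckets above
 * max_idx are folded together in metaslab_weight_from_range_tree() above.
 * It assumes vdev_ashift == 9 (so max_idx == 40) and made-up histogram counts.
 */
static uint32_t
metaslab_range_tree_weight_example(void)
{
uint32_t segments = 0;
/* 3 segments in bucket 42, none in bucket 41, 5 segments in bucket 40. */
segments = (segments << 1) + 3; /* i = 42, above max_idx */
segments = (segments << 1) + 0; /* i = 41, above max_idx */
segments = (segments << 1) + 5; /* i = 40 == max_idx */
/*
 * The loop stops here; the weight would encode a count of 17 at index 40
 * via WEIGHT_SET_COUNT() and WEIGHT_SET_INDEX().
 */
return (segments); /* 17 */
}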
/*
* Calculate the weight based on the on-disk histogram. This should only
* be called after a sync pass has completely finished since the on-disk
* information is updated in metaslab_sync().
*/
static uint64_t
metaslab_weight_from_spacemap(metaslab_t *msp)
{
uint64_t weight = 0;
for (int i = SPACE_MAP_HISTOGRAM_SIZE - 1; i >= 0; i--) {
if (msp->ms_sm->sm_phys->smp_histogram[i] != 0) {
WEIGHT_SET_COUNT(weight,
msp->ms_sm->sm_phys->smp_histogram[i]);
WEIGHT_SET_INDEX(weight, i +
msp->ms_sm->sm_shift);
WEIGHT_SET_ACTIVE(weight, 0);
break;
}
}
return (weight);
}
/*
* Compute a segment-based weight for the specified metaslab. The weight
* is determined by the highest bucket in the histogram. The information
* for the highest bucket is encoded into the weight value.
*/
static uint64_t
metaslab_segment_weight(metaslab_t *msp)
{
metaslab_group_t *mg = msp->ms_group;
uint64_t weight = 0;
uint8_t shift = mg->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
* The metaslab is completely free.
*/
if (space_map_allocated(msp->ms_sm) == 0) {
int idx = highbit64(msp->ms_size) - 1;
int max_idx = SPACE_MAP_HISTOGRAM_SIZE + shift - 1;
if (idx < max_idx) {
WEIGHT_SET_COUNT(weight, 1ULL);
WEIGHT_SET_INDEX(weight, idx);
} else {
WEIGHT_SET_COUNT(weight, 1ULL << (idx - max_idx));
WEIGHT_SET_INDEX(weight, max_idx);
}
WEIGHT_SET_ACTIVE(weight, 0);
ASSERT(!WEIGHT_IS_SPACEBASED(weight));
return (weight);
}
ASSERT3U(msp->ms_sm->sm_dbuf->db_size, ==, sizeof (space_map_phys_t));
/*
* If the metaslab is fully allocated then just make the weight 0.
*/
if (space_map_allocated(msp->ms_sm) == msp->ms_size)
return (0);
/*
* If the metaslab is already loaded, then use the range tree to
* determine the weight. Otherwise, we rely on the space map information
* to generate the weight.
*/
if (msp->ms_loaded) {
weight = metaslab_weight_from_range_tree(msp);
} else {
weight = metaslab_weight_from_spacemap(msp);
}
/*
* If the metaslab was active the last time we calculated its weight
* then keep it active. We want to consume the entire region that
* is associated with this weight.
*/
if (msp->ms_activation_weight != 0 && weight != 0)
WEIGHT_SET_ACTIVE(weight, WEIGHT_GET_ACTIVE(msp->ms_weight));
return (weight);
}
/*
* Determine if we should attempt to allocate from this metaslab. If the
* metaslab has a maximum size then we can quickly determine if the desired
* allocation size can be satisfied. Otherwise, if we're using segment-based
* weighting then we can determine the maximum allocation that this metaslab
* can accommodate based on the index encoded in the weight. If we're using
* space-based weights then rely on the entire weight (excluding the weight
* type bit).
*/
boolean_t
metaslab_should_allocate(metaslab_t *msp, uint64_t asize)
{
boolean_t should_allocate;
if (msp->ms_max_size != 0)
return (msp->ms_max_size >= asize);
if (!WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
/*
* The metaslab segment weight indicates segments in the
* range [2^i, 2^(i+1)), where i is the index in the weight.
* Since the asize might be in the middle of the range, we
* should attempt the allocation if asize < 2^(i+1).
*/
should_allocate = (asize <
1ULL << (WEIGHT_GET_INDEX(msp->ms_weight) + 1));
} else {
should_allocate = (asize <=
(msp->ms_weight & ~METASLAB_WEIGHT_TYPE));
}
return (should_allocate);
}
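/*
 * Illustrative sketch, not part of this change: the segment-based branch of
 * metaslab_should_allocate() above. A weight index of 17 advertises free
 * segments in [128K, 256K), so a 96K request is attempted while a 512K
 * request is not. The sizes are made up.
 */
static boolean_t
metaslab_should_allocate_example(uint64_t asize)
{
int index = 17; /* free segments in [2^17, 2^18) */
return (asize < (1ULL << (index + 1))); /* 96K: TRUE, 512K: FALSE */
}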
static uint64_t
metaslab_weight(metaslab_t *msp)
{
vdev_t *vd = msp->ms_group->mg_vd;
spa_t *spa = vd->vdev_spa;
uint64_t weight;
ASSERT(MUTEX_HELD(&msp->ms_lock));
/*
- * This vdev is in the process of being removed so there is nothing
+ * If this vdev is in the process of being removed, there is nothing
* for us to do here.
*/
- if (vd->vdev_removing) {
- ASSERT0(space_map_allocated(msp->ms_sm));
- ASSERT0(vd->vdev_ms_shift);
+ if (vd->vdev_removing)
return (0);
- }
metaslab_set_fragmentation(msp);
/*
* Update the maximum size if the metaslab is loaded. This will
* ensure that we get an accurate maximum size if newly freed space
* has been added back into the free tree.
*/
if (msp->ms_loaded)
msp->ms_max_size = metaslab_block_maxsize(msp);
/*
* Segment-based weighting requires space map histogram support.
*/
if (zfs_metaslab_segment_weight_enabled &&
spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
(msp->ms_sm == NULL || msp->ms_sm->sm_dbuf->db_size ==
sizeof (space_map_phys_t))) {
weight = metaslab_segment_weight(msp);
} else {
weight = metaslab_space_weight(msp);
}
return (weight);
}
static int
metaslab_activate(metaslab_t *msp, uint64_t activation_weight)
{
ASSERT(MUTEX_HELD(&msp->ms_lock));
if ((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0) {
metaslab_load_wait(msp);
if (!msp->ms_loaded) {
int error = metaslab_load(msp);
if (error) {
metaslab_group_sort(msp->ms_group, msp, 0);
return (error);
}
}
msp->ms_activation_weight = msp->ms_weight;
metaslab_group_sort(msp->ms_group, msp,
msp->ms_weight | activation_weight);
}
ASSERT(msp->ms_loaded);
ASSERT(msp->ms_weight & METASLAB_ACTIVE_MASK);
return (0);
}
static void
metaslab_passivate(metaslab_t *msp, uint64_t weight)
{
ASSERTV(uint64_t size = weight & ~METASLAB_WEIGHT_TYPE);
/*
* If size < SPA_MINBLOCKSIZE, then we will not allocate from
* this metaslab again. In that case, it had better be empty,
* or we would be leaving space on the table.
*/
ASSERT(!WEIGHT_IS_SPACEBASED(msp->ms_weight) ||
size >= SPA_MINBLOCKSIZE ||
range_tree_space(msp->ms_tree) == 0);
ASSERT0(weight & METASLAB_ACTIVE_MASK);
msp->ms_activation_weight = 0;
metaslab_group_sort(msp->ms_group, msp, weight);
ASSERT((msp->ms_weight & METASLAB_ACTIVE_MASK) == 0);
}
/*
* Segment-based metaslabs are activated once and remain active until
* we either fail an allocation attempt (similar to space-based metaslabs)
* or have exhausted the free space in zfs_metaslab_switch_threshold
* buckets since the metaslab was activated. This function checks to see
* if we've exhausted the zfs_metaslab_switch_threshold buckets in the
* metaslab and passivates it proactively. This will allow us to select a
* metaslab with a larger contiguous region, if any, remaining within this
* metaslab group. If we're in sync pass > 1, then we continue using this
* metaslab so that we don't dirty more blocks and cause more sync passes.
*/
void
metaslab_segment_may_passivate(metaslab_t *msp)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
if (WEIGHT_IS_SPACEBASED(msp->ms_weight) || spa_sync_pass(spa) > 1)
return;
/*
* Since we are in the middle of a sync pass, the most accurate
* information that is accessible to us is the in-core range tree
* histogram; calculate the new weight based on that information.
*/
uint64_t weight = metaslab_weight_from_range_tree(msp);
int activation_idx = WEIGHT_GET_INDEX(msp->ms_activation_weight);
int current_idx = WEIGHT_GET_INDEX(weight);
if (current_idx <= activation_idx - zfs_metaslab_switch_threshold)
metaslab_passivate(msp, weight);
}
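/*
 * Illustrative sketch, not part of this change: the passivation test in
 * metaslab_segment_may_passivate() above, assuming an activation index of
 * 20 (1M-2M segments) and a zfs_metaslab_switch_threshold of 2. The
 * metaslab is passivated once its largest remaining bucket index drops to
 * 18 or below (all free segments smaller than 512K).
 */
static boolean_t
metaslab_switch_threshold_example(int current_idx)
{
int activation_idx = 20, switch_threshold = 2;
return (current_idx <= activation_idx - switch_threshold);
}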
static void
metaslab_preload(void *arg)
{
metaslab_t *msp = arg;
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
fstrans_cookie_t cookie = spl_fstrans_mark();
ASSERT(!MUTEX_HELD(&msp->ms_group->mg_lock));
mutex_enter(&msp->ms_lock);
metaslab_load_wait(msp);
if (!msp->ms_loaded)
(void) metaslab_load(msp);
msp->ms_selected_txg = spa_syncing_txg(spa);
mutex_exit(&msp->ms_lock);
spl_fstrans_unmark(cookie);
}
static void
metaslab_group_preload(metaslab_group_t *mg)
{
spa_t *spa = mg->mg_vd->vdev_spa;
metaslab_t *msp;
avl_tree_t *t = &mg->mg_metaslab_tree;
int m = 0;
if (spa_shutting_down(spa) || !metaslab_preload_enabled) {
taskq_wait_outstanding(mg->mg_taskq, 0);
return;
}
mutex_enter(&mg->mg_lock);
+
/*
* Load the next potential metaslabs
*/
for (msp = avl_first(t); msp != NULL; msp = AVL_NEXT(t, msp)) {
+ ASSERT3P(msp->ms_group, ==, mg);
+
/*
* We preload only the maximum number of metaslabs specified
* by metaslab_preload_limit. If a metaslab is being forced
* to condense then we preload it too. This will ensure
* that force condensing happens in the next txg.
*/
if (++m > metaslab_preload_limit && !msp->ms_condense_wanted) {
continue;
}
VERIFY(taskq_dispatch(mg->mg_taskq, metaslab_preload,
msp, TQ_SLEEP) != TASKQID_INVALID);
}
mutex_exit(&mg->mg_lock);
}
/*
* Determine if the space map's on-disk footprint is past our tolerance
* for inefficiency. We would like to use the following criteria to make
* our decision:
*
* 1. The size of the space map object should not dramatically increase as a
* result of writing out the free space range tree.
*
* 2. The minimal on-disk space map representation is zfs_condense_pct/100
* times the size of the free space range tree representation
- * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1.MB).
+ * (i.e. zfs_condense_pct = 110 and in-core = 1MB, minimal = 1.1MB).
*
* 3. The on-disk size of the space map should actually decrease.
*
* Checking the first condition is tricky since we don't want to walk
* the entire AVL tree calculating the estimated on-disk size. Instead we
* use the size-ordered range tree in the metaslab and calculate the
* size required to write out the largest segment in our free tree. If the
* size required to represent that segment on disk is larger than the space
* map object then we avoid condensing this map.
*
* To determine the second criterion we use a best-case estimate and assume
* each segment can be represented on-disk as a single 64-bit entry. We refer
* to this best-case estimate as the space map's minimal form.
*
* Unfortunately, we cannot compute the on-disk size of the space map in this
* context because we cannot accurately compute the effects of compression, etc.
* Instead, we apply the heuristic described in the block comment for
* zfs_metaslab_condense_block_threshold - we only condense if the space used
* is greater than a threshold number of blocks.
*/
static boolean_t
metaslab_should_condense(metaslab_t *msp)
{
space_map_t *sm = msp->ms_sm;
range_seg_t *rs;
uint64_t size, entries, segsz, object_size, optimal_size, record_size;
dmu_object_info_t doi;
uint64_t vdev_blocksize = 1ULL << msp->ms_group->mg_vd->vdev_ashift;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT(msp->ms_loaded);
/*
* Use the ms_size_tree range tree, which is ordered by size, to
* obtain the largest segment in the free tree. We always condense
* metaslabs that are empty and metaslabs for which a condense
* request has been made.
*/
rs = avl_last(&msp->ms_size_tree);
if (rs == NULL || msp->ms_condense_wanted)
return (B_TRUE);
/*
* Calculate the number of 64-bit entries this segment would
* require when written to disk. If this single segment would be
* larger on-disk than the entire current on-disk structure, then
* clearly condensing will increase the on-disk structure size.
*/
size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
entries = size / (MIN(size, SM_RUN_MAX));
segsz = entries * sizeof (uint64_t);
optimal_size = sizeof (uint64_t) * avl_numnodes(&msp->ms_tree->rt_root);
object_size = space_map_length(msp->ms_sm);
dmu_object_info_from_db(sm->sm_dbuf, &doi);
record_size = MAX(doi.doi_data_block_size, vdev_blocksize);
return (segsz <= object_size &&
object_size >= (optimal_size * zfs_condense_pct / 100) &&
object_size > zfs_metaslab_condense_block_threshold * record_size);
}
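/*
 * Illustrative numbers, not part of this change, for the second criterion
 * above: assuming the best case of one 64-bit entry per segment and the
 * zfs_condense_pct = 110 example from the comment, a loaded free tree with
 * 100,000 segments has a minimal form of 800,000 bytes, so this test only
 * passes once the on-disk space map has grown to at least 880,000 bytes.
 */
static boolean_t
metaslab_condense_size_example(uint64_t object_size)
{
uint64_t optimal_size = 100000 * sizeof (uint64_t); /* 800,000 bytes */
return (object_size >= (optimal_size * 110) / 100); /* >= 880,000 bytes */
}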
/*
* Condense the on-disk space map representation to its minimized form.
* The minimized form consists of a small number of allocations followed by
* the entries of the free range tree.
*/
static void
metaslab_condense(metaslab_t *msp, uint64_t txg, dmu_tx_t *tx)
{
spa_t *spa = msp->ms_group->mg_vd->vdev_spa;
range_tree_t *condense_tree;
space_map_t *sm = msp->ms_sm;
ASSERT(MUTEX_HELD(&msp->ms_lock));
ASSERT3U(spa_sync_pass(spa), ==, 1);
ASSERT(msp->ms_loaded);
spa_dbgmsg(spa, "condensing: txg %llu, msp[%llu] %p, vdev id %llu, "
"spa %s, smp size %llu, segments %lu, forcing condense=%s", txg,
msp->ms_id, msp, msp->ms_group->mg_vd->vdev_id,
msp->ms_group->mg_vd->vdev_spa->spa_name,
space_map_length(msp->ms_sm), avl_numnodes(&msp->ms_tree->rt_root),
msp->ms_condense_wanted ? "TRUE" : "FALSE");
msp->ms_condense_wanted = B_FALSE;
/*
* Create a range tree that is 100% allocated. We remove segments
* that have been freed in this txg, any deferred frees that exist,
* and any allocation in the future. Removing segments should be
* a relatively inexpensive operation since we expect these trees to
* have a small number of nodes.
*/
- condense_tree = range_tree_create(NULL, NULL, &msp->ms_lock);
+ condense_tree = range_tree_create(NULL, NULL);
range_tree_add(condense_tree, msp->ms_start, msp->ms_size);
/*
* Remove what's been freed in this txg from the condense_tree.
* Since we're in sync_pass 1, we know that all the frees from
* this txg are in the freeingtree.
*/
range_tree_walk(msp->ms_freeingtree, range_tree_remove, condense_tree);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
range_tree_walk(msp->ms_defertree[t],
range_tree_remove, condense_tree);
}
for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
range_tree_walk(msp->ms_alloctree[(txg + t) & TXG_MASK],
range_tree_remove, condense_tree);
}
/*
* We're about to drop the metaslab's lock thus allowing
* other consumers to change its content. Set the
* metaslab's ms_condensing flag to ensure that
* allocations on this metaslab do not occur while we're
* in the middle of committing it to disk. This is only critical
* for the ms_tree as all other range trees use per txg
* views of their content.
*/
msp->ms_condensing = B_TRUE;
mutex_exit(&msp->ms_lock);
space_map_truncate(sm, tx);
- mutex_enter(&msp->ms_lock);
/*
* While we would ideally like to create a space map representation
* that consists only of allocation records, doing so can be
* prohibitively expensive because the in-core free tree can be
* large, and therefore computationally expensive to subtract
* from the condense_tree. Instead we sync out two trees, a cheap
* allocation only tree followed by the in-core free tree. While not
* optimal, this is typically close to optimal, and much cheaper to
* compute.
*/
space_map_write(sm, condense_tree, SM_ALLOC, tx);
range_tree_vacate(condense_tree, NULL, NULL);
range_tree_destroy(condense_tree);
space_map_write(sm, msp->ms_tree, SM_FREE, tx);
+ mutex_enter(&msp->ms_lock);
msp->ms_condensing = B_FALSE;
}
/*
* Write a metaslab to disk in the context of the specified transaction group.
*/
void
metaslab_sync(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa_meta_objset(spa);
range_tree_t *alloctree = msp->ms_alloctree[txg & TXG_MASK];
dmu_tx_t *tx;
uint64_t object = space_map_object(msp->ms_sm);
ASSERT(!vd->vdev_ishole);
/*
* This metaslab has just been added so there's no work to do now.
*/
if (msp->ms_freeingtree == NULL) {
ASSERT3P(alloctree, ==, NULL);
return;
}
ASSERT3P(alloctree, !=, NULL);
ASSERT3P(msp->ms_freeingtree, !=, NULL);
ASSERT3P(msp->ms_freedtree, !=, NULL);
/*
* Normally, we don't want to process a metaslab if there
* are no allocations or frees to perform. However, if the metaslab
* is being forced to condense and it's loaded, we need to let it
* through.
*/
if (range_tree_space(alloctree) == 0 &&
range_tree_space(msp->ms_freeingtree) == 0 &&
!(msp->ms_loaded && msp->ms_condense_wanted))
return;
VERIFY(txg <= spa_final_dirty_txg(spa));
/*
* The only state that can actually be changing concurrently with
* metaslab_sync() is the metaslab's ms_tree. No other thread can
* be modifying this txg's alloctree, freeingtree, freedtree, or
- * space_map_phys_t. Therefore, we only hold ms_lock to satify
- * space map ASSERTs. We drop it whenever we call into the DMU,
- * because the DMU can call down to us (e.g. via zio_free()) at
- * any time.
+ * space_map_phys_t. We drop ms_lock whenever we could call
+ * into the DMU, because the DMU can call down to us
+ * (e.g. via zio_free()) at any time.
+ *
+ * The spa_vdev_remove_thread() can be reading metaslab state
+ * concurrently, and it is locked out by the ms_sync_lock. Note
+ * that the ms_lock is insufficient for this, because it is dropped
+ * by space_map_write().
*/
tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
if (msp->ms_sm == NULL) {
uint64_t new_object;
new_object = space_map_alloc(mos, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&msp->ms_sm, mos, new_object,
- msp->ms_start, msp->ms_size, vd->vdev_ashift,
- &msp->ms_lock));
+ msp->ms_start, msp->ms_size, vd->vdev_ashift));
ASSERT(msp->ms_sm != NULL);
}
+ mutex_enter(&msp->ms_sync_lock);
mutex_enter(&msp->ms_lock);
/*
* Note: metaslab_condense() clears the space map's histogram.
* Therefore we must verify and remove this histogram before
* condensing.
*/
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
metaslab_group_histogram_remove(mg, msp);
if (msp->ms_loaded && spa_sync_pass(spa) == 1 &&
metaslab_should_condense(msp)) {
metaslab_condense(msp, txg, tx);
} else {
+ mutex_exit(&msp->ms_lock);
space_map_write(msp->ms_sm, alloctree, SM_ALLOC, tx);
space_map_write(msp->ms_sm, msp->ms_freeingtree, SM_FREE, tx);
+ mutex_enter(&msp->ms_lock);
}
if (msp->ms_loaded) {
/*
- * When the space map is loaded, we have an accruate
+ * When the space map is loaded, we have an accurate
* histogram in the range tree. This gives us an opportunity
* to bring the space map's histogram up-to-date so we clear
* it first before updating it.
*/
space_map_histogram_clear(msp->ms_sm);
space_map_histogram_add(msp->ms_sm, msp->ms_tree, tx);
/*
* Since we've cleared the histogram we need to add back
* any free space that has already been processed, plus
* any deferred space. This allows the on-disk histogram
* to accurately reflect all free space even if some space
* is not yet available for allocation (i.e. deferred).
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freedtree, tx);
/*
* Add back any deferred free space that has not been
* added back into the in-core free tree yet. This will
* ensure that we don't end up with a space map histogram
* that is completely empty unless the metaslab is fully
* allocated.
*/
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
space_map_histogram_add(msp->ms_sm,
msp->ms_defertree[t], tx);
}
}
/*
* Always add the free space from this sync pass to the space
* map histogram. We want to make sure that the on-disk histogram
* accounts for all free space. If the space map is not loaded,
* then we will lose some accuracy but will correct it the next
* time we load the space map.
*/
space_map_histogram_add(msp->ms_sm, msp->ms_freeingtree, tx);
metaslab_group_histogram_add(mg, msp);
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
/*
* For sync pass 1, we avoid traversing this txg's free range tree
* and instead will just swap the pointers for freeingtree and
* freedtree. We can safely do this since the freedtree is
* guaranteed to be empty on the initial pass.
*/
if (spa_sync_pass(spa) == 1) {
range_tree_swap(&msp->ms_freeingtree, &msp->ms_freedtree);
} else {
range_tree_vacate(msp->ms_freeingtree,
range_tree_add, msp->ms_freedtree);
}
range_tree_vacate(alloctree, NULL, NULL);
ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_alloctree[TXG_CLEAN(txg) & TXG_MASK]));
ASSERT0(range_tree_space(msp->ms_freeingtree));
mutex_exit(&msp->ms_lock);
if (object != space_map_object(msp->ms_sm)) {
object = space_map_object(msp->ms_sm);
dmu_write(mos, vd->vdev_ms_array, sizeof (uint64_t) *
msp->ms_id, sizeof (uint64_t), &object, tx);
}
+ mutex_exit(&msp->ms_sync_lock);
dmu_tx_commit(tx);
}
/*
* Called after a transaction group has completely synced to mark
* all of the metaslab's free space as usable.
*/
void
metaslab_sync_done(metaslab_t *msp, uint64_t txg)
{
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
spa_t *spa = vd->vdev_spa;
range_tree_t **defer_tree;
int64_t alloc_delta, defer_delta;
boolean_t defer_allowed = B_TRUE;
ASSERT(!vd->vdev_ishole);
mutex_enter(&msp->ms_lock);
/*
* If this metaslab is just becoming available, initialize its
* range trees and add its capacity to the vdev.
*/
if (msp->ms_freedtree == NULL) {
for (int t = 0; t < TXG_SIZE; t++) {
ASSERT(msp->ms_alloctree[t] == NULL);
- msp->ms_alloctree[t] = range_tree_create(NULL, msp,
- &msp->ms_lock);
+ msp->ms_alloctree[t] = range_tree_create(NULL, NULL);
}
ASSERT3P(msp->ms_freeingtree, ==, NULL);
- msp->ms_freeingtree = range_tree_create(NULL, msp,
- &msp->ms_lock);
+ msp->ms_freeingtree = range_tree_create(NULL, NULL);
ASSERT3P(msp->ms_freedtree, ==, NULL);
- msp->ms_freedtree = range_tree_create(NULL, msp,
- &msp->ms_lock);
+ msp->ms_freedtree = range_tree_create(NULL, NULL);
for (int t = 0; t < TXG_DEFER_SIZE; t++) {
ASSERT(msp->ms_defertree[t] == NULL);
- msp->ms_defertree[t] = range_tree_create(NULL, msp,
- &msp->ms_lock);
+ msp->ms_defertree[t] = range_tree_create(NULL, NULL);
}
vdev_space_update(vd, 0, 0, msp->ms_size);
}
defer_tree = &msp->ms_defertree[txg % TXG_DEFER_SIZE];
uint64_t free_space = metaslab_class_get_space(spa_normal_class(spa)) -
metaslab_class_get_alloc(spa_normal_class(spa));
- if (free_space <= spa_get_slop_space(spa)) {
+ if (free_space <= spa_get_slop_space(spa) || vd->vdev_removing) {
defer_allowed = B_FALSE;
}
defer_delta = 0;
alloc_delta = space_map_alloc_delta(msp->ms_sm);
if (defer_allowed) {
defer_delta = range_tree_space(msp->ms_freedtree) -
range_tree_space(*defer_tree);
} else {
defer_delta -= range_tree_space(*defer_tree);
}
vdev_space_update(vd, alloc_delta + defer_delta, defer_delta, 0);
/*
* If there's a metaslab_load() in progress, wait for it to complete
* so that we have a consistent view of the in-core space map.
*/
metaslab_load_wait(msp);
/*
* Move the frees from the defer_tree back to the free
* range tree (if it's loaded). Swap the freed_tree and the
* defer_tree -- this is safe to do because we've just emptied out
* the defer_tree.
*/
range_tree_vacate(*defer_tree,
msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
if (defer_allowed) {
range_tree_swap(&msp->ms_freedtree, defer_tree);
} else {
range_tree_vacate(msp->ms_freedtree,
msp->ms_loaded ? range_tree_add : NULL, msp->ms_tree);
}
space_map_update(msp->ms_sm);
msp->ms_deferspace += defer_delta;
ASSERT3S(msp->ms_deferspace, >=, 0);
ASSERT3S(msp->ms_deferspace, <=, msp->ms_size);
if (msp->ms_deferspace != 0) {
/*
* Keep syncing this metaslab until all deferred frees
* are back in circulation.
*/
vdev_dirty(vd, VDD_METASLAB, msp, txg + 1);
}
/*
* Calculate the new weights before unloading any metaslabs.
* This will give us the most accurate weighting.
*/
metaslab_group_sort(mg, msp, metaslab_weight(msp));
/*
* If the metaslab is loaded and we've not tried to load or allocate
* from it in 'metaslab_unload_delay' txgs, then unload it.
*/
if (msp->ms_loaded &&
msp->ms_selected_txg + metaslab_unload_delay < txg) {
for (int t = 1; t < TXG_CONCURRENT_STATES; t++) {
VERIFY0(range_tree_space(
msp->ms_alloctree[(txg + t) & TXG_MASK]));
}
if (!metaslab_debug_unload)
metaslab_unload(msp);
}
+ ASSERT0(range_tree_space(msp->ms_alloctree[txg & TXG_MASK]));
+ ASSERT0(range_tree_space(msp->ms_freeingtree));
+ ASSERT0(range_tree_space(msp->ms_freedtree));
+
mutex_exit(&msp->ms_lock);
}
void
metaslab_sync_reassess(metaslab_group_t *mg)
{
+ spa_t *spa = mg->mg_class->mc_spa;
+
+ spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
metaslab_group_alloc_update(mg);
mg->mg_fragmentation = metaslab_group_fragmentation(mg);
/*
- * Preload the next potential metaslabs
+ * Preload the next potential metaslabs but only on active
+ * metaslab groups. We can get into a state where the metaslab
+ * is no longer active since we dirty metaslabs as we remove a
+ * device, thus potentially making the metaslab group eligible
+ * for preloading.
*/
- metaslab_group_preload(mg);
+ if (mg->mg_activation_count > 0) {
+ metaslab_group_preload(mg);
+ }
+ spa_config_exit(spa, SCL_ALLOC, FTAG);
}
static uint64_t
metaslab_distance(metaslab_t *msp, dva_t *dva)
{
uint64_t ms_shift = msp->ms_group->mg_vd->vdev_ms_shift;
uint64_t offset = DVA_GET_OFFSET(dva) >> ms_shift;
uint64_t start = msp->ms_id;
if (msp->ms_group->mg_vd->vdev_id != DVA_GET_VDEV(dva))
return (1ULL << 63);
if (offset < start)
return ((start - offset) << ms_shift);
if (offset > start)
return ((offset - start) << ms_shift);
return (0);
}
/*
* ==========================================================================
* Metaslab allocation tracing facility
* ==========================================================================
*/
#ifdef _METASLAB_TRACING
kstat_t *metaslab_trace_ksp;
kstat_named_t metaslab_trace_over_limit;
void
metaslab_alloc_trace_init(void)
{
ASSERT(metaslab_alloc_trace_cache == NULL);
metaslab_alloc_trace_cache = kmem_cache_create(
"metaslab_alloc_trace_cache", sizeof (metaslab_alloc_trace_t),
0, NULL, NULL, NULL, NULL, NULL, 0);
metaslab_trace_ksp = kstat_create("zfs", 0, "metaslab_trace_stats",
"misc", KSTAT_TYPE_NAMED, 1, KSTAT_FLAG_VIRTUAL);
if (metaslab_trace_ksp != NULL) {
metaslab_trace_ksp->ks_data = &metaslab_trace_over_limit;
kstat_named_init(&metaslab_trace_over_limit,
"metaslab_trace_over_limit", KSTAT_DATA_UINT64);
kstat_install(metaslab_trace_ksp);
}
}
void
metaslab_alloc_trace_fini(void)
{
if (metaslab_trace_ksp != NULL) {
kstat_delete(metaslab_trace_ksp);
metaslab_trace_ksp = NULL;
}
kmem_cache_destroy(metaslab_alloc_trace_cache);
metaslab_alloc_trace_cache = NULL;
}
/*
* Add an allocation trace element to the allocation tracing list.
*/
static void
metaslab_trace_add(zio_alloc_list_t *zal, metaslab_group_t *mg,
metaslab_t *msp, uint64_t psize, uint32_t dva_id, uint64_t offset)
{
metaslab_alloc_trace_t *mat;
if (!metaslab_trace_enabled)
return;
/*
* When the tracing list reaches its maximum we remove
* the second element in the list before adding a new one.
* By removing the second element we preserve the original
* entry as a clue to what allocation steps have already been
* performed.
*/
if (zal->zal_size == metaslab_trace_max_entries) {
metaslab_alloc_trace_t *mat_next;
#ifdef DEBUG
panic("too many entries in allocation list");
#endif
atomic_inc_64(&metaslab_trace_over_limit.value.ui64);
zal->zal_size--;
mat_next = list_next(&zal->zal_list, list_head(&zal->zal_list));
list_remove(&zal->zal_list, mat_next);
kmem_cache_free(metaslab_alloc_trace_cache, mat_next);
}
mat = kmem_cache_alloc(metaslab_alloc_trace_cache, KM_SLEEP);
list_link_init(&mat->mat_list_node);
mat->mat_mg = mg;
mat->mat_msp = msp;
mat->mat_size = psize;
mat->mat_dva_id = dva_id;
mat->mat_offset = offset;
mat->mat_weight = 0;
if (msp != NULL)
mat->mat_weight = msp->ms_weight;
/*
* The list is part of the zio so locking is not required. Only
* a single thread will perform allocations for a given zio.
*/
list_insert_tail(&zal->zal_list, mat);
zal->zal_size++;
ASSERT3U(zal->zal_size, <=, metaslab_trace_max_entries);
}
void
metaslab_trace_init(zio_alloc_list_t *zal)
{
list_create(&zal->zal_list, sizeof (metaslab_alloc_trace_t),
offsetof(metaslab_alloc_trace_t, mat_list_node));
zal->zal_size = 0;
}
void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
metaslab_alloc_trace_t *mat;
while ((mat = list_remove_head(&zal->zal_list)) != NULL)
kmem_cache_free(metaslab_alloc_trace_cache, mat);
list_destroy(&zal->zal_list);
zal->zal_size = 0;
}
#else
#define metaslab_trace_add(zal, mg, msp, psize, id, off)
void
metaslab_alloc_trace_init(void)
{
}
void
metaslab_alloc_trace_fini(void)
{
}
void
metaslab_trace_init(zio_alloc_list_t *zal)
{
}
void
metaslab_trace_fini(zio_alloc_list_t *zal)
{
}
#endif /* _METASLAB_TRACING */
/*
* ==========================================================================
* Metaslab block operations
* ==========================================================================
*/
static void
metaslab_group_alloc_increment(spa_t *spa, uint64_t vdev, void *tag, int flags)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
flags & METASLAB_DONT_THROTTLE)
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
(void) refcount_add(&mg->mg_alloc_queue_depth, tag);
}
void
metaslab_group_alloc_decrement(spa_t *spa, uint64_t vdev, void *tag, int flags)
{
if (!(flags & METASLAB_ASYNC_ALLOC) ||
flags & METASLAB_DONT_THROTTLE)
return;
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
if (!mg->mg_class->mc_alloc_throttle_enabled)
return;
(void) refcount_remove(&mg->mg_alloc_queue_depth, tag);
}
void
metaslab_group_alloc_verify(spa_t *spa, const blkptr_t *bp, void *tag)
{
#ifdef ZFS_DEBUG
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
for (int d = 0; d < ndvas; d++) {
uint64_t vdev = DVA_GET_VDEV(&dva[d]);
metaslab_group_t *mg = vdev_lookup_top(spa, vdev)->vdev_mg;
VERIFY(refcount_not_held(&mg->mg_alloc_queue_depth, tag));
}
#endif
}
static uint64_t
metaslab_block_alloc(metaslab_t *msp, uint64_t size, uint64_t txg)
{
uint64_t start;
range_tree_t *rt = msp->ms_tree;
metaslab_class_t *mc = msp->ms_group->mg_class;
VERIFY(!msp->ms_condensing);
start = mc->mc_ops->msop_alloc(msp, size);
if (start != -1ULL) {
metaslab_group_t *mg = msp->ms_group;
vdev_t *vd = mg->mg_vd;
VERIFY0(P2PHASE(start, 1ULL << vd->vdev_ashift));
VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
VERIFY3U(range_tree_space(rt) - size, <=, msp->ms_size);
range_tree_remove(rt, start, size);
if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
vdev_dirty(mg->mg_vd, VDD_METASLAB, msp, txg);
range_tree_add(msp->ms_alloctree[txg & TXG_MASK], start, size);
/* Track the last successful allocation */
msp->ms_alloc_txg = txg;
metaslab_verify_space(msp, txg);
}
/*
* Now that we've attempted the allocation we need to update the
* metaslab's maximum block size since it may have changed.
*/
msp->ms_max_size = metaslab_block_maxsize(msp);
return (start);
}
static uint64_t
metaslab_group_alloc_normal(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
{
metaslab_t *msp = NULL;
uint64_t offset = -1ULL;
uint64_t activation_weight;
uint64_t target_distance;
int i;
activation_weight = METASLAB_WEIGHT_PRIMARY;
for (i = 0; i < d; i++) {
if (DVA_GET_VDEV(&dva[i]) == mg->mg_vd->vdev_id) {
activation_weight = METASLAB_WEIGHT_SECONDARY;
break;
}
}
metaslab_t *search = kmem_alloc(sizeof (*search), KM_SLEEP);
search->ms_weight = UINT64_MAX;
search->ms_start = 0;
for (;;) {
boolean_t was_active;
avl_tree_t *t = &mg->mg_metaslab_tree;
avl_index_t idx;
mutex_enter(&mg->mg_lock);
/*
* Find the metaslab with the highest weight that is less
* than what we've already tried. In the common case, this
* means that we will examine each metaslab at most once.
* Note that concurrent callers could reorder metaslabs
* by activation/passivation once we have dropped the mg_lock.
* If a metaslab is activated by another thread, and we fail
* to allocate from the metaslab we have selected, we may
* not try the newly-activated metaslab, and instead activate
* another metaslab. This is not optimal, but generally
* does not cause any problems (a possible exception being
* if every metaslab is completely full except for the
* newly-activated metaslab which we fail to examine).
*/
msp = avl_find(t, search, &idx);
if (msp == NULL)
msp = avl_nearest(t, idx, AVL_AFTER);
for (; msp != NULL; msp = AVL_NEXT(t, msp)) {
if (!metaslab_should_allocate(msp, asize)) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL);
continue;
}
/*
* If the selected metaslab is condensing, skip it.
*/
if (msp->ms_condensing)
continue;
was_active = msp->ms_weight & METASLAB_ACTIVE_MASK;
if (activation_weight == METASLAB_WEIGHT_PRIMARY)
break;
target_distance = min_distance +
(space_map_allocated(msp->ms_sm) != 0 ? 0 :
min_distance >> 1);
for (i = 0; i < d; i++) {
if (metaslab_distance(msp, &dva[i]) <
target_distance)
break;
}
if (i == d)
break;
}
mutex_exit(&mg->mg_lock);
if (msp == NULL) {
kmem_free(search, sizeof (*search));
return (-1ULL);
}
search->ms_weight = msp->ms_weight;
search->ms_start = msp->ms_start + 1;
mutex_enter(&msp->ms_lock);
/*
* Ensure that the metaslab we have selected is still
* capable of handling our request. It's possible that
* another thread may have changed the weight while we
* were blocked on the metaslab lock. We check the
* active status first to see if we need to reselect
* a new metaslab.
*/
if (was_active && !(msp->ms_weight & METASLAB_ACTIVE_MASK)) {
mutex_exit(&msp->ms_lock);
continue;
}
if ((msp->ms_weight & METASLAB_WEIGHT_SECONDARY) &&
activation_weight == METASLAB_WEIGHT_PRIMARY) {
metaslab_passivate(msp,
msp->ms_weight & ~METASLAB_ACTIVE_MASK);
mutex_exit(&msp->ms_lock);
continue;
}
if (metaslab_activate(msp, activation_weight) != 0) {
mutex_exit(&msp->ms_lock);
continue;
}
msp->ms_selected_txg = txg;
/*
* Now that we have the lock, recheck to see if we should
* continue to use this metaslab for this allocation. The
* metaslab is now loaded so metaslab_should_allocate() can
* accurately determine if the allocation attempt should
* proceed.
*/
if (!metaslab_should_allocate(msp, asize)) {
/* Passivate this metaslab and select a new one. */
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_TOO_SMALL);
goto next;
}
/*
* If this metaslab is currently condensing then pick again as
* we can't manipulate this metaslab until it's committed
* to disk.
*/
if (msp->ms_condensing) {
metaslab_trace_add(zal, mg, msp, asize, d,
TRACE_CONDENSING);
mutex_exit(&msp->ms_lock);
continue;
}
offset = metaslab_block_alloc(msp, asize, txg);
metaslab_trace_add(zal, mg, msp, asize, d, offset);
if (offset != -1ULL) {
/* Proactively passivate the metaslab, if needed */
metaslab_segment_may_passivate(msp);
break;
}
next:
ASSERT(msp->ms_loaded);
/*
* We were unable to allocate from this metaslab so determine
* a new weight for this metaslab. Now that we have loaded
* the metaslab we can provide a better hint to the metaslab
* selector.
*
* For space-based metaslabs, we use the maximum block size.
* This information is only available when the metaslab
* is loaded and is more accurate than the generic free
* space weight that was calculated by metaslab_weight().
* This information allows us to quickly compare the maximum
* available allocation in the metaslab to the allocation
* size being requested.
*
* For segment-based metaslabs, determine the new weight
* based on the highest bucket in the range tree. We
* explicitly use the loaded segment weight (i.e. the range
* tree histogram) since it contains the space that is
* currently available for allocation and is accurate
* even within a sync pass.
*/
if (WEIGHT_IS_SPACEBASED(msp->ms_weight)) {
uint64_t weight = metaslab_block_maxsize(msp);
WEIGHT_SET_SPACEBASED(weight);
metaslab_passivate(msp, weight);
} else {
metaslab_passivate(msp,
metaslab_weight_from_range_tree(msp));
}
/*
* We have just failed an allocation attempt, check
* that metaslab_should_allocate() agrees. Otherwise,
* we may end up in an infinite loop retrying the same
* metaslab.
*/
ASSERT(!metaslab_should_allocate(msp, asize));
mutex_exit(&msp->ms_lock);
}
mutex_exit(&msp->ms_lock);
kmem_free(search, sizeof (*search));
return (offset);
}
static uint64_t
metaslab_group_alloc(metaslab_group_t *mg, zio_alloc_list_t *zal,
uint64_t asize, uint64_t txg, uint64_t min_distance, dva_t *dva, int d)
{
uint64_t offset;
ASSERT(mg->mg_initialized);
offset = metaslab_group_alloc_normal(mg, zal, asize, txg,
min_distance, dva, d);
mutex_enter(&mg->mg_lock);
if (offset == -1ULL) {
mg->mg_failed_allocations++;
metaslab_trace_add(zal, mg, NULL, asize, d,
TRACE_GROUP_FAILURE);
if (asize == SPA_GANGBLOCKSIZE) {
/*
* This metaslab group was unable to allocate
* the minimum gang block size so it must be out of
* space. We must notify the allocation throttle
* to start skipping allocation attempts to this
* metaslab group until more space becomes available.
* Note: this failure cannot be caused by the
* allocation throttle since the allocation throttle
* is only responsible for skipping devices and
* not failing block allocations.
*/
mg->mg_no_free_space = B_TRUE;
}
}
mg->mg_allocations++;
mutex_exit(&mg->mg_lock);
return (offset);
}
/*
* If we have to write a ditto block (i.e. more than one DVA for a given BP)
* on the same vdev as an existing DVA of this BP, then try to allocate it
* at least (vdev_asize / (2 ^ ditto_same_vdev_distance_shift)) away from the
* existing DVAs.
*/
int ditto_same_vdev_distance_shift = 3;
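/*
 * Illustrative sketch, not part of this change: with the default shift of 3,
 * a ditto DVA on a 1TB vdev is kept at least 128GB away from the BP's other
 * DVAs. As in metaslab_alloc_dva() below, the distance is dropped to 0 when
 * it would not span even one metaslab (and when we are trying hard).
 */
static uint64_t
ditto_distance_example(uint64_t vdev_asize, uint64_t ms_size)
{
uint64_t distance = vdev_asize >> ditto_same_vdev_distance_shift;
return (distance <= ms_size ? 0 : distance);
}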
/*
* Allocate a block for the specified i/o.
*/
-static int
+int
metaslab_alloc_dva(spa_t *spa, metaslab_class_t *mc, uint64_t psize,
dva_t *dva, int d, dva_t *hintdva, uint64_t txg, int flags,
zio_alloc_list_t *zal)
{
metaslab_group_t *mg, *fast_mg, *rotor;
vdev_t *vd;
boolean_t try_hard = B_FALSE;
ASSERT(!DVA_IS_VALID(&dva[d]));
/*
* For testing, make some blocks above a certain size be gang blocks.
*/
if (psize >= metaslab_gang_bang && (ddi_get_lbolt() & 3) == 0) {
metaslab_trace_add(zal, NULL, NULL, psize, d, TRACE_FORCE_GANG);
return (SET_ERROR(ENOSPC));
}
/*
* Start at the rotor and loop through all mgs until we find something.
* Note that there's no locking on mc_rotor or mc_aliquot because
* nothing actually breaks if we miss a few updates -- we just won't
* allocate quite as evenly. It all balances out over time.
*
* If we are doing ditto or log blocks, try to spread them across
* consecutive vdevs. If we're forced to reuse a vdev before we've
* allocated all of our ditto blocks, then try and spread them out on
* that vdev as much as possible. If it turns out to not be possible,
* gradually lower our standards until anything becomes acceptable.
* Also, allocating on consecutive vdevs (as opposed to random vdevs)
* gives us hope of containing our fault domains to something we're
* able to reason about. Otherwise, any two top-level vdev failures
* will guarantee the loss of data. With consecutive allocation,
* only two adjacent top-level vdev failures will result in data loss.
*
* If we are doing gang blocks (hintdva is non-NULL), try to keep
* ourselves on the same vdev as our gang block header. That
* way, we can hope for locality in vdev_cache, plus it makes our
* fault domains something tractable.
*/
if (hintdva) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&hintdva[d]));
/*
* It's possible the vdev we're using as the hint no
- * longer exists (i.e. removed). Consult the rotor when
+ * longer exists or its mg has been closed (e.g. by
+ * device removal). Consult the rotor when
* all else fails.
*/
- if (vd != NULL) {
+ if (vd != NULL && vd->vdev_mg != NULL) {
mg = vd->vdev_mg;
if (flags & METASLAB_HINTBP_AVOID &&
mg->mg_next != NULL)
mg = mg->mg_next;
} else {
mg = mc->mc_rotor;
}
} else if (d != 0) {
vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d - 1]));
mg = vd->vdev_mg->mg_next;
} else if (flags & METASLAB_FASTWRITE) {
mg = fast_mg = mc->mc_rotor;
do {
if (fast_mg->mg_vd->vdev_pending_fastwrite <
mg->mg_vd->vdev_pending_fastwrite)
mg = fast_mg;
} while ((fast_mg = fast_mg->mg_next) != mc->mc_rotor);
} else {
mg = mc->mc_rotor;
}
/*
* If the hint put us into the wrong metaslab class, or into a
* metaslab group that has been passivated, just follow the rotor.
*/
if (mg->mg_class != mc || mg->mg_activation_count <= 0)
mg = mc->mc_rotor;
rotor = mg;
top:
do {
boolean_t allocatable;
ASSERT(mg->mg_activation_count == 1);
vd = mg->mg_vd;
/*
* Don't allocate from faulted devices.
*/
if (try_hard) {
spa_config_enter(spa, SCL_ZIO, FTAG, RW_READER);
allocatable = vdev_allocatable(vd);
spa_config_exit(spa, SCL_ZIO, FTAG);
} else {
allocatable = vdev_allocatable(vd);
}
/*
* Determine if the selected metaslab group is eligible
* for allocations. If we're ganging then don't allow
* this metaslab group to skip allocations since that would
* inadvertently return ENOSPC and suspend the pool
* even though space is still available.
*/
if (allocatable && !GANG_ALLOCATION(flags) && !try_hard) {
allocatable = metaslab_group_allocatable(mg, rotor,
psize);
}
if (!allocatable) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_NOT_ALLOCATABLE);
goto next;
}
ASSERT(mg->mg_initialized);
/*
* Avoid writing single-copy data to a failing,
* non-redundant vdev, unless we've already tried all
* other vdevs.
*/
if ((vd->vdev_stat.vs_write_errors > 0 ||
vd->vdev_state < VDEV_STATE_HEALTHY) &&
d == 0 && !try_hard && vd->vdev_children == 0) {
metaslab_trace_add(zal, mg, NULL, psize, d,
TRACE_VDEV_ERROR);
goto next;
}
ASSERT(mg->mg_class == mc);
/*
* If we don't need to try hard, then require that the
* block be 1/8th of the device away from any other DVAs
* in this BP. If we are trying hard, allow any offset
* to be used (distance=0).
*/
uint64_t distance = 0;
if (!try_hard) {
distance = vd->vdev_asize >>
ditto_same_vdev_distance_shift;
if (distance <= (1ULL << vd->vdev_ms_shift))
distance = 0;
}
uint64_t asize = vdev_psize_to_asize(vd, psize);
ASSERT(P2PHASE(asize, 1ULL << vd->vdev_ashift) == 0);
uint64_t offset = metaslab_group_alloc(mg, zal, asize, txg,
distance, dva, d);
if (offset != -1ULL) {
/*
* If we've just selected this metaslab group,
* figure out whether the corresponding vdev is
* over- or under-used relative to the pool,
* and set an allocation bias to even it out.
*
* Bias is also used to compensate for unequally
* sized vdevs so that space is allocated fairly.
*/
if (mc->mc_aliquot == 0 && metaslab_bias_enabled) {
vdev_stat_t *vs = &vd->vdev_stat;
int64_t vs_free = vs->vs_space - vs->vs_alloc;
int64_t mc_free = mc->mc_space - mc->mc_alloc;
int64_t ratio;
/*
* Calculate how much more or less we should
* try to allocate from this device during
* this iteration around the rotor.
*
* This basically introduces a zero-centered
* bias towards the devices with the most
* free space, while compensating for vdev
* size differences.
*
* Examples:
* vdev V1 = 16M/128M
* vdev V2 = 16M/128M
* ratio(V1) = 100% ratio(V2) = 100%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/128M
* ratio(V1) = 127% ratio(V2) = 72%
*
* vdev V1 = 16M/128M
* vdev V2 = 64M/512M
* ratio(V1) = 40% ratio(V2) = 160%
*/
ratio = (vs_free * mc->mc_alloc_groups * 100) /
(mc_free + 1);
mg->mg_bias = ((ratio - 100) *
(int64_t)mg->mg_aliquot) / 100;
} else if (!metaslab_bias_enabled) {
mg->mg_bias = 0;
}
if ((flags & METASLAB_FASTWRITE) ||
atomic_add_64_nv(&mc->mc_aliquot, asize) >=
mg->mg_aliquot + mg->mg_bias) {
mc->mc_rotor = mg->mg_next;
mc->mc_aliquot = 0;
}
DVA_SET_VDEV(&dva[d], vd->vdev_id);
DVA_SET_OFFSET(&dva[d], offset);
DVA_SET_GANG(&dva[d],
((flags & METASLAB_GANG_HEADER) ? 1 : 0));
DVA_SET_ASIZE(&dva[d], asize);
if (flags & METASLAB_FASTWRITE) {
atomic_add_64(&vd->vdev_pending_fastwrite,
psize);
}
return (0);
}
next:
mc->mc_rotor = mg->mg_next;
mc->mc_aliquot = 0;
} while ((mg = mg->mg_next) != rotor);
/*
* If we haven't tried hard, do so now.
*/
if (!try_hard) {
try_hard = B_TRUE;
goto top;
}
bzero(&dva[d], sizeof (dva_t));
metaslab_trace_add(zal, rotor, NULL, psize, d, TRACE_ENOSPC);
return (SET_ERROR(ENOSPC));
}
+void
+metaslab_free_concrete(vdev_t *vd, uint64_t offset, uint64_t asize,
+ uint64_t txg)
+{
+ metaslab_t *msp;
+ ASSERTV(spa_t *spa = vd->vdev_spa);
+
+ ASSERT3U(txg, ==, spa->spa_syncing_txg);
+ ASSERT(vdev_is_concrete(vd));
+ ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
+ ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
+
+ msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+
+ VERIFY(!msp->ms_condensing);
+ VERIFY3U(offset, >=, msp->ms_start);
+ VERIFY3U(offset + asize, <=, msp->ms_start + msp->ms_size);
+ VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
+ VERIFY0(P2PHASE(asize, 1ULL << vd->vdev_ashift));
+
+ metaslab_check_free_impl(vd, offset, asize);
+ mutex_enter(&msp->ms_lock);
+ if (range_tree_space(msp->ms_freeingtree) == 0) {
+ vdev_dirty(vd, VDD_METASLAB, msp, txg);
+ }
+ range_tree_add(msp->ms_freeingtree, offset, asize);
+ mutex_exit(&msp->ms_lock);
+}
+
+/* ARGSUSED */
+void
+metaslab_free_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
+ uint64_t size, void *arg)
+{
+ uint64_t *txgp = arg;
+
+ if (vd->vdev_ops->vdev_op_remap != NULL)
+ vdev_indirect_mark_obsolete(vd, offset, size, *txgp);
+ else
+ metaslab_free_impl(vd, offset, size, *txgp);
+}
+
+static void
+metaslab_free_impl(vdev_t *vd, uint64_t offset, uint64_t size,
+ uint64_t txg)
+{
+ spa_t *spa = vd->vdev_spa;
+
+ ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
+
+ if (txg > spa_freeze_txg(spa))
+ return;
+
+ if (spa->spa_vdev_removal != NULL &&
+ spa->spa_vdev_removal->svr_vdev == vd &&
+ vdev_is_concrete(vd)) {
+ /*
+ * Note: we check if the vdev is concrete because when
+ * we complete the removal, we first change the vdev to be
+ * an indirect vdev (in open context), and then (in syncing
+ * context) clear spa_vdev_removal.
+ */
+ free_from_removing_vdev(vd, offset, size, txg);
+ } else if (vd->vdev_ops->vdev_op_remap != NULL) {
+ vdev_indirect_mark_obsolete(vd, offset, size, txg);
+ vd->vdev_ops->vdev_op_remap(vd, offset, size,
+ metaslab_free_impl_cb, &txg);
+ } else {
+ metaslab_free_concrete(vd, offset, size, txg);
+ }
+}
+
+typedef struct remap_blkptr_cb_arg {
+ blkptr_t *rbca_bp;
+ spa_remap_cb_t rbca_cb;
+ vdev_t *rbca_remap_vd;
+ uint64_t rbca_remap_offset;
+ void *rbca_cb_arg;
+} remap_blkptr_cb_arg_t;
+
+void
+remap_blkptr_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
+ uint64_t size, void *arg)
+{
+ remap_blkptr_cb_arg_t *rbca = arg;
+ blkptr_t *bp = rbca->rbca_bp;
+
+ /* We can not remap split blocks. */
+ if (size != DVA_GET_ASIZE(&bp->blk_dva[0]))
+ return;
+ ASSERT0(inner_offset);
+
+ if (rbca->rbca_cb != NULL) {
+ /*
+ * At this point we know that we are not handling split
+ * blocks and we invoke the callback on the previous
+ * vdev which must be indirect.
+ */
+ ASSERT3P(rbca->rbca_remap_vd->vdev_ops, ==, &vdev_indirect_ops);
+
+ rbca->rbca_cb(rbca->rbca_remap_vd->vdev_id,
+ rbca->rbca_remap_offset, size, rbca->rbca_cb_arg);
+
+ /* set up remap_blkptr_cb_arg for the next call */
+ rbca->rbca_remap_vd = vd;
+ rbca->rbca_remap_offset = offset;
+ }
+
+ /*
+ * The phys birth time is that of dva[0]. This ensures that we know
+ * when each dva was written, so that resilver can determine which
+ * blocks need to be scrubbed (i.e. those written during the time
+ * the vdev was offline). It also ensures that the key used in
+ * the ARC hash table is unique (i.e. dva[0] + phys_birth). If
+ * we didn't change the phys_birth, a lookup in the ARC for a
+ * remapped BP could find the data that was previously stored at
+ * this vdev + offset.
+ */
+ vdev_t *oldvd = vdev_lookup_top(vd->vdev_spa,
+ DVA_GET_VDEV(&bp->blk_dva[0]));
+ vdev_indirect_births_t *vib = oldvd->vdev_indirect_births;
+ bp->blk_phys_birth = vdev_indirect_births_physbirth(vib,
+ DVA_GET_OFFSET(&bp->blk_dva[0]), DVA_GET_ASIZE(&bp->blk_dva[0]));
+
+ DVA_SET_VDEV(&bp->blk_dva[0], vd->vdev_id);
+ DVA_SET_OFFSET(&bp->blk_dva[0], offset);
+}
+
/*
- * Free the block represented by DVA in the context of the specified
- * transaction group.
+ * If the block pointer contains any indirect DVAs, modify them to refer to
+ * concrete DVAs. Note that this will sometimes not be possible, leaving
+ * the indirect DVA in place. This happens if the indirect DVA spans multiple
+ * segments in the mapping (i.e. it is a "split block").
+ *
+ * If the BP was remapped, calls the callback on the original dva (note the
+ * callback can be called multiple times if the original indirect DVA refers
+ * to another indirect DVA, etc).
+ *
+ * Returns TRUE if the BP was remapped.
*/
-static void
-metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg, boolean_t now)
+boolean_t
+spa_remap_blkptr(spa_t *spa, blkptr_t *bp, spa_remap_cb_t callback, void *arg)
{
- uint64_t vdev = DVA_GET_VDEV(dva);
+ remap_blkptr_cb_arg_t rbca;
+
+ if (!zfs_remap_blkptr_enable)
+ return (B_FALSE);
+
+ if (!spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS))
+ return (B_FALSE);
+
+ /*
+ * Dedup BP's can not be remapped, because ddt_phys_select() depends
+ * on DVA[0] being the same in the BP as in the DDT (dedup table).
+ */
+ if (BP_GET_DEDUP(bp))
+ return (B_FALSE);
+
+ /*
+ * Gang blocks can not be remapped, because
+ * zio_checksum_gang_verifier() depends on the DVA[0] that's in
+ * the BP used to read the gang block header (GBH) being the same
+ * as the DVA[0] that we allocated for the GBH.
+ */
+ if (BP_IS_GANG(bp))
+ return (B_FALSE);
+
+ /*
+ * Embedded BP's have no DVA to remap.
+ */
+ if (BP_GET_NDVAS(bp) < 1)
+ return (B_FALSE);
+
+ /*
+ * Note: we only remap dva[0]. If we remapped other dvas, we
+ * would no longer know what their phys birth txg is.
+ */
+ dva_t *dva = &bp->blk_dva[0];
+
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
- vdev_t *vd;
+ vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
+
+ if (vd->vdev_ops->vdev_op_remap == NULL)
+ return (B_FALSE);
+
+ rbca.rbca_bp = bp;
+ rbca.rbca_cb = callback;
+ rbca.rbca_remap_vd = vd;
+ rbca.rbca_remap_offset = offset;
+ rbca.rbca_cb_arg = arg;
+
+ /*
+ * remap_blkptr_cb() will be called in order for each level of
+ * indirection, until a concrete vdev is reached or a split block is
+ * encountered. rbca_remap_vd and rbca_remap_offset are updated within
+ * the callback as we go from one indirect vdev to the next (either
+ * concrete or indirect again) in that order.
+ */
+ vd->vdev_ops->vdev_op_remap(vd, offset, size, remap_blkptr_cb, &rbca);
+
+ /* Check if the DVA wasn't remapped because it is a split block */
+ if (DVA_GET_VDEV(&rbca.rbca_bp->blk_dva[0]) == vd->vdev_id)
+ return (B_FALSE);
+
+ return (B_TRUE);
+}
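/*
 * Illustrative sketch, not part of this change: a caller-side view of
 * spa_remap_blkptr() above. The callback signature mirrors the invocation
 * in remap_blkptr_cb(); the counting callback and remap_example() are
 * hypothetical.
 */
/* ARGSUSED */
static void
remap_count_cb(uint64_t vdev, uint64_t offset, uint64_t size, void *arg)
{
uint64_t *remaps = arg;
(*remaps)++; /* one call per level of indirection that was resolved */
}
static boolean_t
remap_example(spa_t *spa, blkptr_t *bp)
{
uint64_t remaps = 0;
/* B_TRUE means dva[0] now refers to a concrete vdev. */
return (spa_remap_blkptr(spa, bp, remap_count_cb, &remaps));
}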
+
+/*
+ * Undo the allocation of a DVA which happened in the given transaction group.
+ */
+void
+metaslab_unalloc_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
+{
metaslab_t *msp;
+ vdev_t *vd;
+ uint64_t vdev = DVA_GET_VDEV(dva);
+ uint64_t offset = DVA_GET_OFFSET(dva);
+ uint64_t size = DVA_GET_ASIZE(dva);
+
+ ASSERT(DVA_IS_VALID(dva));
+ ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
if (txg > spa_freeze_txg(spa))
return;
if ((vd = vdev_lookup_top(spa, vdev)) == NULL || !DVA_IS_VALID(dva) ||
(offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count) {
zfs_panic_recover("metaslab_free_dva(): bad DVA %llu:%llu:%llu",
(u_longlong_t)vdev, (u_longlong_t)offset,
(u_longlong_t)size);
return;
}
- msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+ ASSERT(!vd->vdev_removing);
+ ASSERT(vdev_is_concrete(vd));
+ ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
+ ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
if (DVA_GET_GANG(dva))
size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
- mutex_enter(&msp->ms_lock);
-
- if (now) {
- range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
- offset, size);
+ msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
- VERIFY(!msp->ms_condensing);
- VERIFY3U(offset, >=, msp->ms_start);
- VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
- VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
- msp->ms_size);
- VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
- VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
- range_tree_add(msp->ms_tree, offset, size);
- msp->ms_max_size = metaslab_block_maxsize(msp);
- } else {
- VERIFY3U(txg, ==, spa->spa_syncing_txg);
- if (range_tree_space(msp->ms_freeingtree) == 0)
- vdev_dirty(vd, VDD_METASLAB, msp, txg);
- range_tree_add(msp->ms_freeingtree, offset, size);
- }
+ mutex_enter(&msp->ms_lock);
+ range_tree_remove(msp->ms_alloctree[txg & TXG_MASK],
+ offset, size);
+ VERIFY(!msp->ms_condensing);
+ VERIFY3U(offset, >=, msp->ms_start);
+ VERIFY3U(offset + size, <=, msp->ms_start + msp->ms_size);
+ VERIFY3U(range_tree_space(msp->ms_tree) + size, <=,
+ msp->ms_size);
+ VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
+ VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
+ range_tree_add(msp->ms_tree, offset, size);
mutex_exit(&msp->ms_lock);
}
/*
- * Intent log support: upon opening the pool after a crash, notify the SPA
- * of blocks that the intent log has allocated for immediate write, but
- * which are still considered free by the SPA because the last transaction
- * group didn't commit yet.
+ * Free the block represented by DVA in the context of the specified
+ * transaction group.
*/
-static int
-metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
+void
+metaslab_free_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
{
uint64_t vdev = DVA_GET_VDEV(dva);
uint64_t offset = DVA_GET_OFFSET(dva);
uint64_t size = DVA_GET_ASIZE(dva);
- vdev_t *vd;
- metaslab_t *msp;
- int error = 0;
+ vdev_t *vd = vdev_lookup_top(spa, vdev);
ASSERT(DVA_IS_VALID(dva));
+ ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
- if ((vd = vdev_lookup_top(spa, vdev)) == NULL ||
- (offset >> vd->vdev_ms_shift) >= vd->vdev_ms_count)
- return (SET_ERROR(ENXIO));
-
- msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
-
- if (DVA_GET_GANG(dva))
+ if (DVA_GET_GANG(dva)) {
size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
-
- mutex_enter(&msp->ms_lock);
-
- if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
- error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
-
- if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
- error = SET_ERROR(ENOENT);
-
- if (error || txg == 0) { /* txg == 0 indicates dry run */
- mutex_exit(&msp->ms_lock);
- return (error);
}
- VERIFY(!msp->ms_condensing);
- VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
- VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
- VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
- range_tree_remove(msp->ms_tree, offset, size);
-
- if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
- if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
- vdev_dirty(vd, VDD_METASLAB, msp, txg);
- range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
- }
-
- mutex_exit(&msp->ms_lock);
-
- return (0);
+ metaslab_free_impl(vd, offset, size, txg);
}
/*
* Reserve some allocation slots. The reservation system must be called
* before we call into the allocator. If there aren't any available slots
* then the I/O will be throttled until an I/O completes and its slots are
* freed up. The function returns true if it was successful in placing
* the reservation.
*/
boolean_t
metaslab_class_throttle_reserve(metaslab_class_t *mc, int slots, zio_t *zio,
int flags)
{
uint64_t available_slots = 0;
boolean_t slot_reserved = B_FALSE;
ASSERT(mc->mc_alloc_throttle_enabled);
mutex_enter(&mc->mc_lock);
uint64_t reserved_slots = refcount_count(&mc->mc_alloc_slots);
if (reserved_slots < mc->mc_alloc_max_slots)
available_slots = mc->mc_alloc_max_slots - reserved_slots;
if (slots <= available_slots || GANG_ALLOCATION(flags)) {
/*
* We reserve the slots individually so that we can unreserve
* them individually when an I/O completes.
*/
for (int d = 0; d < slots; d++) {
reserved_slots = refcount_add(&mc->mc_alloc_slots, zio);
}
zio->io_flags |= ZIO_FLAG_IO_ALLOCATING;
slot_reserved = B_TRUE;
}
mutex_exit(&mc->mc_lock);
return (slot_reserved);
}
void
metaslab_class_throttle_unreserve(metaslab_class_t *mc, int slots, zio_t *zio)
{
ASSERT(mc->mc_alloc_throttle_enabled);
mutex_enter(&mc->mc_lock);
for (int d = 0; d < slots; d++) {
(void) refcount_remove(&mc->mc_alloc_slots, zio);
}
mutex_exit(&mc->mc_lock);
}
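/*
 * Illustrative sketch, not part of the original patch: the two throttle
 * calls above are meant to be paired per allocating zio.  This helper is
 * hypothetical and only shows the calling pattern; in the real pipeline
 * the unreserve happens when the allocating I/O completes.
 */
#if 0
static void
throttle_example(metaslab_class_t *mc, zio_t *zio, int copies)
{
	if (!metaslab_class_throttle_reserve(mc, copies, zio, 0)) {
		/* No slots free; the zio would be throttled and retried. */
		return;
	}

	/* ... perform the allocation and issue the write ... */

	metaslab_class_throttle_unreserve(mc, copies, zio);
}
#endif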
+static int
+metaslab_claim_concrete(vdev_t *vd, uint64_t offset, uint64_t size,
+ uint64_t txg)
+{
+ metaslab_t *msp;
+ spa_t *spa = vd->vdev_spa;
+ int error = 0;
+
+ if (offset >> vd->vdev_ms_shift >= vd->vdev_ms_count)
+ return (ENXIO);
+
+ ASSERT3P(vd->vdev_ms, !=, NULL);
+ msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+
+ mutex_enter(&msp->ms_lock);
+
+ if ((txg != 0 && spa_writeable(spa)) || !msp->ms_loaded)
+ error = metaslab_activate(msp, METASLAB_WEIGHT_SECONDARY);
+
+ if (error == 0 && !range_tree_contains(msp->ms_tree, offset, size))
+ error = SET_ERROR(ENOENT);
+
+ if (error || txg == 0) { /* txg == 0 indicates dry run */
+ mutex_exit(&msp->ms_lock);
+ return (error);
+ }
+
+ VERIFY(!msp->ms_condensing);
+ VERIFY0(P2PHASE(offset, 1ULL << vd->vdev_ashift));
+ VERIFY0(P2PHASE(size, 1ULL << vd->vdev_ashift));
+ VERIFY3U(range_tree_space(msp->ms_tree) - size, <=, msp->ms_size);
+ range_tree_remove(msp->ms_tree, offset, size);
+
+ if (spa_writeable(spa)) { /* don't dirty if we're zdb(1M) */
+ if (range_tree_space(msp->ms_alloctree[txg & TXG_MASK]) == 0)
+ vdev_dirty(vd, VDD_METASLAB, msp, txg);
+ range_tree_add(msp->ms_alloctree[txg & TXG_MASK], offset, size);
+ }
+
+ mutex_exit(&msp->ms_lock);
+
+ return (0);
+}
+
+typedef struct metaslab_claim_cb_arg_t {
+ uint64_t mcca_txg;
+ int mcca_error;
+} metaslab_claim_cb_arg_t;
+
+/* ARGSUSED */
+static void
+metaslab_claim_impl_cb(uint64_t inner_offset, vdev_t *vd, uint64_t offset,
+ uint64_t size, void *arg)
+{
+ metaslab_claim_cb_arg_t *mcca_arg = arg;
+
+ if (mcca_arg->mcca_error == 0) {
+ mcca_arg->mcca_error = metaslab_claim_concrete(vd, offset,
+ size, mcca_arg->mcca_txg);
+ }
+}
+
+int
+metaslab_claim_impl(vdev_t *vd, uint64_t offset, uint64_t size, uint64_t txg)
+{
+ if (vd->vdev_ops->vdev_op_remap != NULL) {
+ metaslab_claim_cb_arg_t arg;
+
+ /*
+ * Only zdb(1M) can claim on indirect vdevs. This is used
+ * to detect leaks of mapped space (that are not accounted
+ * for in the obsolete counts, spacemap, or bpobj).
+ */
+ ASSERT(!spa_writeable(vd->vdev_spa));
+ arg.mcca_error = 0;
+ arg.mcca_txg = txg;
+
+ vd->vdev_ops->vdev_op_remap(vd, offset, size,
+ metaslab_claim_impl_cb, &arg);
+
+ if (arg.mcca_error == 0) {
+ arg.mcca_error = metaslab_claim_concrete(vd,
+ offset, size, txg);
+ }
+ return (arg.mcca_error);
+ } else {
+ return (metaslab_claim_concrete(vd, offset, size, txg));
+ }
+}
+
+/*
+ * Intent log support: upon opening the pool after a crash, notify the SPA
+ * of blocks that the intent log has allocated for immediate write, but
+ * which are still considered free by the SPA because the last transaction
+ * group didn't commit yet.
+ */
+static int
+metaslab_claim_dva(spa_t *spa, const dva_t *dva, uint64_t txg)
+{
+ uint64_t vdev = DVA_GET_VDEV(dva);
+ uint64_t offset = DVA_GET_OFFSET(dva);
+ uint64_t size = DVA_GET_ASIZE(dva);
+ vdev_t *vd;
+
+ if ((vd = vdev_lookup_top(spa, vdev)) == NULL) {
+ return (SET_ERROR(ENXIO));
+ }
+
+ ASSERT(DVA_IS_VALID(dva));
+
+ if (DVA_GET_GANG(dva))
+ size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+
+ return (metaslab_claim_impl(vd, offset, size, txg));
+}
+
int
metaslab_alloc(spa_t *spa, metaslab_class_t *mc, uint64_t psize, blkptr_t *bp,
int ndvas, uint64_t txg, blkptr_t *hintbp, int flags,
zio_alloc_list_t *zal, zio_t *zio)
{
dva_t *dva = bp->blk_dva;
dva_t *hintdva = hintbp->blk_dva;
int error = 0;
ASSERT(bp->blk_birth == 0);
ASSERT(BP_PHYSICAL_BIRTH(bp) == 0);
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
if (mc->mc_rotor == NULL) { /* no vdevs in this class */
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (SET_ERROR(ENOSPC));
}
ASSERT(ndvas > 0 && ndvas <= spa_max_replication(spa));
ASSERT(BP_GET_NDVAS(bp) == 0);
ASSERT(hintbp == NULL || ndvas <= BP_GET_NDVAS(hintbp));
ASSERT3P(zal, !=, NULL);
for (int d = 0; d < ndvas; d++) {
error = metaslab_alloc_dva(spa, mc, psize, dva, d, hintdva,
txg, flags, zal);
if (error != 0) {
for (d--; d >= 0; d--) {
- metaslab_free_dva(spa, &dva[d], txg, B_TRUE);
+ metaslab_unalloc_dva(spa, &dva[d], txg);
metaslab_group_alloc_decrement(spa,
DVA_GET_VDEV(&dva[d]), zio, flags);
bzero(&dva[d], sizeof (dva_t));
}
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (error);
} else {
/*
* Update the metaslab group's queue depth
* based on the newly allocated dva.
*/
metaslab_group_alloc_increment(spa,
DVA_GET_VDEV(&dva[d]), zio, flags);
}
}
ASSERT(error == 0);
ASSERT(BP_GET_NDVAS(bp) == ndvas);
spa_config_exit(spa, SCL_ALLOC, FTAG);
BP_SET_BIRTH(bp, txg, 0);
return (0);
}
void
metaslab_free(spa_t *spa, const blkptr_t *bp, uint64_t txg, boolean_t now)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!now || bp->blk_birth >= spa_syncing_txg(spa));
spa_config_enter(spa, SCL_FREE, FTAG, RW_READER);
- for (int d = 0; d < ndvas; d++)
- metaslab_free_dva(spa, &dva[d], txg, now);
+ for (int d = 0; d < ndvas; d++) {
+ if (now) {
+ metaslab_unalloc_dva(spa, &dva[d], txg);
+ } else {
+ metaslab_free_dva(spa, &dva[d], txg);
+ }
+ }
spa_config_exit(spa, SCL_FREE, FTAG);
}
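/*
 * Illustrative note, not part of the original patch: "now" frees are only
 * taken for blocks born in the currently syncing txg (see the ASSERT
 * above), so their allocation is simply undone via metaslab_unalloc_dva()
 * and the space returns directly to the metaslab.  All other frees go
 * through metaslab_free_dva(), which now hands the work to
 * metaslab_free_impl(); in the pre-patch code this was the branch that
 * added the range to ms_freeingtree.
 */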
int
metaslab_claim(spa_t *spa, const blkptr_t *bp, uint64_t txg)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
int error = 0;
ASSERT(!BP_IS_HOLE(bp));
if (txg != 0) {
/*
* First do a dry run to make sure all DVAs are claimable,
* so we don't have to unwind from partial failures below.
*/
if ((error = metaslab_claim(spa, bp, 0)) != 0)
return (error);
}
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_READER);
for (int d = 0; d < ndvas; d++)
if ((error = metaslab_claim_dva(spa, &dva[d], txg)) != 0)
break;
spa_config_exit(spa, SCL_ALLOC, FTAG);
ASSERT(error == 0 || txg == 0);
return (error);
}
void
metaslab_fastwrite_mark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
uint64_t psize = BP_GET_PSIZE(bp);
int d;
vdev_t *vd;
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(psize > 0);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (d = 0; d < ndvas; d++) {
if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
continue;
atomic_add_64(&vd->vdev_pending_fastwrite, psize);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
void
metaslab_fastwrite_unmark(spa_t *spa, const blkptr_t *bp)
{
const dva_t *dva = bp->blk_dva;
int ndvas = BP_GET_NDVAS(bp);
uint64_t psize = BP_GET_PSIZE(bp);
int d;
vdev_t *vd;
ASSERT(!BP_IS_HOLE(bp));
ASSERT(!BP_IS_EMBEDDED(bp));
ASSERT(psize > 0);
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (d = 0; d < ndvas; d++) {
if ((vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[d]))) == NULL)
continue;
ASSERT3U(vd->vdev_pending_fastwrite, >=, psize);
atomic_sub_64(&vd->vdev_pending_fastwrite, psize);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
+/* ARGSUSED */
+static void
+metaslab_check_free_impl_cb(uint64_t inner, vdev_t *vd, uint64_t offset,
+ uint64_t size, void *arg)
+{
+ if (vd->vdev_ops == &vdev_indirect_ops)
+ return;
+
+ metaslab_check_free_impl(vd, offset, size);
+}
+
+static void
+metaslab_check_free_impl(vdev_t *vd, uint64_t offset, uint64_t size)
+{
+ metaslab_t *msp;
+ ASSERTV(spa_t *spa = vd->vdev_spa);
+
+ if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
+ return;
+
+ if (vd->vdev_ops->vdev_op_remap != NULL) {
+ vd->vdev_ops->vdev_op_remap(vd, offset, size,
+ metaslab_check_free_impl_cb, NULL);
+ return;
+ }
+
+ ASSERT(vdev_is_concrete(vd));
+ ASSERT3U(offset >> vd->vdev_ms_shift, <, vd->vdev_ms_count);
+ ASSERT3U(spa_config_held(spa, SCL_ALL, RW_READER), !=, 0);
+
+ msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
+
+ mutex_enter(&msp->ms_lock);
+ if (msp->ms_loaded)
+ range_tree_verify(msp->ms_tree, offset, size);
+
+ range_tree_verify(msp->ms_freeingtree, offset, size);
+ range_tree_verify(msp->ms_freedtree, offset, size);
+ for (int j = 0; j < TXG_DEFER_SIZE; j++)
+ range_tree_verify(msp->ms_defertree[j], offset, size);
+ mutex_exit(&msp->ms_lock);
+}
+
void
metaslab_check_free(spa_t *spa, const blkptr_t *bp)
{
if ((zfs_flags & ZFS_DEBUG_ZIO_FREE) == 0)
return;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
vdev_t *vd = vdev_lookup_top(spa, vdev);
uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
uint64_t size = DVA_GET_ASIZE(&bp->blk_dva[i]);
- metaslab_t *msp = vd->vdev_ms[offset >> vd->vdev_ms_shift];
- if (msp->ms_loaded)
- range_tree_verify(msp->ms_tree, offset, size);
+ if (DVA_GET_GANG(&bp->blk_dva[i]))
+ size = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
+
+ ASSERT3P(vd, !=, NULL);
- range_tree_verify(msp->ms_freeingtree, offset, size);
- range_tree_verify(msp->ms_freedtree, offset, size);
- for (int j = 0; j < TXG_DEFER_SIZE; j++)
- range_tree_verify(msp->ms_defertree[j], offset, size);
+ metaslab_check_free_impl(vd, offset, size);
}
spa_config_exit(spa, SCL_VDEV, FTAG);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* CSTYLED */
module_param(metaslab_aliquot, ulong, 0644);
MODULE_PARM_DESC(metaslab_aliquot,
"allocation granularity (a.k.a. stripe size)");
module_param(metaslab_debug_load, int, 0644);
MODULE_PARM_DESC(metaslab_debug_load,
"load all metaslabs when pool is first opened");
module_param(metaslab_debug_unload, int, 0644);
MODULE_PARM_DESC(metaslab_debug_unload,
"prevent metaslabs from being unloaded");
module_param(metaslab_preload_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_preload_enabled,
"preload potential metaslabs during reassessment");
module_param(zfs_mg_noalloc_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_noalloc_threshold,
"percentage of free space for metaslab group to allow allocation");
module_param(zfs_mg_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_mg_fragmentation_threshold,
"fragmentation for metaslab group to allow allocation");
module_param(zfs_metaslab_fragmentation_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_fragmentation_threshold,
"fragmentation for metaslab to allow allocation");
module_param(metaslab_fragmentation_factor_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_fragmentation_factor_enabled,
"use the fragmentation metric to prefer less fragmented metaslabs");
module_param(metaslab_lba_weighting_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_lba_weighting_enabled,
"prefer metaslabs with lower LBAs");
module_param(metaslab_bias_enabled, int, 0644);
MODULE_PARM_DESC(metaslab_bias_enabled,
"enable metaslab group biasing");
module_param(zfs_metaslab_segment_weight_enabled, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_segment_weight_enabled,
"enable segment-based metaslab selection");
module_param(zfs_metaslab_switch_threshold, int, 0644);
MODULE_PARM_DESC(zfs_metaslab_switch_threshold,
"segment-based metaslab selection maximum buckets before switching");
+
+/* CSTYLED */
+module_param(metaslab_gang_bang, ulong, 0644);
+MODULE_PARM_DESC(metaslab_gang_bang,
+ "blocks larger than this size are forced to be gang blocks");
#endif /* _KERNEL && HAVE_SPL */
diff --git a/module/zfs/mmp.c b/module/zfs/mmp.c
index 4d3c7401e4c2..48aab673ad5e 100644
--- a/module/zfs/mmp.c
+++ b/module/zfs/mmp.c
@@ -1,630 +1,631 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
*/
#include <sys/abd.h>
#include <sys/mmp.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/time.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_context.h>
#include <sys/callb.h>
/*
* Multi-Modifier Protection (MMP) attempts to prevent a user from importing
* or opening a pool on more than one host at a time. In particular, it
* prevents "zpool import -f" on a host from succeeding while the pool is
* already imported on another host. There are many other ways in which a
* device could be used by two hosts for different purposes at the same time
* resulting in pool damage. This implementation does not attempt to detect
* those cases.
*
* MMP operates by ensuring there are frequent visible changes on disk (a
* "heartbeat") at all times, and by altering the import process to check
* for these changes and fail the import when they are detected. This
* functionality is enabled by setting the 'multihost' pool property to on.
*
* Uberblocks written by the txg_sync thread always go into the first
* (N-MMP_BLOCKS_PER_LABEL) slots, the remaining slots are reserved for MMP.
* They are used to hold uberblocks which are exactly the same as the last
* synced uberblock except that the ub_timestamp is frequently updated.
* Like all other uberblocks, the slot is written with an embedded checksum,
* and slots with invalid checksums are ignored. This provides the
* "heartbeat", with no risk of overwriting good uberblocks that must be
* preserved, e.g. previous txgs and associated block pointers.
*
* Two optional fields are added to uberblock structure: ub_mmp_magic and
* ub_mmp_delay. The magic field allows zfs to tell whether ub_mmp_delay is
* valid. The delay field is a decaying average of the amount of time between
* completion of successive MMP writes, in nanoseconds. It is used to predict
* how long the import must wait to detect activity in the pool, before
* concluding it is not in use.
*
* During import an activity test may now be performed to determine if
* the pool is in use. The activity test is typically required if the
* ZPOOL_CONFIG_HOSTID does not match the system hostid, the pool state is
* POOL_STATE_ACTIVE, and the pool is not a root pool.
*
* The activity test finds the "best" uberblock (highest txg & timestamp),
* waits some time, and then finds the "best" uberblock again. If the txg
* and timestamp in both "best" uberblocks do not match, the pool is in use
* by another host and the import fails. Since the granularity of the
* timestamp is in seconds, this activity test must take a bare minimum of one
* second. To ensure the accuracy of the activity test, the default
* values result in an activity test duration of 10x the mmp write interval.
*
* The "zpool import" activity test can be expected to take a minimum time of
* zfs_multihost_import_intervals * zfs_multihost_interval milliseconds. If the
* "best" uberblock has a valid ub_mmp_delay field, then the duration of the
* test may take longer if MMP writes were occurring less frequently than
* expected. Additionally, the duration is then extended by a random 25% to
* attempt to detect simultaneous imports; for example, if both partner
* hosts are rebooted at the same time and automatically attempt to import the
* pool.
*/
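/*
 * Illustrative note, not part of the original patch: with the shipped
 * defaults (assuming zfs_multihost_interval is 1000 ms and
 * zfs_multihost_import_intervals is 10), the minimum activity test is
 * about 10 * 1000 ms = 10 s, extended by up to a random 25% (~12.5 s),
 * and stretched further if the best uberblock's ub_mmp_delay shows that
 * MMP writes were completing less often than expected.
 */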
/*
* Used to control the frequency of mmp writes which are performed when the
* 'multihost' pool property is on. This is one factor used to determine the
* length of the activity check during import.
*
* The mmp write period is zfs_multihost_interval / leaf-vdevs milliseconds.
* This means that on average an mmp write will be issued for each leaf vdev
* every zfs_multihost_interval milliseconds. In practice, the observed period
* can vary with the I/O load and this observed value is the delay which is
* stored in the uberblock. The minimum allowed value is 100 ms.
*/
ulong_t zfs_multihost_interval = MMP_DEFAULT_INTERVAL;
/*
* Used to control the duration of the activity test on import. Smaller values
* of zfs_multihost_import_intervals will reduce the import time but increase
* the risk of failing to detect an active pool. The total activity check time
* is never allowed to drop below one second. A value of 0 is ignored and
* treated as if it was set to 1.
*/
uint_t zfs_multihost_import_intervals = MMP_DEFAULT_IMPORT_INTERVALS;
/*
* Controls the behavior of the pool when mmp write failures are detected.
*
* When zfs_multihost_fail_intervals = 0 then mmp write failures are ignored.
* The failures will still be reported to the ZED which depending on its
* configuration may take action such as suspending the pool or taking a
* device offline.
*
* When zfs_multihost_fail_intervals > 0 then sequential mmp write failures will
* cause the pool to be suspended. This occurs when
* zfs_multihost_fail_intervals * zfs_multihost_interval milliseconds have
* passed since the last successful mmp write. This guarantees the activity
* test will see mmp writes if the pool is imported.
*/
uint_t zfs_multihost_fail_intervals = MMP_DEFAULT_FAIL_INTERVALS;
char *mmp_tag = "mmp_write_uberblock";
static void mmp_thread(void *arg);
void
mmp_init(spa_t *spa)
{
mmp_thread_t *mmp = &spa->spa_mmp;
mutex_init(&mmp->mmp_thread_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&mmp->mmp_thread_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mmp->mmp_io_lock, NULL, MUTEX_DEFAULT, NULL);
mmp->mmp_kstat_id = 1;
}
void
mmp_fini(spa_t *spa)
{
mmp_thread_t *mmp = &spa->spa_mmp;
mutex_destroy(&mmp->mmp_thread_lock);
cv_destroy(&mmp->mmp_thread_cv);
mutex_destroy(&mmp->mmp_io_lock);
}
static void
mmp_thread_enter(mmp_thread_t *mmp, callb_cpr_t *cpr)
{
CALLB_CPR_INIT(cpr, &mmp->mmp_thread_lock, callb_generic_cpr, FTAG);
mutex_enter(&mmp->mmp_thread_lock);
}
static void
mmp_thread_exit(mmp_thread_t *mmp, kthread_t **mpp, callb_cpr_t *cpr)
{
ASSERT(*mpp != NULL);
*mpp = NULL;
cv_broadcast(&mmp->mmp_thread_cv);
CALLB_CPR_EXIT(cpr); /* drops &mmp->mmp_thread_lock */
thread_exit();
}
void
mmp_thread_start(spa_t *spa)
{
mmp_thread_t *mmp = &spa->spa_mmp;
if (spa_writeable(spa)) {
mutex_enter(&mmp->mmp_thread_lock);
if (!mmp->mmp_thread) {
dprintf("mmp_thread_start pool %s\n",
spa->spa_name);
mmp->mmp_thread = thread_create(NULL, 0, mmp_thread,
spa, 0, &p0, TS_RUN, defclsyspri);
}
mutex_exit(&mmp->mmp_thread_lock);
}
}
void
mmp_thread_stop(spa_t *spa)
{
mmp_thread_t *mmp = &spa->spa_mmp;
mutex_enter(&mmp->mmp_thread_lock);
mmp->mmp_thread_exiting = 1;
cv_broadcast(&mmp->mmp_thread_cv);
while (mmp->mmp_thread) {
cv_wait(&mmp->mmp_thread_cv, &mmp->mmp_thread_lock);
}
mutex_exit(&mmp->mmp_thread_lock);
ASSERT(mmp->mmp_thread == NULL);
mmp->mmp_thread_exiting = 0;
}
typedef enum mmp_vdev_state_flag {
MMP_FAIL_NOT_WRITABLE = (1 << 0),
MMP_FAIL_WRITE_PENDING = (1 << 1),
} mmp_vdev_state_flag_t;
static vdev_t *
mmp_random_leaf_impl(vdev_t *vd, int *fail_mask)
{
int child_idx;
- if (!vdev_writeable(vd)) {
- *fail_mask |= MMP_FAIL_NOT_WRITABLE;
- return (NULL);
- }
-
if (vd->vdev_ops->vdev_op_leaf) {
vdev_t *ret;
- if (vd->vdev_mmp_pending != 0) {
+ if (!vdev_writeable(vd)) {
+ *fail_mask |= MMP_FAIL_NOT_WRITABLE;
+ ret = NULL;
+ } else if (vd->vdev_mmp_pending != 0) {
*fail_mask |= MMP_FAIL_WRITE_PENDING;
ret = NULL;
} else {
ret = vd;
}
return (ret);
}
+ if (vd->vdev_children == 0)
+ return (NULL);
+
child_idx = spa_get_random(vd->vdev_children);
for (int offset = vd->vdev_children; offset > 0; offset--) {
vdev_t *leaf;
vdev_t *child = vd->vdev_child[(child_idx + offset) %
vd->vdev_children];
leaf = mmp_random_leaf_impl(child, fail_mask);
if (leaf)
return (leaf);
}
return (NULL);
}
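/*
 * Illustrative note, not part of the original patch: with four children
 * and a random starting index of 2, the loop above visits the children in
 * the order 2, 1, 0, 3; i.e. it starts at a random child and walks the
 * rest via (child_idx + offset) % vdev_children.
 */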
/*
* Find a leaf vdev to write an MMP block to. It must not have an outstanding
* mmp write (if so, a new write will also likely block). If there is no usable
* leaf in the tree rooted at in_vd, a nonzero error value is returned, and
* *out_vd is unchanged.
*
* The error value returned is a bit field.
*
* MMP_FAIL_WRITE_PENDING
* If set, one or more leaf vdevs are writeable, but have an MMP write which has
* not yet completed.
*
* MMP_FAIL_NOT_WRITABLE
* If set, one or more vdevs are not writeable. The children of those vdevs
* were not examined.
*
* Assuming in_vd points to a tree, a random subtree will be chosen to start.
* That subtree, and successive ones, will be walked until a usable leaf has
* been found, or all subtrees have been examined (except that the children of
* un-writeable vdevs are not examined).
*
* If the leaf vdevs in the tree are healthy, the distribution of returned leaf
* vdevs will be even. If there are unhealthy leaves, the following leaves
* (child_index % index_children) will be chosen more often.
*/
static int
mmp_random_leaf(vdev_t *in_vd, vdev_t **out_vd)
{
int error_mask = 0;
vdev_t *vd = mmp_random_leaf_impl(in_vd, &error_mask);
if (error_mask == 0)
*out_vd = vd;
return (error_mask);
}
/*
* MMP writes are issued on a fixed schedule, but may complete at variable,
* much longer, intervals. The mmp_delay captures long periods between
* successful writes for any reason, including disk latency, scheduling delays,
* etc.
*
* The mmp_delay is usually calculated as a decaying average, but if the latest
* delay is higher we do not average it, so that we do not hide sudden spikes
* which the importing host must wait for.
*
* If writes are occurring frequently, such as due to a high rate of txg syncs,
* the mmp_delay could become very small. Since those short delays depend on
* activity we cannot count on, we never allow mmp_delay to get lower than the
* rate expected if only mmp_thread writes occur.
*
* If an mmp write was skipped or failed, and we have already waited longer than
* mmp_delay, we need to update it so the next write reflects the longer delay.
*
* Do not set mmp_delay if the multihost property is not on, so as not to
* trigger an activity check on import.
*/
static void
mmp_delay_update(spa_t *spa, boolean_t write_completed)
{
mmp_thread_t *mts = &spa->spa_mmp;
hrtime_t delay = gethrtime() - mts->mmp_last_write;
ASSERT(MUTEX_HELD(&mts->mmp_io_lock));
if (spa_multihost(spa) == B_FALSE) {
mts->mmp_delay = 0;
return;
}
if (delay > mts->mmp_delay)
mts->mmp_delay = delay;
if (write_completed == B_FALSE)
return;
mts->mmp_last_write = gethrtime();
/*
* strictly less than, in case delay was changed above.
*/
if (delay < mts->mmp_delay) {
hrtime_t min_delay = MSEC2NSEC(zfs_multihost_interval) /
MAX(1, vdev_count_leaves(spa));
mts->mmp_delay = MAX(((delay + mts->mmp_delay * 127) / 128),
min_delay);
}
}
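/*
 * Illustrative note, not part of the original patch: the smoothing above
 * is mmp_delay = MAX((delay + mmp_delay * 127) / 128, min_delay), so a
 * faster-than-average write only pulls the average down by roughly 1/128th,
 * while a slower write (handled before the early return) replaces the
 * average immediately.  For example, with mmp_delay at 128 ms and a new
 * delay of 0, the next value is (0 + 128 * 127) / 128 = 127 ms, unless
 * min_delay is larger.
 */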
static void
mmp_write_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
vdev_t *vd = zio->io_vd;
mmp_thread_t *mts = zio->io_private;
mutex_enter(&mts->mmp_io_lock);
uint64_t mmp_kstat_id = vd->vdev_mmp_kstat_id;
hrtime_t mmp_write_duration = gethrtime() - vd->vdev_mmp_pending;
mmp_delay_update(spa, (zio->io_error == 0));
vd->vdev_mmp_pending = 0;
vd->vdev_mmp_kstat_id = 0;
mutex_exit(&mts->mmp_io_lock);
spa_config_exit(spa, SCL_STATE, mmp_tag);
spa_mmp_history_set(spa, mmp_kstat_id, zio->io_error,
mmp_write_duration);
abd_free(zio->io_abd);
}
/*
* When the uberblock on-disk is updated by a spa_sync,
* creating a new "best" uberblock, update the one stored
* in the mmp thread state, used for mmp writes.
*/
void
mmp_update_uberblock(spa_t *spa, uberblock_t *ub)
{
mmp_thread_t *mmp = &spa->spa_mmp;
mutex_enter(&mmp->mmp_io_lock);
mmp->mmp_ub = *ub;
mmp->mmp_ub.ub_timestamp = gethrestime_sec();
mmp_delay_update(spa, B_TRUE);
mutex_exit(&mmp->mmp_io_lock);
}
/*
* Choose a random vdev, label, and MMP block, and write over it
* with a copy of the last-synced uberblock, whose timestamp
* has been updated to reflect that the pool is in use.
*/
static void
mmp_write_uberblock(spa_t *spa)
{
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
mmp_thread_t *mmp = &spa->spa_mmp;
uberblock_t *ub;
vdev_t *vd = NULL;
int label, error;
uint64_t offset;
hrtime_t lock_acquire_time = gethrtime();
spa_config_enter(spa, SCL_STATE, mmp_tag, RW_READER);
lock_acquire_time = gethrtime() - lock_acquire_time;
if (lock_acquire_time > (MSEC2NSEC(MMP_MIN_INTERVAL) / 10))
zfs_dbgmsg("SCL_STATE acquisition took %llu ns\n",
(u_longlong_t)lock_acquire_time);
error = mmp_random_leaf(spa->spa_root_vdev, &vd);
mutex_enter(&mmp->mmp_io_lock);
/*
* spa_mmp_history has two types of entries:
* Issued MMP write: records time issued, error status, etc.
* Skipped MMP write: an MMP write could not be issued because no
* suitable leaf vdev was available. See comment above struct
* spa_mmp_history for details.
*/
if (error) {
mmp_delay_update(spa, B_FALSE);
if (mmp->mmp_skip_error == error) {
spa_mmp_history_set_skip(spa, mmp->mmp_kstat_id - 1);
} else {
mmp->mmp_skip_error = error;
spa_mmp_history_add(spa, mmp->mmp_ub.ub_txg,
gethrestime_sec(), mmp->mmp_delay, NULL, 0,
mmp->mmp_kstat_id++, error);
}
mutex_exit(&mmp->mmp_io_lock);
spa_config_exit(spa, SCL_STATE, FTAG);
return;
}
mmp->mmp_skip_error = 0;
if (mmp->mmp_zio_root == NULL)
mmp->mmp_zio_root = zio_root(spa, NULL, NULL,
flags | ZIO_FLAG_GODFATHER);
ub = &mmp->mmp_ub;
ub->ub_timestamp = gethrestime_sec();
ub->ub_mmp_magic = MMP_MAGIC;
ub->ub_mmp_delay = mmp->mmp_delay;
vd->vdev_mmp_pending = gethrtime();
vd->vdev_mmp_kstat_id = mmp->mmp_kstat_id;
zio_t *zio = zio_null(mmp->mmp_zio_root, spa, NULL, NULL, NULL, flags);
abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
mmp->mmp_kstat_id++;
mutex_exit(&mmp->mmp_io_lock);
offset = VDEV_UBERBLOCK_OFFSET(vd, VDEV_UBERBLOCK_COUNT(vd) -
MMP_BLOCKS_PER_LABEL + spa_get_random(MMP_BLOCKS_PER_LABEL));
label = spa_get_random(VDEV_LABELS);
vdev_label_write(zio, vd, label, ub_abd, offset,
VDEV_UBERBLOCK_SIZE(vd), mmp_write_done, mmp,
flags | ZIO_FLAG_DONT_PROPAGATE);
(void) spa_mmp_history_add(spa, ub->ub_txg, ub->ub_timestamp,
ub->ub_mmp_delay, vd, label, vd->vdev_mmp_kstat_id, 0);
zio_nowait(zio);
}
static void
mmp_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
mmp_thread_t *mmp = &spa->spa_mmp;
boolean_t last_spa_suspended = spa_suspended(spa);
boolean_t last_spa_multihost = spa_multihost(spa);
callb_cpr_t cpr;
hrtime_t max_fail_ns = zfs_multihost_fail_intervals *
MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
mmp_thread_enter(mmp, &cpr);
/*
* The mmp_write_done() function calculates mmp_delay based on the
* prior value of mmp_delay and the elapsed time since the last write.
* For the first mmp write, there is no "last write", so we start
* with fake, but reasonable, default non-zero values.
*/
mmp->mmp_delay = MSEC2NSEC(MAX(zfs_multihost_interval,
MMP_MIN_INTERVAL)) / MAX(vdev_count_leaves(spa), 1);
mmp->mmp_last_write = gethrtime() - mmp->mmp_delay;
while (!mmp->mmp_thread_exiting) {
uint64_t mmp_fail_intervals = zfs_multihost_fail_intervals;
uint64_t mmp_interval = MSEC2NSEC(
MAX(zfs_multihost_interval, MMP_MIN_INTERVAL));
boolean_t suspended = spa_suspended(spa);
boolean_t multihost = spa_multihost(spa);
hrtime_t next_time;
if (multihost)
next_time = gethrtime() + mmp_interval /
MAX(vdev_count_leaves(spa), 1);
else
next_time = gethrtime() +
MSEC2NSEC(MMP_DEFAULT_INTERVAL);
/*
* MMP off => on, or suspended => !suspended:
* No writes occurred recently. Update mmp_last_write to give
* us some time to try.
*/
if ((!last_spa_multihost && multihost) ||
(last_spa_suspended && !suspended)) {
mutex_enter(&mmp->mmp_io_lock);
mmp->mmp_last_write = gethrtime();
mutex_exit(&mmp->mmp_io_lock);
}
/*
* MMP on => off:
* mmp_delay == 0 tells importing node to skip activity check.
*/
if (last_spa_multihost && !multihost) {
mutex_enter(&mmp->mmp_io_lock);
mmp->mmp_delay = 0;
mutex_exit(&mmp->mmp_io_lock);
}
last_spa_multihost = multihost;
last_spa_suspended = suspended;
/*
* Smooth max_fail_ns when its factors are decreased, because
* making (max_fail_ns < mmp_interval) results in the pool being
* immediately suspended before writes can occur at the new
* higher frequency.
*/
if ((mmp_interval * mmp_fail_intervals) < max_fail_ns) {
max_fail_ns = ((31 * max_fail_ns) + (mmp_interval *
mmp_fail_intervals)) / 32;
} else {
max_fail_ns = mmp_interval * mmp_fail_intervals;
}
/*
* Suspend the pool if no MMP write has succeeded in over
* mmp_interval * mmp_fail_intervals nanoseconds.
*/
if (!suspended && mmp_fail_intervals && multihost &&
(gethrtime() - mmp->mmp_last_write) > max_fail_ns) {
cmn_err(CE_WARN, "MMP writes to pool '%s' have not "
"succeeded in over %llus; suspending pool",
spa_name(spa),
NSEC2SEC(gethrtime() - mmp->mmp_last_write));
zio_suspend(spa, NULL, ZIO_SUSPEND_MMP);
}
if (multihost && !suspended)
mmp_write_uberblock(spa);
CALLB_CPR_SAFE_BEGIN(&cpr);
(void) cv_timedwait_sig_hires(&mmp->mmp_thread_cv,
&mmp->mmp_thread_lock, next_time, USEC2NSEC(1),
CALLOUT_FLAG_ABSOLUTE);
CALLB_CPR_SAFE_END(&cpr, &mmp->mmp_thread_lock);
}
/* Outstanding writes are allowed to complete. */
if (mmp->mmp_zio_root)
zio_wait(mmp->mmp_zio_root);
mmp->mmp_zio_root = NULL;
mmp_thread_exit(mmp, &mmp->mmp_thread, &cpr);
}
/*
* Signal the MMP thread to wake it, when it is sleeping on
* its cv. Used when some module parameter has changed and
* we want the thread to know about it.
* Only signal if the pool is active and mmp thread is
* running, otherwise there is no thread to wake.
*/
static void
mmp_signal_thread(spa_t *spa)
{
mmp_thread_t *mmp = &spa->spa_mmp;
mutex_enter(&mmp->mmp_thread_lock);
if (mmp->mmp_thread)
cv_broadcast(&mmp->mmp_thread_cv);
mutex_exit(&mmp->mmp_thread_lock);
}
void
mmp_signal_all_threads(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa))) {
if (spa->spa_state == POOL_STATE_ACTIVE)
mmp_signal_thread(spa);
}
mutex_exit(&spa_namespace_lock);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
#include <linux/mod_compat.h>
static int
param_set_multihost_interval(const char *val, zfs_kernel_param_t *kp)
{
int ret;
ret = param_set_ulong(val, kp);
if (ret < 0)
return (ret);
mmp_signal_all_threads();
return (ret);
}
/* BEGIN CSTYLED */
module_param(zfs_multihost_fail_intervals, uint, 0644);
MODULE_PARM_DESC(zfs_multihost_fail_intervals,
"Max allowed period without a successful mmp write");
module_param_call(zfs_multihost_interval, param_set_multihost_interval,
param_get_ulong, &zfs_multihost_interval, 0644);
MODULE_PARM_DESC(zfs_multihost_interval,
"Milliseconds between mmp writes to each leaf");
module_param(zfs_multihost_import_intervals, uint, 0644);
MODULE_PARM_DESC(zfs_multihost_import_intervals,
"Number of zfs_multihost_interval periods to wait for activity");
/* END CSTYLED */
#endif
diff --git a/module/zfs/range_tree.c b/module/zfs/range_tree.c
index 01ef463ecc25..baa655d39a59 100644
--- a/module/zfs/range_tree.c
+++ b/module/zfs/range_tree.c
@@ -1,672 +1,645 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
- * Copyright (c) 2013, 2014 by Delphix. All rights reserved.
+ * Copyright (c) 2013, 2015 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dnode.h>
#include <sys/zio.h>
#include <sys/range_tree.h>
/*
* Range trees are tree-based data structures that can be used to
* track free space or generally any space allocation information.
* A range tree keeps track of individual segments and automatically
* provides facilities such as adjacent extent merging and extent
* splitting in response to range add/remove requests.
*
* A range tree starts out completely empty, with no segments in it.
* Adding an allocation via range_tree_add to the range tree can either:
* 1) create a new extent
* 2) extend an adjacent extent
* 3) merge two adjacent extents
* Conversely, removing an allocation via range_tree_remove can:
* 1) completely remove an extent
* 2) shorten an extent (if the allocation was near one of its ends)
* 3) split an extent into two extents, in effect punching a hole
*
* A range tree is also capable of 'bridging' gaps when adding
* allocations. This is useful for cases when close proximity of
* allocations is an important detail that needs to be represented
* in the range tree. See range_tree_set_gap(). The default behavior
* is not to bridge gaps (i.e. the maximum allowed gap size is 0).
*
* In order to traverse a range tree, use either the range_tree_walk()
* or range_tree_vacate() functions.
*
* To obtain more accurate information on individual segment
* operations that the range tree performs "under the hood", you can
* specify a set of callbacks by passing a range_tree_ops_t structure
* to the range_tree_create function. Any callbacks that are non-NULL
* are then called at the appropriate times.
*
* The range tree code also supports a special variant of range trees
* that can bridge small gaps between segments. This kind of tree is used
* by the dsl scanning code to group I/Os into mostly sequential chunks to
* optimize disk performance. The code here attempts to do this with as
* little memory and computational overhead as possible. One limitation of
* this implementation is that segments of range trees with gaps can only
* support removing complete segments.
*/
kmem_cache_t *range_seg_cache;
/* Generic ops for managing an AVL tree alongside a range tree */
struct range_tree_ops rt_avl_ops = {
.rtop_create = rt_avl_create,
.rtop_destroy = rt_avl_destroy,
.rtop_add = rt_avl_add,
.rtop_remove = rt_avl_remove,
.rtop_vacate = rt_avl_vacate,
};
void
range_tree_init(void)
{
ASSERT(range_seg_cache == NULL);
range_seg_cache = kmem_cache_create("range_seg_cache",
sizeof (range_seg_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
}
void
range_tree_fini(void)
{
kmem_cache_destroy(range_seg_cache);
range_seg_cache = NULL;
}
void
range_tree_stat_verify(range_tree_t *rt)
{
range_seg_t *rs;
uint64_t hist[RANGE_TREE_HISTOGRAM_SIZE] = { 0 };
int i;
for (rs = avl_first(&rt->rt_root); rs != NULL;
rs = AVL_NEXT(&rt->rt_root, rs)) {
uint64_t size = rs->rs_end - rs->rs_start;
int idx = highbit64(size) - 1;
hist[idx]++;
ASSERT3U(hist[idx], !=, 0);
}
for (i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
if (hist[i] != rt->rt_histogram[i]) {
zfs_dbgmsg("i=%d, hist=%p, hist=%llu, rt_hist=%llu",
i, hist, hist[i], rt->rt_histogram[i]);
}
VERIFY3U(hist[i], ==, rt->rt_histogram[i]);
}
}
-/*
- * Changes out the lock used by the range tree. Useful when you are moving
- * the range tree between containing structures without having to recreate
- * it. Both the old and new locks must be held by the caller.
- */
-void
-range_tree_set_lock(range_tree_t *rt, kmutex_t *lp)
-{
- ASSERT(MUTEX_HELD(rt->rt_lock) && MUTEX_HELD(lp));
- rt->rt_lock = lp;
-}
-
static void
range_tree_stat_incr(range_tree_t *rt, range_seg_t *rs)
{
uint64_t size = rs->rs_end - rs->rs_start;
int idx = highbit64(size) - 1;
ASSERT(size != 0);
ASSERT3U(idx, <,
sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));
- ASSERT(MUTEX_HELD(rt->rt_lock));
rt->rt_histogram[idx]++;
ASSERT3U(rt->rt_histogram[idx], !=, 0);
}
static void
range_tree_stat_decr(range_tree_t *rt, range_seg_t *rs)
{
uint64_t size = rs->rs_end - rs->rs_start;
int idx = highbit64(size) - 1;
ASSERT(size != 0);
ASSERT3U(idx, <,
sizeof (rt->rt_histogram) / sizeof (*rt->rt_histogram));
- ASSERT(MUTEX_HELD(rt->rt_lock));
ASSERT3U(rt->rt_histogram[idx], !=, 0);
rt->rt_histogram[idx]--;
}
/*
* NOTE: caller is responsible for all locking.
*/
static int
range_tree_seg_compare(const void *x1, const void *x2)
{
const range_seg_t *r1 = (const range_seg_t *)x1;
const range_seg_t *r2 = (const range_seg_t *)x2;
ASSERT3U(r1->rs_start, <=, r1->rs_end);
ASSERT3U(r2->rs_start, <=, r2->rs_end);
return ((r1->rs_start >= r2->rs_end) - (r1->rs_end <= r2->rs_start));
}
range_tree_t *
range_tree_create_impl(range_tree_ops_t *ops, void *arg,
- int (*avl_compare) (const void *, const void *), kmutex_t *lp, uint64_t gap)
+ int (*avl_compare) (const void *, const void *), uint64_t gap)
{
range_tree_t *rt = kmem_zalloc(sizeof (range_tree_t), KM_SLEEP);
avl_create(&rt->rt_root, range_tree_seg_compare,
sizeof (range_seg_t), offsetof(range_seg_t, rs_node));
- rt->rt_lock = lp;
rt->rt_ops = ops;
rt->rt_gap = gap;
rt->rt_arg = arg;
rt->rt_avl_compare = avl_compare;
if (rt->rt_ops != NULL && rt->rt_ops->rtop_create != NULL)
rt->rt_ops->rtop_create(rt, rt->rt_arg);
return (rt);
}
range_tree_t *
-range_tree_create(range_tree_ops_t *ops, void *arg, kmutex_t *lp)
+range_tree_create(range_tree_ops_t *ops, void *arg)
{
- return (range_tree_create_impl(ops, arg, NULL, lp, 0));
+ return (range_tree_create_impl(ops, arg, NULL, 0));
}
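/*
 * Illustrative sketch, not part of the original patch: how the merge and
 * split behavior described at the top of this file plays out.  Assumes
 * range_tree_init() has already populated range_seg_cache and that the
 * caller provides its own locking now that rt_lock is gone.
 */
#if 0
static void
range_tree_example(void)
{
	range_tree_t *rt = range_tree_create(NULL, NULL);

	range_tree_add(rt, 0x1000, 0x1000);	/* new segment [0x1000, 0x2000) */
	range_tree_add(rt, 0x2000, 0x1000);	/* merges into [0x1000, 0x3000) */
	range_tree_remove(rt, 0x1800, 0x400);	/* splits it, punching a hole */
	ASSERT3U(range_tree_space(rt), ==, 0x1c00);

	range_tree_vacate(rt, NULL, NULL);	/* empty it so destroy passes */
	range_tree_destroy(rt);
}
#endif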
void
range_tree_destroy(range_tree_t *rt)
{
VERIFY0(rt->rt_space);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_destroy != NULL)
rt->rt_ops->rtop_destroy(rt, rt->rt_arg);
avl_destroy(&rt->rt_root);
kmem_free(rt, sizeof (*rt));
}
void
range_tree_adjust_fill(range_tree_t *rt, range_seg_t *rs, int64_t delta)
{
- ASSERT(MUTEX_HELD(rt->rt_lock));
-
ASSERT3U(rs->rs_fill + delta, !=, 0);
ASSERT3U(rs->rs_fill + delta, <=, rs->rs_end - rs->rs_start);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
rs->rs_fill += delta;
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}
static void
range_tree_add_impl(void *arg, uint64_t start, uint64_t size, uint64_t fill)
{
range_tree_t *rt = arg;
avl_index_t where;
range_seg_t rsearch, *rs_before, *rs_after, *rs;
uint64_t end = start + size, gap = rt->rt_gap;
uint64_t bridge_size = 0;
boolean_t merge_before, merge_after;
- ASSERT(MUTEX_HELD(rt->rt_lock));
ASSERT3U(size, !=, 0);
ASSERT3U(fill, <=, size);
rsearch.rs_start = start;
rsearch.rs_end = end;
rs = avl_find(&rt->rt_root, &rsearch, &where);
if (gap == 0 && rs != NULL &&
rs->rs_start <= start && rs->rs_end >= end) {
zfs_panic_recover("zfs: allocating allocated segment "
"(offset=%llu size=%llu) of (offset=%llu size=%llu)\n",
(longlong_t)start, (longlong_t)size,
(longlong_t)rs->rs_start,
(longlong_t)rs->rs_end - rs->rs_start);
return;
}
/*
* If this is a gap-supporting range tree, it is possible that we
* are inserting into an existing segment. In this case simply
* bump the fill count and call the remove / add callbacks. If the
* new range will extend an existing segment, we remove the
* existing one, apply the new extent to it and re-insert it using
* the normal code paths.
*/
if (rs != NULL) {
ASSERT3U(gap, !=, 0);
if (rs->rs_start <= start && rs->rs_end >= end) {
range_tree_adjust_fill(rt, rs, fill);
return;
}
avl_remove(&rt->rt_root, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
range_tree_stat_decr(rt, rs);
rt->rt_space -= rs->rs_end - rs->rs_start;
fill += rs->rs_fill;
start = MIN(start, rs->rs_start);
end = MAX(end, rs->rs_end);
size = end - start;
range_tree_add_impl(rt, start, size, fill);
kmem_cache_free(range_seg_cache, rs);
return;
}
ASSERT3P(rs, ==, NULL);
/*
* Determine whether or not we will have to merge with our neighbors.
* If gap != 0, we might need to merge with our neighbors even if we
* aren't directly touching.
*/
rs_before = avl_nearest(&rt->rt_root, where, AVL_BEFORE);
rs_after = avl_nearest(&rt->rt_root, where, AVL_AFTER);
merge_before = (rs_before != NULL && rs_before->rs_end >= start - gap);
merge_after = (rs_after != NULL && rs_after->rs_start <= end + gap);
if (merge_before && gap != 0)
bridge_size += start - rs_before->rs_end;
if (merge_after && gap != 0)
bridge_size += rs_after->rs_start - end;
if (merge_before && merge_after) {
avl_remove(&rt->rt_root, rs_before);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL) {
rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
}
range_tree_stat_decr(rt, rs_before);
range_tree_stat_decr(rt, rs_after);
rs_after->rs_fill += rs_before->rs_fill + fill;
rs_after->rs_start = rs_before->rs_start;
kmem_cache_free(range_seg_cache, rs_before);
rs = rs_after;
} else if (merge_before) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_before, rt->rt_arg);
range_tree_stat_decr(rt, rs_before);
rs_before->rs_fill += fill;
rs_before->rs_end = end;
rs = rs_before;
} else if (merge_after) {
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs_after, rt->rt_arg);
range_tree_stat_decr(rt, rs_after);
rs_after->rs_fill += fill;
rs_after->rs_start = start;
rs = rs_after;
} else {
rs = kmem_cache_alloc(range_seg_cache, KM_SLEEP);
rs->rs_fill = fill;
rs->rs_start = start;
rs->rs_end = end;
avl_insert(&rt->rt_root, rs, where);
}
if (gap != 0)
ASSERT3U(rs->rs_fill, <=, rs->rs_end - rs->rs_start);
else
ASSERT3U(rs->rs_fill, ==, rs->rs_end - rs->rs_start);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
range_tree_stat_incr(rt, rs);
rt->rt_space += size + bridge_size;
}
void
range_tree_add(void *arg, uint64_t start, uint64_t size)
{
range_tree_add_impl(arg, start, size, size);
}
static void
range_tree_remove_impl(range_tree_t *rt, uint64_t start, uint64_t size,
boolean_t do_fill)
{
avl_index_t where;
range_seg_t rsearch, *rs, *newseg;
uint64_t end = start + size;
boolean_t left_over, right_over;
- ASSERT(MUTEX_HELD(rt->rt_lock));
VERIFY3U(size, !=, 0);
VERIFY3U(size, <=, rt->rt_space);
rsearch.rs_start = start;
rsearch.rs_end = end;
rs = avl_find(&rt->rt_root, &rsearch, &where);
/* Make sure we completely overlap with someone */
if (rs == NULL) {
zfs_panic_recover("zfs: freeing free segment "
"(offset=%llu size=%llu)",
(longlong_t)start, (longlong_t)size);
return;
}
/*
* Range trees with gap support must only remove complete segments
* from the tree. This allows us to maintain accurate fill accounting
* and to ensure that bridged sections are not leaked. If we need to
* remove less than the full segment, we can only adjust the fill count.
*/
if (rt->rt_gap != 0) {
if (do_fill) {
if (rs->rs_fill == size) {
start = rs->rs_start;
end = rs->rs_end;
size = end - start;
} else {
range_tree_adjust_fill(rt, rs, -size);
return;
}
} else if (rs->rs_start != start || rs->rs_end != end) {
zfs_panic_recover("zfs: freeing partial segment of "
"gap tree (offset=%llu size=%llu) of "
"(offset=%llu size=%llu)",
(longlong_t)start, (longlong_t)size,
(longlong_t)rs->rs_start,
(longlong_t)rs->rs_end - rs->rs_start);
return;
}
}
VERIFY3U(rs->rs_start, <=, start);
VERIFY3U(rs->rs_end, >=, end);
left_over = (rs->rs_start != start);
right_over = (rs->rs_end != end);
range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
if (left_over && right_over) {
newseg = kmem_cache_alloc(range_seg_cache, KM_SLEEP);
newseg->rs_start = end;
newseg->rs_end = rs->rs_end;
newseg->rs_fill = newseg->rs_end - newseg->rs_start;
range_tree_stat_incr(rt, newseg);
rs->rs_end = start;
avl_insert_here(&rt->rt_root, newseg, rs, AVL_AFTER);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, newseg, rt->rt_arg);
} else if (left_over) {
rs->rs_end = start;
} else if (right_over) {
rs->rs_start = end;
} else {
avl_remove(&rt->rt_root, rs);
kmem_cache_free(range_seg_cache, rs);
rs = NULL;
}
if (rs != NULL) {
/*
* The fill of the leftover segment will always be equal to
* the size, since we do not support removing partial segments
* of range trees with gaps.
*/
rs->rs_fill = rs->rs_end - rs->rs_start;
range_tree_stat_incr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
}
rt->rt_space -= size;
}
void
range_tree_remove(void *arg, uint64_t start, uint64_t size)
{
range_tree_remove_impl(arg, start, size, B_FALSE);
}
void
range_tree_remove_fill(range_tree_t *rt, uint64_t start, uint64_t size)
{
range_tree_remove_impl(rt, start, size, B_TRUE);
}
void
range_tree_resize_segment(range_tree_t *rt, range_seg_t *rs,
uint64_t newstart, uint64_t newsize)
{
int64_t delta = newsize - (rs->rs_end - rs->rs_start);
- ASSERT(MUTEX_HELD(rt->rt_lock));
-
range_tree_stat_decr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_remove != NULL)
rt->rt_ops->rtop_remove(rt, rs, rt->rt_arg);
rs->rs_start = newstart;
rs->rs_end = newstart + newsize;
range_tree_stat_incr(rt, rs);
if (rt->rt_ops != NULL && rt->rt_ops->rtop_add != NULL)
rt->rt_ops->rtop_add(rt, rs, rt->rt_arg);
rt->rt_space += delta;
}
static range_seg_t *
range_tree_find_impl(range_tree_t *rt, uint64_t start, uint64_t size)
{
avl_index_t where;
range_seg_t rsearch;
uint64_t end = start + size;
- ASSERT(MUTEX_HELD(rt->rt_lock));
VERIFY(size != 0);
rsearch.rs_start = start;
rsearch.rs_end = end;
return (avl_find(&rt->rt_root, &rsearch, &where));
}
range_seg_t *
range_tree_find(range_tree_t *rt, uint64_t start, uint64_t size)
{
range_seg_t *rs = range_tree_find_impl(rt, start, size);
if (rs != NULL && rs->rs_start <= start && rs->rs_end >= start + size)
return (rs);
return (NULL);
}
void
range_tree_verify(range_tree_t *rt, uint64_t off, uint64_t size)
{
range_seg_t *rs;
- mutex_enter(rt->rt_lock);
rs = range_tree_find(rt, off, size);
if (rs != NULL)
panic("freeing free block; rs=%p", (void *)rs);
- mutex_exit(rt->rt_lock);
}
boolean_t
range_tree_contains(range_tree_t *rt, uint64_t start, uint64_t size)
{
return (range_tree_find(rt, start, size) != NULL);
}
/*
* Ensure that this range is not in the tree, regardless of whether
* it is currently in the tree.
*/
void
range_tree_clear(range_tree_t *rt, uint64_t start, uint64_t size)
{
range_seg_t *rs;
+ if (size == 0)
+ return;
+
while ((rs = range_tree_find_impl(rt, start, size)) != NULL) {
uint64_t free_start = MAX(rs->rs_start, start);
uint64_t free_end = MIN(rs->rs_end, start + size);
range_tree_remove(rt, free_start, free_end - free_start);
}
}
void
range_tree_swap(range_tree_t **rtsrc, range_tree_t **rtdst)
{
range_tree_t *rt;
- ASSERT(MUTEX_HELD((*rtsrc)->rt_lock));
ASSERT0(range_tree_space(*rtdst));
ASSERT0(avl_numnodes(&(*rtdst)->rt_root));
rt = *rtsrc;
*rtsrc = *rtdst;
*rtdst = rt;
}
void
range_tree_vacate(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
range_seg_t *rs;
void *cookie = NULL;
- ASSERT(MUTEX_HELD(rt->rt_lock));
-
if (rt->rt_ops != NULL && rt->rt_ops->rtop_vacate != NULL)
rt->rt_ops->rtop_vacate(rt, rt->rt_arg);
while ((rs = avl_destroy_nodes(&rt->rt_root, &cookie)) != NULL) {
if (func != NULL)
func(arg, rs->rs_start, rs->rs_end - rs->rs_start);
kmem_cache_free(range_seg_cache, rs);
}
bzero(rt->rt_histogram, sizeof (rt->rt_histogram));
rt->rt_space = 0;
}
void
range_tree_walk(range_tree_t *rt, range_tree_func_t *func, void *arg)
{
range_seg_t *rs;
- ASSERT(MUTEX_HELD(rt->rt_lock));
-
for (rs = avl_first(&rt->rt_root); rs; rs = AVL_NEXT(&rt->rt_root, rs))
func(arg, rs->rs_start, rs->rs_end - rs->rs_start);
}
range_seg_t *
range_tree_first(range_tree_t *rt)
{
- ASSERT(MUTEX_HELD(rt->rt_lock));
return (avl_first(&rt->rt_root));
}
uint64_t
range_tree_space(range_tree_t *rt)
{
return (rt->rt_space);
}
/* Generic range tree functions for maintaining segments in an AVL tree. */
void
rt_avl_create(range_tree_t *rt, void *arg)
{
avl_tree_t *tree = arg;
avl_create(tree, rt->rt_avl_compare, sizeof (range_seg_t),
offsetof(range_seg_t, rs_pp_node));
}
void
rt_avl_destroy(range_tree_t *rt, void *arg)
{
avl_tree_t *tree = arg;
ASSERT0(avl_numnodes(tree));
avl_destroy(tree);
}
void
rt_avl_add(range_tree_t *rt, range_seg_t *rs, void *arg)
{
avl_tree_t *tree = arg;
avl_add(tree, rs);
}
void
rt_avl_remove(range_tree_t *rt, range_seg_t *rs, void *arg)
{
avl_tree_t *tree = arg;
avl_remove(tree, rs);
}
void
rt_avl_vacate(range_tree_t *rt, void *arg)
{
/*
* Normally one would walk the tree freeing nodes along the way.
* Since the nodes are shared with the range trees we can avoid
* walking all nodes and just reinitialize the avl tree. The nodes
* will be freed by the range tree, so we don't want to free them here.
*/
rt_avl_create(rt, arg);
}
diff --git a/module/zfs/spa.c b/module/zfs/spa.c
index 53b5aabf02fd..08fc7bbda71d 100644
--- a/module/zfs/spa.c
+++ b/module/zfs/spa.c
@@ -1,7342 +1,7261 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2015, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2013, 2014, Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2017 Datto Inc.
* Copyright 2017 Joyent, Inc.
*/
/*
* SPA: Storage Pool Allocator
*
* This file contains all the routines used when modifying on-disk SPA state.
* This includes opening, importing, destroying, exporting a pool, and syncing a
* pool.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/ddt.h>
#include <sys/vdev_impl.h>
+#include <sys/vdev_removal.h>
+#include <sys/vdev_indirect_mapping.h>
+#include <sys/vdev_indirect_births.h>
#include <sys/vdev_disk.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/mmp.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
+#include <sys/bpobj.h>
#include <sys/dmu_traverse.h>
#include <sys/dmu_objset.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_synctask.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/callb.h>
#include <sys/systeminfo.h>
#include <sys/spa_boot.h>
#include <sys/zfs_ioctl.h>
#include <sys/dsl_scan.h>
#include <sys/zfeature.h>
#include <sys/dsl_destroy.h>
#include <sys/zvol.h>
#ifdef _KERNEL
#include <sys/fm/protocol.h>
#include <sys/fm/util.h>
#include <sys/bootprops.h>
#include <sys/callb.h>
#include <sys/cpupart.h>
#include <sys/pool.h>
#include <sys/sysdc.h>
#include <sys/zone.h>
#endif /* _KERNEL */
#include "zfs_prop.h"
#include "zfs_comutil.h"
/*
* The interval, in seconds, at which failed configuration cache file writes
* should be retried.
*/
-static int zfs_ccw_retry_interval = 300;
+int zfs_ccw_retry_interval = 300;
typedef enum zti_modes {
ZTI_MODE_FIXED, /* value is # of threads (min 1) */
ZTI_MODE_BATCH, /* cpu-intensive; value is ignored */
ZTI_MODE_NULL, /* don't create a taskq */
ZTI_NMODES
} zti_modes_t;
#define ZTI_P(n, q) { ZTI_MODE_FIXED, (n), (q) }
#define ZTI_PCT(n) { ZTI_MODE_ONLINE_PERCENT, (n), 1 }
#define ZTI_BATCH { ZTI_MODE_BATCH, 0, 1 }
#define ZTI_NULL { ZTI_MODE_NULL, 0, 0 }
#define ZTI_N(n) ZTI_P(n, 1)
#define ZTI_ONE ZTI_N(1)
typedef struct zio_taskq_info {
zti_modes_t zti_mode;
uint_t zti_value;
uint_t zti_count;
} zio_taskq_info_t;
static const char *const zio_taskq_types[ZIO_TASKQ_TYPES] = {
"iss", "iss_h", "int", "int_h"
};
/*
* This table defines the taskq settings for each ZFS I/O type. When
* initializing a pool, we use this table to create an appropriately sized
* taskq. Some operations are low volume and therefore have a small, static
* number of threads assigned to their taskqs using the ZTI_N(#) or ZTI_ONE
* macros. Other operations process a large amount of data; the ZTI_BATCH
* macro causes us to create a taskq oriented for throughput. Some operations
* are so high frequency and short-lived that the taskq itself can become a
* point of lock contention. The ZTI_P(#, #) macro indicates that we need an
* additional degree of parallelism specified by the number of threads per-
* taskq and the number of taskqs; when dispatching an event in this case, the
* particular taskq is chosen at random.
*
* The different taskq priorities are to handle the different contexts (issue
* and interrupt) and then to reserve threads for ZIO_PRIORITY_NOW I/Os that
* need to be handled with minimum delay.
*/
const zio_taskq_info_t zio_taskqs[ZIO_TYPES][ZIO_TASKQ_TYPES] = {
/* ISSUE ISSUE_HIGH INTR INTR_HIGH */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* NULL */
{ ZTI_N(8), ZTI_NULL, ZTI_P(12, 8), ZTI_NULL }, /* READ */
{ ZTI_BATCH, ZTI_N(5), ZTI_P(12, 8), ZTI_N(5) }, /* WRITE */
{ ZTI_P(12, 8), ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* FREE */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* CLAIM */
{ ZTI_ONE, ZTI_NULL, ZTI_ONE, ZTI_NULL }, /* IOCTL */
};
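/*
 * Editorial illustration, not part of spa.c: the standalone sketch below
 * mirrors the zio_taskq_info_t shape and shows what the ZTI_P()/ZTI_N()
 * entries in the table above expand to.  All SK_* names are made up for
 * this sketch.
 */
#include <assert.h>

typedef enum { SK_MODE_FIXED, SK_MODE_BATCH, SK_MODE_NULL } sk_mode_t;
typedef struct { sk_mode_t sk_mode; unsigned sk_value; unsigned sk_count; } sk_tqinfo_t;
#define SK_P(n, q) { SK_MODE_FIXED, (n), (q) }
#define SK_N(n) SK_P(n, 1)

int
main(void)
{
	/* READ interrupt above, ZTI_P(12, 8): 8 discrete taskqs, 12 threads each. */
	sk_tqinfo_t read_intr = SK_P(12, 8);
	/* WRITE issue_high above, ZTI_N(5): a single taskq with 5 threads. */
	sk_tqinfo_t write_issh = SK_N(5);

	assert(read_intr.sk_value == 12 && read_intr.sk_count == 8);
	assert(write_issh.sk_value == 5 && write_issh.sk_count == 1);
	return (0);
}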
-static sysevent_t *spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl,
- const char *name);
-static void spa_event_post(sysevent_t *ev);
static void spa_sync_version(void *arg, dmu_tx_t *tx);
static void spa_sync_props(void *arg, dmu_tx_t *tx);
static boolean_t spa_has_active_shared_spare(spa_t *spa);
static inline int spa_load_impl(spa_t *spa, uint64_t, nvlist_t *config,
- spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
+ spa_load_state_t state, spa_import_type_t type, boolean_t trust_config,
char **ereport);
static void spa_vdev_resilver_done(spa_t *spa);
uint_t zio_taskq_batch_pct = 75; /* 1 thread per cpu in pset */
id_t zio_taskq_psrset_bind = PS_NONE;
boolean_t zio_taskq_sysdc = B_TRUE; /* use SDC scheduling class */
uint_t zio_taskq_basedc = 80; /* base duty cycle */
boolean_t spa_create_process = B_TRUE; /* no process ==> no sysdc */
/*
* This (illegal) pool name is used when temporarily importing a spa_t in order
* to get the vdev stats associated with the imported devices.
*/
#define TRYIMPORT_NAME "$import"
/*
* ==========================================================================
* SPA properties routines
* ==========================================================================
*/
/*
* Add a (source=src, propname=propval) list to an nvlist.
*/
static void
spa_prop_add_list(nvlist_t *nvl, zpool_prop_t prop, char *strval,
uint64_t intval, zprop_source_t src)
{
const char *propname = zpool_prop_to_name(prop);
nvlist_t *propval;
VERIFY(nvlist_alloc(&propval, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(propval, ZPROP_SOURCE, src) == 0);
if (strval != NULL)
VERIFY(nvlist_add_string(propval, ZPROP_VALUE, strval) == 0);
else
VERIFY(nvlist_add_uint64(propval, ZPROP_VALUE, intval) == 0);
VERIFY(nvlist_add_nvlist(nvl, propname, propval) == 0);
nvlist_free(propval);
}
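/*
 * Editorial illustration, not part of spa.c: a user-space libnvpair sketch of
 * the (source, value) nesting that spa_prop_add_list() builds.  The "source",
 * "value", and "capacity" key strings are placeholders chosen for this sketch,
 * not the kernel's ZPROP_* names.  Link with -lnvpair.
 */
#include <stdio.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *props, *propval;

	if (nvlist_alloc(&props, NV_UNIQUE_NAME, 0) != 0 ||
	    nvlist_alloc(&propval, NV_UNIQUE_NAME, 0) != 0)
		return (1);

	(void) nvlist_add_uint64(propval, "source", 0);		/* property source */
	(void) nvlist_add_uint64(propval, "value", 75);		/* numeric value */
	(void) nvlist_add_nvlist(props, "capacity", propval);	/* keyed by prop name */

	dump_nvlist(props, 2);		/* print the nested structure */
	nvlist_free(propval);
	nvlist_free(props);
	return (0);
}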
/*
* Get property values from the spa configuration.
*/
static void
spa_prop_get_config(spa_t *spa, nvlist_t **nvp)
{
vdev_t *rvd = spa->spa_root_vdev;
dsl_pool_t *pool = spa->spa_dsl_pool;
uint64_t size, alloc, cap, version;
const zprop_source_t src = ZPROP_SRC_NONE;
spa_config_dirent_t *dp;
metaslab_class_t *mc = spa_normal_class(spa);
ASSERT(MUTEX_HELD(&spa->spa_props_lock));
if (rvd != NULL) {
alloc = metaslab_class_get_alloc(spa_normal_class(spa));
size = metaslab_class_get_space(spa_normal_class(spa));
spa_prop_add_list(*nvp, ZPOOL_PROP_NAME, spa_name(spa), 0, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_SIZE, NULL, size, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_ALLOCATED, NULL, alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FREE, NULL,
size - alloc, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_FRAGMENTATION, NULL,
metaslab_class_fragmentation(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_EXPANDSZ, NULL,
metaslab_class_expandable_space(mc), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_READONLY, NULL,
(spa_mode(spa) == FREAD), src);
cap = (size == 0) ? 0 : (alloc * 100 / size);
spa_prop_add_list(*nvp, ZPOOL_PROP_CAPACITY, NULL, cap, src);
spa_prop_add_list(*nvp, ZPOOL_PROP_DEDUPRATIO, NULL,
ddt_get_pool_dedup_ratio(spa), src);
spa_prop_add_list(*nvp, ZPOOL_PROP_HEALTH, NULL,
rvd->vdev_state, src);
version = spa_version(spa);
if (version == zpool_prop_default_numeric(ZPOOL_PROP_VERSION)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_DEFAULT);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_VERSION, NULL,
version, ZPROP_SRC_LOCAL);
}
}
if (pool != NULL) {
/*
* The $FREE directory was introduced in SPA_VERSION_DEADLISTS;
* when opening pools before this version, freedir will be NULL.
*/
if (pool->dp_free_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING, NULL,
dsl_dir_phys(pool->dp_free_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_FREEING,
NULL, 0, src);
}
if (pool->dp_leak_dir != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED, NULL,
dsl_dir_phys(pool->dp_leak_dir)->dd_used_bytes,
src);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_LEAKED,
NULL, 0, src);
}
}
spa_prop_add_list(*nvp, ZPOOL_PROP_GUID, NULL, spa_guid(spa), src);
if (spa->spa_comment != NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_COMMENT, spa->spa_comment,
0, ZPROP_SRC_LOCAL);
}
if (spa->spa_root != NULL)
spa_prop_add_list(*nvp, ZPOOL_PROP_ALTROOT, spa->spa_root,
0, ZPROP_SRC_LOCAL);
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
MIN(zfs_max_recordsize, SPA_MAXBLOCKSIZE), ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXBLOCKSIZE, NULL,
SPA_OLD_MAXBLOCKSIZE, ZPROP_SRC_NONE);
}
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MAX_SIZE, ZPROP_SRC_NONE);
} else {
spa_prop_add_list(*nvp, ZPOOL_PROP_MAXDNODESIZE, NULL,
DNODE_MIN_SIZE, ZPROP_SRC_NONE);
}
if ((dp = list_head(&spa->spa_config_list)) != NULL) {
if (dp->scd_path == NULL) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
"none", 0, ZPROP_SRC_LOCAL);
} else if (strcmp(dp->scd_path, spa_config_path) != 0) {
spa_prop_add_list(*nvp, ZPOOL_PROP_CACHEFILE,
dp->scd_path, 0, ZPROP_SRC_LOCAL);
}
}
}
/*
* Get zpool property values.
*/
int
spa_prop_get(spa_t *spa, nvlist_t **nvp)
{
objset_t *mos = spa->spa_meta_objset;
zap_cursor_t zc;
zap_attribute_t za;
int err;
err = nvlist_alloc(nvp, NV_UNIQUE_NAME, KM_SLEEP);
if (err)
return (err);
mutex_enter(&spa->spa_props_lock);
/*
* Get properties from the spa config.
*/
spa_prop_get_config(spa, nvp);
/* If no pool property object, no more prop to get. */
if (mos == NULL || spa->spa_pool_props_object == 0) {
mutex_exit(&spa->spa_props_lock);
goto out;
}
/*
* Get properties from the MOS pool property object.
*/
for (zap_cursor_init(&zc, mos, spa->spa_pool_props_object);
(err = zap_cursor_retrieve(&zc, &za)) == 0;
zap_cursor_advance(&zc)) {
uint64_t intval = 0;
char *strval = NULL;
zprop_source_t src = ZPROP_SRC_DEFAULT;
zpool_prop_t prop;
if ((prop = zpool_name_to_prop(za.za_name)) == ZPOOL_PROP_INVAL)
continue;
switch (za.za_integer_length) {
case 8:
/* integer property */
if (za.za_first_integer !=
zpool_prop_default_numeric(prop))
src = ZPROP_SRC_LOCAL;
if (prop == ZPOOL_PROP_BOOTFS) {
dsl_pool_t *dp;
dsl_dataset_t *ds = NULL;
dp = spa_get_dsl(spa);
dsl_pool_config_enter(dp, FTAG);
if ((err = dsl_dataset_hold_obj(dp,
za.za_first_integer, FTAG, &ds))) {
dsl_pool_config_exit(dp, FTAG);
break;
}
strval = kmem_alloc(ZFS_MAX_DATASET_NAME_LEN,
KM_SLEEP);
dsl_dataset_name(ds, strval);
dsl_dataset_rele(ds, FTAG);
dsl_pool_config_exit(dp, FTAG);
} else {
strval = NULL;
intval = za.za_first_integer;
}
spa_prop_add_list(*nvp, prop, strval, intval, src);
if (strval != NULL)
kmem_free(strval, ZFS_MAX_DATASET_NAME_LEN);
break;
case 1:
/* string property */
strval = kmem_alloc(za.za_num_integers, KM_SLEEP);
err = zap_lookup(mos, spa->spa_pool_props_object,
za.za_name, 1, za.za_num_integers, strval);
if (err) {
kmem_free(strval, za.za_num_integers);
break;
}
spa_prop_add_list(*nvp, prop, strval, 0, src);
kmem_free(strval, za.za_num_integers);
break;
default:
break;
}
}
zap_cursor_fini(&zc);
mutex_exit(&spa->spa_props_lock);
out:
if (err && err != ENOENT) {
nvlist_free(*nvp);
*nvp = NULL;
return (err);
}
return (0);
}
/*
* Validate the given pool properties nvlist and modify the list
* for the property values to be set.
*/
static int
spa_prop_validate(spa_t *spa, nvlist_t *props)
{
nvpair_t *elem;
int error = 0, reset_bootfs = 0;
uint64_t objnum = 0;
boolean_t has_feature = B_FALSE;
elem = NULL;
while ((elem = nvlist_next_nvpair(props, elem)) != NULL) {
uint64_t intval;
char *strval, *slash, *check, *fname;
const char *propname = nvpair_name(elem);
zpool_prop_t prop = zpool_name_to_prop(propname);
switch (prop) {
case ZPOOL_PROP_INVAL:
if (!zpool_prop_feature(propname)) {
error = SET_ERROR(EINVAL);
break;
}
/*
* Sanitize the input.
*/
if (nvpair_type(elem) != DATA_TYPE_UINT64) {
error = SET_ERROR(EINVAL);
break;
}
if (nvpair_value_uint64(elem, &intval) != 0) {
error = SET_ERROR(EINVAL);
break;
}
if (intval != 0) {
error = SET_ERROR(EINVAL);
break;
}
fname = strchr(propname, '@') + 1;
if (zfeature_lookup_name(fname, NULL) != 0) {
error = SET_ERROR(EINVAL);
break;
}
has_feature = B_TRUE;
break;
case ZPOOL_PROP_VERSION:
error = nvpair_value_uint64(elem, &intval);
if (!error &&
(intval < spa_version(spa) ||
intval > SPA_VERSION_BEFORE_FEATURES ||
has_feature))
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_DELEGATION:
case ZPOOL_PROP_AUTOREPLACE:
case ZPOOL_PROP_LISTSNAPS:
case ZPOOL_PROP_AUTOEXPAND:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_MULTIHOST:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > 1)
error = SET_ERROR(EINVAL);
if (!error && !spa_get_hostid())
error = SET_ERROR(ENOTSUP);
break;
case ZPOOL_PROP_BOOTFS:
/*
* If the pool version is less than SPA_VERSION_BOOTFS,
* or the pool is still being created (version == 0),
* the bootfs property cannot be set.
*/
if (spa_version(spa) < SPA_VERSION_BOOTFS) {
error = SET_ERROR(ENOTSUP);
break;
}
/*
* Make sure the vdev config is bootable
*/
if (!vdev_is_bootable(spa->spa_root_vdev)) {
error = SET_ERROR(ENOTSUP);
break;
}
reset_bootfs = 1;
error = nvpair_value_string(elem, &strval);
if (!error) {
objset_t *os;
uint64_t propval;
if (strval == NULL || strval[0] == '\0') {
objnum = zpool_prop_default_numeric(
ZPOOL_PROP_BOOTFS);
break;
}
error = dmu_objset_hold(strval, FTAG, &os);
if (error)
break;
/*
* Must be ZPL, and its property settings
* must be supported by GRUB (compression
* is not gzip, and large blocks or large
* dnodes are not used).
*/
if (dmu_objset_type(os) != DMU_OST_ZFS) {
error = SET_ERROR(ENOTSUP);
} else if ((error =
dsl_prop_get_int_ds(dmu_objset_ds(os),
zfs_prop_to_name(ZFS_PROP_COMPRESSION),
&propval)) == 0 &&
!BOOTFS_COMPRESS_VALID(propval)) {
error = SET_ERROR(ENOTSUP);
} else if ((error =
dsl_prop_get_int_ds(dmu_objset_ds(os),
zfs_prop_to_name(ZFS_PROP_DNODESIZE),
&propval)) == 0 &&
propval != ZFS_DNSIZE_LEGACY) {
error = SET_ERROR(ENOTSUP);
} else {
objnum = dmu_objset_id(os);
}
dmu_objset_rele(os, FTAG);
}
break;
case ZPOOL_PROP_FAILUREMODE:
error = nvpair_value_uint64(elem, &intval);
if (!error && intval > ZIO_FAILURE_MODE_PANIC)
error = SET_ERROR(EINVAL);
/*
* This is a special case which only occurs when
* the pool has completely failed. This allows
* the user to change the in-core failmode property
* without syncing it out to disk (I/Os might
* currently be blocked). We do this by returning
* EIO to the caller (spa_prop_set) to trick it
* into thinking we encountered a property validation
* error.
*/
if (!error && spa_suspended(spa)) {
spa->spa_failmode = intval;
error = SET_ERROR(EIO);
}
break;
case ZPOOL_PROP_CACHEFILE:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
if (strval[0] == '\0')
break;
if (strcmp(strval, "none") == 0)
break;
if (strval[0] != '/') {
error = SET_ERROR(EINVAL);
break;
}
slash = strrchr(strval, '/');
ASSERT(slash != NULL);
if (slash[1] == '\0' || strcmp(slash, "/.") == 0 ||
strcmp(slash, "/..") == 0)
error = SET_ERROR(EINVAL);
break;
case ZPOOL_PROP_COMMENT:
if ((error = nvpair_value_string(elem, &strval)) != 0)
break;
for (check = strval; *check != '\0'; check++) {
if (!isprint(*check)) {
error = SET_ERROR(EINVAL);
break;
}
}
if (strlen(strval) > ZPROP_MAX_COMMENT)
error = SET_ERROR(E2BIG);
break;
case ZPOOL_PROP_DEDUPDITTO:
if (spa_version(spa) < SPA_VERSION_DEDUP)
error = SET_ERROR(ENOTSUP);
else
error = nvpair_value_uint64(elem, &intval);
if (error == 0 &&
intval != 0 && intval < ZIO_DEDUPDITTO_MIN)
error = SET_ERROR(EINVAL);
break;
default:
break;
}
if (error)
break;
}
if (!error && reset_bootfs) {
error = nvlist_remove(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), DATA_TYPE_STRING);
if (!error) {
error = nvlist_add_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), objnum);
}
}
return (error);
}
void
spa_configfile_set(spa_t *spa, nvlist_t *nvp, boolean_t need_sync)
{
char *cachefile;
spa_config_dirent_t *dp;
if (nvlist_lookup_string(nvp, zpool_prop_to_name(ZPOOL_PROP_CACHEFILE),
&cachefile) != 0)
return;
dp = kmem_alloc(sizeof (spa_config_dirent_t),
KM_SLEEP);
if (cachefile[0] == '\0')
dp->scd_path = spa_strdup(spa_config_path);
else if (strcmp(cachefile, "none") == 0)
dp->scd_path = NULL;
else
dp->scd_path = spa_strdup(cachefile);
list_insert_head(&spa->spa_config_list, dp);
if (need_sync)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
int
spa_prop_set(spa_t *spa, nvlist_t *nvp)
{
int error;
nvpair_t *elem = NULL;
boolean_t need_sync = B_FALSE;
if ((error = spa_prop_validate(spa, nvp)) != 0)
return (error);
while ((elem = nvlist_next_nvpair(nvp, elem)) != NULL) {
zpool_prop_t prop = zpool_name_to_prop(nvpair_name(elem));
if (prop == ZPOOL_PROP_CACHEFILE ||
prop == ZPOOL_PROP_ALTROOT ||
prop == ZPOOL_PROP_READONLY)
continue;
if (prop == ZPOOL_PROP_VERSION || prop == ZPOOL_PROP_INVAL) {
uint64_t ver;
if (prop == ZPOOL_PROP_VERSION) {
VERIFY(nvpair_value_uint64(elem, &ver) == 0);
} else {
ASSERT(zpool_prop_feature(nvpair_name(elem)));
ver = SPA_VERSION_FEATURES;
need_sync = B_TRUE;
}
/* Save time if the version is already set. */
if (ver == spa_version(spa))
continue;
/*
* In addition to the pool directory object, we might
* create the pool properties object, the features for
* read object, the features for write object, or the
* feature descriptions object.
*/
error = dsl_sync_task(spa->spa_name, NULL,
spa_sync_version, &ver,
6, ZFS_SPACE_CHECK_RESERVED);
if (error)
return (error);
continue;
}
need_sync = B_TRUE;
break;
}
if (need_sync) {
return (dsl_sync_task(spa->spa_name, NULL, spa_sync_props,
nvp, 6, ZFS_SPACE_CHECK_RESERVED));
}
return (0);
}
/*
* If the bootfs property value is dsobj, clear it.
*/
void
spa_prop_clear_bootfs(spa_t *spa, uint64_t dsobj, dmu_tx_t *tx)
{
if (spa->spa_bootfs == dsobj && spa->spa_pool_props_object != 0) {
VERIFY(zap_remove(spa->spa_meta_objset,
spa->spa_pool_props_object,
zpool_prop_to_name(ZPOOL_PROP_BOOTFS), tx) == 0);
spa->spa_bootfs = 0;
}
}
/*ARGSUSED*/
static int
spa_change_guid_check(void *arg, dmu_tx_t *tx)
{
ASSERTV(uint64_t *newguid = arg);
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
vdev_t *rvd = spa->spa_root_vdev;
uint64_t vdev_state;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
vdev_state = rvd->vdev_state;
spa_config_exit(spa, SCL_STATE, FTAG);
if (vdev_state != VDEV_STATE_HEALTHY)
return (SET_ERROR(ENXIO));
ASSERT3U(spa_guid(spa), !=, *newguid);
return (0);
}
static void
spa_change_guid_sync(void *arg, dmu_tx_t *tx)
{
uint64_t *newguid = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
uint64_t oldguid;
vdev_t *rvd = spa->spa_root_vdev;
oldguid = spa_guid(spa);
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
rvd->vdev_guid = *newguid;
rvd->vdev_guid_sum += (*newguid - oldguid);
vdev_config_dirty(rvd);
spa_config_exit(spa, SCL_STATE, FTAG);
spa_history_log_internal(spa, "guid change", tx, "old=%llu new=%llu",
oldguid, *newguid);
}
/*
* Change the GUID for the pool. This is done so that we can later
* re-import a pool built from a clone of our own vdevs. We will modify
* the root vdev's guid, our own pool guid, and then mark all of our
* vdevs dirty. Note that we must make sure that all our vdevs are
* online when we do this, or else any vdevs that weren't present
* would be orphaned from our pool. We are also going to issue a
* sysevent to update any watchers.
*/
int
spa_change_guid(spa_t *spa)
{
int error;
uint64_t guid;
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
guid = spa_generate_guid(NULL);
error = dsl_sync_task(spa->spa_name, spa_change_guid_check,
spa_change_guid_sync, &guid, 5, ZFS_SPACE_CHECK_RESERVED);
if (error == 0) {
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_REGUID);
}
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* ==========================================================================
* SPA state manipulation (open/create/destroy/import/export)
* ==========================================================================
*/
static int
spa_error_entry_compare(const void *a, const void *b)
{
const spa_error_entry_t *sa = (const spa_error_entry_t *)a;
const spa_error_entry_t *sb = (const spa_error_entry_t *)b;
int ret;
ret = memcmp(&sa->se_bookmark, &sb->se_bookmark,
sizeof (zbookmark_phys_t));
return (AVL_ISIGN(ret));
}
/*
* Utility function which retrieves copies of the current logs and
* re-initializes them in the process.
*/
void
spa_get_errlists(spa_t *spa, avl_tree_t *last, avl_tree_t *scrub)
{
ASSERT(MUTEX_HELD(&spa->spa_errlist_lock));
bcopy(&spa->spa_errlist_last, last, sizeof (avl_tree_t));
bcopy(&spa->spa_errlist_scrub, scrub, sizeof (avl_tree_t));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
}
static void
spa_taskqs_init(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
const zio_taskq_info_t *ztip = &zio_taskqs[t][q];
enum zti_modes mode = ztip->zti_mode;
uint_t value = ztip->zti_value;
uint_t count = ztip->zti_count;
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
char name[32];
uint_t flags = 0;
boolean_t batch = B_FALSE;
if (mode == ZTI_MODE_NULL) {
tqs->stqs_count = 0;
tqs->stqs_taskq = NULL;
return;
}
ASSERT3U(count, >, 0);
tqs->stqs_count = count;
tqs->stqs_taskq = kmem_alloc(count * sizeof (taskq_t *), KM_SLEEP);
switch (mode) {
case ZTI_MODE_FIXED:
ASSERT3U(value, >=, 1);
value = MAX(value, 1);
flags |= TASKQ_DYNAMIC;
break;
case ZTI_MODE_BATCH:
batch = B_TRUE;
flags |= TASKQ_THREADS_CPU_PCT;
value = MIN(zio_taskq_batch_pct, 100);
break;
default:
panic("unrecognized mode for %s_%s taskq (%u:%u) in "
"spa_activate()",
zio_type_name[t], zio_taskq_types[q], mode, value);
break;
}
for (uint_t i = 0; i < count; i++) {
taskq_t *tq;
if (count > 1) {
(void) snprintf(name, sizeof (name), "%s_%s_%u",
zio_type_name[t], zio_taskq_types[q], i);
} else {
(void) snprintf(name, sizeof (name), "%s_%s",
zio_type_name[t], zio_taskq_types[q]);
}
if (zio_taskq_sysdc && spa->spa_proc != &p0) {
if (batch)
flags |= TASKQ_DC_BATCH;
tq = taskq_create_sysdc(name, value, 50, INT_MAX,
spa->spa_proc, zio_taskq_basedc, flags);
} else {
pri_t pri = maxclsyspri;
/*
* The write issue taskq can be extremely CPU
* intensive. Run it at slightly less important
* priority than the other taskqs. Under Linux this
* means incrementing the priority value; on platforms
* like illumos it should be decremented.
*/
if (t == ZIO_TYPE_WRITE && q == ZIO_TASKQ_ISSUE)
pri++;
tq = taskq_create_proc(name, value, pri, 50,
INT_MAX, spa->spa_proc, flags);
}
tqs->stqs_taskq[i] = tq;
}
}
static void
spa_taskqs_fini(spa_t *spa, zio_type_t t, zio_taskq_type_t q)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
if (tqs->stqs_taskq == NULL) {
ASSERT3U(tqs->stqs_count, ==, 0);
return;
}
for (uint_t i = 0; i < tqs->stqs_count; i++) {
ASSERT3P(tqs->stqs_taskq[i], !=, NULL);
taskq_destroy(tqs->stqs_taskq[i]);
}
kmem_free(tqs->stqs_taskq, tqs->stqs_count * sizeof (taskq_t *));
tqs->stqs_taskq = NULL;
}
/*
* Dispatch a task to the appropriate taskq for the ZFS I/O type and priority.
* Note that a type may have multiple discrete taskqs to avoid lock contention
* on the taskq itself. In that case we choose which taskq to use at random by using
* the low bits of gethrtime().
*/
void
spa_taskq_dispatch_ent(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags, taskq_ent_t *ent)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
taskq_dispatch_ent(tq, func, arg, flags, ent);
}
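/*
 * Editorial illustration, not part of spa.c: the "low bits of gethrtime()
 * modulo the taskq count" selection described above, sketched in user space
 * with clock_gettime(CLOCK_MONOTONIC) standing in for gethrtime().  It shows
 * only the index selection, not the kernel taskq machinery.
 */
#include <stdio.h>
#include <stdint.h>
#include <time.h>

static uint64_t
hrtime_stub(void)
{
	struct timespec ts;

	(void) clock_gettime(CLOCK_MONOTONIC, &ts);
	return ((uint64_t)ts.tv_sec * 1000000000ULL + (uint64_t)ts.tv_nsec);
}

int
main(void)
{
	const uint64_t tq_count = 8;	/* e.g. ZTI_P(12, 8): eight taskqs */
	uint64_t idx = hrtime_stub() % tq_count;

	(void) printf("dispatching to taskq %llu of %llu\n",
	    (unsigned long long)idx, (unsigned long long)tq_count);
	return (0);
}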
/*
* Same as spa_taskq_dispatch_ent() but block on the task until completion.
*/
void
spa_taskq_dispatch_sync(spa_t *spa, zio_type_t t, zio_taskq_type_t q,
task_func_t *func, void *arg, uint_t flags)
{
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
taskq_t *tq;
taskqid_t id;
ASSERT3P(tqs->stqs_taskq, !=, NULL);
ASSERT3U(tqs->stqs_count, !=, 0);
if (tqs->stqs_count == 1) {
tq = tqs->stqs_taskq[0];
} else {
tq = tqs->stqs_taskq[((uint64_t)gethrtime()) % tqs->stqs_count];
}
id = taskq_dispatch(tq, func, arg, flags);
if (id)
taskq_wait_id(tq, id);
}
static void
spa_create_zio_taskqs(spa_t *spa)
{
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_init(spa, t, q);
}
}
}
/*
* Disabled until spa_thread() can be adapted for Linux.
*/
#undef HAVE_SPA_THREAD
#if defined(_KERNEL) && defined(HAVE_SPA_THREAD)
static void
spa_thread(void *arg)
{
callb_cpr_t cprinfo;
spa_t *spa = arg;
user_t *pu = PTOU(curproc);
CALLB_CPR_INIT(&cprinfo, &spa->spa_proc_lock, callb_generic_cpr,
spa->spa_name);
ASSERT(curproc != &p0);
(void) snprintf(pu->u_psargs, sizeof (pu->u_psargs),
"zpool-%s", spa->spa_name);
(void) strlcpy(pu->u_comm, pu->u_psargs, sizeof (pu->u_comm));
/* bind this thread to the requested psrset */
if (zio_taskq_psrset_bind != PS_NONE) {
pool_lock();
mutex_enter(&cpu_lock);
mutex_enter(&pidlock);
mutex_enter(&curproc->p_lock);
if (cpupart_bind_thread(curthread, zio_taskq_psrset_bind,
0, NULL, NULL) == 0) {
curthread->t_bind_pset = zio_taskq_psrset_bind;
} else {
cmn_err(CE_WARN,
"Couldn't bind process for zfs pool \"%s\" to "
"pset %d\n", spa->spa_name, zio_taskq_psrset_bind);
}
mutex_exit(&curproc->p_lock);
mutex_exit(&pidlock);
mutex_exit(&cpu_lock);
pool_unlock();
}
if (zio_taskq_sysdc) {
sysdc_thread_enter(curthread, 100, 0);
}
spa->spa_proc = curproc;
spa->spa_did = curthread->t_did;
spa_create_zio_taskqs(spa);
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_CREATED);
spa->spa_proc_state = SPA_PROC_ACTIVE;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_SAFE_BEGIN(&cprinfo);
while (spa->spa_proc_state == SPA_PROC_ACTIVE)
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
CALLB_CPR_SAFE_END(&cprinfo, &spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_DEACTIVATE);
spa->spa_proc_state = SPA_PROC_GONE;
spa->spa_proc = &p0;
cv_broadcast(&spa->spa_proc_cv);
CALLB_CPR_EXIT(&cprinfo); /* drops spa_proc_lock */
mutex_enter(&curproc->p_lock);
lwp_exit();
}
#endif
/*
* Activate an uninitialized pool.
*/
static void
spa_activate(spa_t *spa, int mode)
{
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_mode = mode;
spa->spa_normal_class = metaslab_class_create(spa, zfs_metaslab_ops);
spa->spa_log_class = metaslab_class_create(spa, zfs_metaslab_ops);
/* Try to create a covering process */
mutex_enter(&spa->spa_proc_lock);
ASSERT(spa->spa_proc_state == SPA_PROC_NONE);
ASSERT(spa->spa_proc == &p0);
spa->spa_did = 0;
#ifdef HAVE_SPA_THREAD
/* Only create a process if we're going to be around a while. */
if (spa_create_process && strcmp(spa->spa_name, TRYIMPORT_NAME) != 0) {
if (newproc(spa_thread, (caddr_t)spa, syscid, maxclsyspri,
NULL, 0) == 0) {
spa->spa_proc_state = SPA_PROC_CREATED;
while (spa->spa_proc_state == SPA_PROC_CREATED) {
cv_wait(&spa->spa_proc_cv,
&spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
ASSERT(spa->spa_proc != &p0);
ASSERT(spa->spa_did != 0);
} else {
#ifdef _KERNEL
cmn_err(CE_WARN,
"Couldn't create process for zfs pool \"%s\"\n",
spa->spa_name);
#endif
}
}
#endif /* HAVE_SPA_THREAD */
mutex_exit(&spa->spa_proc_lock);
/* If we didn't create a process, we need to create our taskqs. */
if (spa->spa_proc == &p0) {
spa_create_zio_taskqs(spa);
}
+ for (size_t i = 0; i < TXG_SIZE; i++)
+ spa->spa_txg_zio[i] = zio_root(spa, NULL, NULL, 0);
+
list_create(&spa->spa_config_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_config_dirty_node));
list_create(&spa->spa_evicting_os_list, sizeof (objset_t),
offsetof(objset_t, os_evicting_node));
list_create(&spa->spa_state_dirty_list, sizeof (vdev_t),
offsetof(vdev_t, vdev_state_dirty_node));
txg_list_create(&spa->spa_vdev_txg_list, spa,
offsetof(struct vdev, vdev_txg_node));
avl_create(&spa->spa_errlist_scrub,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
avl_create(&spa->spa_errlist_last,
spa_error_entry_compare, sizeof (spa_error_entry_t),
offsetof(spa_error_entry_t, se_avl));
spa_keystore_init(&spa->spa_keystore);
/*
* This taskq is used to perform zvol-minor-related tasks
* asynchronously. This has several advantages, including easy
* resolution of various deadlocks (zfsonlinux bug #3681).
*
* The taskq must be single threaded to ensure tasks are always
* processed in the order in which they were dispatched.
*
* A taskq per pool allows one to keep the pools independent.
* This way if one pool is suspended, it will not impact another.
*
* The preferred location to dispatch a zvol minor task is a sync
* task. In this context, there is easy access to the spa_t and minimal
* error handling is required because the sync task must succeed.
*/
spa->spa_zvol_taskq = taskq_create("z_zvol", 1, defclsyspri,
1, INT_MAX, 0);
/*
* Taskq dedicated to prefetcher threads: this is used to prevent the
* pool traverse code from monopolizing the global (and limited)
* system_taskq by inappropriately scheduling long running tasks on it.
*/
spa->spa_prefetch_taskq = taskq_create("z_prefetch", boot_ncpus,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
/*
* The taskq to upgrade datasets in this pool. Currently used by
* feature SPA_FEATURE_USEROBJ_ACCOUNTING/SPA_FEATURE_PROJECT_QUOTA.
*/
spa->spa_upgrade_taskq = taskq_create("z_upgrade", boot_ncpus,
defclsyspri, 1, INT_MAX, TASKQ_DYNAMIC);
}
/*
* Opposite of spa_activate().
*/
static void
spa_deactivate(spa_t *spa)
{
ASSERT(spa->spa_sync_on == B_FALSE);
ASSERT(spa->spa_dsl_pool == NULL);
ASSERT(spa->spa_root_vdev == NULL);
ASSERT(spa->spa_async_zio_root == NULL);
ASSERT(spa->spa_state != POOL_STATE_UNINITIALIZED);
spa_evicting_os_wait(spa);
if (spa->spa_zvol_taskq) {
taskq_destroy(spa->spa_zvol_taskq);
spa->spa_zvol_taskq = NULL;
}
if (spa->spa_prefetch_taskq) {
taskq_destroy(spa->spa_prefetch_taskq);
spa->spa_prefetch_taskq = NULL;
}
if (spa->spa_upgrade_taskq) {
taskq_destroy(spa->spa_upgrade_taskq);
spa->spa_upgrade_taskq = NULL;
}
txg_list_destroy(&spa->spa_vdev_txg_list);
list_destroy(&spa->spa_config_dirty_list);
list_destroy(&spa->spa_evicting_os_list);
list_destroy(&spa->spa_state_dirty_list);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
for (int t = 0; t < ZIO_TYPES; t++) {
for (int q = 0; q < ZIO_TASKQ_TYPES; q++) {
spa_taskqs_fini(spa, t, q);
}
}
+ for (size_t i = 0; i < TXG_SIZE; i++) {
+ ASSERT3P(spa->spa_txg_zio[i], !=, NULL);
+ VERIFY0(zio_wait(spa->spa_txg_zio[i]));
+ spa->spa_txg_zio[i] = NULL;
+ }
+
metaslab_class_destroy(spa->spa_normal_class);
spa->spa_normal_class = NULL;
metaslab_class_destroy(spa->spa_log_class);
spa->spa_log_class = NULL;
/*
* If this was part of an import or the open otherwise failed, we may
* still have errors left in the queues. Empty them just in case.
*/
spa_errlog_drain(spa);
avl_destroy(&spa->spa_errlist_scrub);
avl_destroy(&spa->spa_errlist_last);
spa_keystore_fini(&spa->spa_keystore);
spa->spa_state = POOL_STATE_UNINITIALIZED;
mutex_enter(&spa->spa_proc_lock);
if (spa->spa_proc_state != SPA_PROC_NONE) {
ASSERT(spa->spa_proc_state == SPA_PROC_ACTIVE);
spa->spa_proc_state = SPA_PROC_DEACTIVATE;
cv_broadcast(&spa->spa_proc_cv);
while (spa->spa_proc_state == SPA_PROC_DEACTIVATE) {
ASSERT(spa->spa_proc != &p0);
cv_wait(&spa->spa_proc_cv, &spa->spa_proc_lock);
}
ASSERT(spa->spa_proc_state == SPA_PROC_GONE);
spa->spa_proc_state = SPA_PROC_NONE;
}
ASSERT(spa->spa_proc == &p0);
mutex_exit(&spa->spa_proc_lock);
/*
* We want to make sure spa_thread() has actually exited the ZFS
* module, so that the module can't be unloaded out from underneath
* it.
*/
if (spa->spa_did != 0) {
thread_join(spa->spa_did);
spa->spa_did = 0;
}
}
/*
* Verify a pool configuration, and construct the vdev tree appropriately. This
* will create all the necessary vdevs in the appropriate layout, with each vdev
* in the CLOSED state. This will prep the pool before open/creation/import.
* All vdev validation is done by the vdev_alloc() routine.
*/
static int
spa_config_parse(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent,
uint_t id, int atype)
{
nvlist_t **child;
uint_t children;
int error;
if ((error = vdev_alloc(spa, vdp, nv, parent, id, atype)) != 0)
return (error);
if ((*vdp)->vdev_ops->vdev_op_leaf)
return (0);
error = nvlist_lookup_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
&child, &children);
if (error == ENOENT)
return (0);
if (error) {
vdev_free(*vdp);
*vdp = NULL;
return (SET_ERROR(EINVAL));
}
for (int c = 0; c < children; c++) {
vdev_t *vd;
if ((error = spa_config_parse(spa, &vd, child[c], *vdp, c,
atype)) != 0) {
vdev_free(*vdp);
*vdp = NULL;
return (error);
}
}
ASSERT(*vdp != NULL);
return (0);
}
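/*
 * Editorial illustration, not part of spa.c: spa_config_parse() recurses over
 * a nested nvlist via ZPOOL_CONFIG_CHILDREN.  The user-space sketch below
 * walks the same kind of tree and merely counts leaves; the "children" and
 * "type" key strings are placeholders for this sketch.  Link with -lnvpair.
 */
#include <stdio.h>
#include <libnvpair.h>

static unsigned
count_leaves(nvlist_t *nv)
{
	nvlist_t **child;
	uint_t children;
	unsigned total = 0;

	if (nvlist_lookup_nvlist_array(nv, "children", &child, &children) != 0)
		return (1);	/* no children array: treat as a leaf */

	for (uint_t c = 0; c < children; c++)
		total += count_leaves(child[c]);
	return (total);
}

int
main(void)
{
	nvlist_t *root, *kid[2];

	(void) nvlist_alloc(&root, NV_UNIQUE_NAME, 0);
	for (int i = 0; i < 2; i++) {
		(void) nvlist_alloc(&kid[i], NV_UNIQUE_NAME, 0);
		(void) nvlist_add_string(kid[i], "type", "disk");
	}
	(void) nvlist_add_nvlist_array(root, "children", kid, 2);

	(void) printf("leaves = %u\n", count_leaves(root));	/* prints 2 */
	nvlist_free(kid[0]);
	nvlist_free(kid[1]);
	nvlist_free(root);
	return (0);
}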
/*
* Opposite of spa_load().
*/
static void
spa_unload(spa_t *spa)
{
int i;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
/*
* Stop async tasks.
*/
spa_async_suspend(spa);
/*
* Stop syncing.
*/
if (spa->spa_sync_on) {
txg_sync_stop(spa->spa_dsl_pool);
spa->spa_sync_on = B_FALSE;
}
/*
* Even though vdev_free() also calls vdev_metaslab_fini, we need
* to call it earlier, before we wait for async i/o to complete.
* This ensures that there is no async metaslab prefetching, by
* calling taskq_wait(mg_taskq).
*/
if (spa->spa_root_vdev != NULL) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
for (int c = 0; c < spa->spa_root_vdev->vdev_children; c++)
vdev_metaslab_fini(spa->spa_root_vdev->vdev_child[c]);
spa_config_exit(spa, SCL_ALL, FTAG);
}
if (spa->spa_mmp.mmp_thread)
mmp_thread_stop(spa);
/*
* Wait for any outstanding async I/O to complete.
*/
if (spa->spa_async_zio_root != NULL) {
for (int i = 0; i < max_ncpus; i++)
(void) zio_wait(spa->spa_async_zio_root[i]);
kmem_free(spa->spa_async_zio_root, max_ncpus * sizeof (void *));
spa->spa_async_zio_root = NULL;
}
+ if (spa->spa_vdev_removal != NULL) {
+ spa_vdev_removal_destroy(spa->spa_vdev_removal);
+ spa->spa_vdev_removal = NULL;
+ }
+
+ spa_condense_fini(spa);
+
bpobj_close(&spa->spa_deferred_bpobj);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Close all vdevs.
*/
if (spa->spa_root_vdev)
vdev_free(spa->spa_root_vdev);
ASSERT(spa->spa_root_vdev == NULL);
/*
* Close the dsl pool.
*/
if (spa->spa_dsl_pool) {
dsl_pool_close(spa->spa_dsl_pool);
spa->spa_dsl_pool = NULL;
spa->spa_meta_objset = NULL;
}
ddt_unload(spa);
/*
* Drop and purge level 2 cache
*/
spa_l2cache_drop(spa);
for (i = 0; i < spa->spa_spares.sav_count; i++)
vdev_free(spa->spa_spares.sav_vdevs[i]);
if (spa->spa_spares.sav_vdevs) {
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
spa->spa_spares.sav_vdevs = NULL;
}
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
}
spa->spa_spares.sav_count = 0;
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vdev_clear_stats(spa->spa_l2cache.sav_vdevs[i]);
vdev_free(spa->spa_l2cache.sav_vdevs[i]);
}
if (spa->spa_l2cache.sav_vdevs) {
kmem_free(spa->spa_l2cache.sav_vdevs,
spa->spa_l2cache.sav_count * sizeof (void *));
spa->spa_l2cache.sav_vdevs = NULL;
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
}
spa->spa_l2cache.sav_count = 0;
spa->spa_async_suspended = 0;
+ spa->spa_indirect_vdevs_loaded = B_FALSE;
+
if (spa->spa_comment != NULL) {
spa_strfree(spa->spa_comment);
spa->spa_comment = NULL;
}
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* Load (or re-load) the current list of vdevs describing the active spares for
* this pool. When this is called, we have some form of basic information in
* 'spa_spares.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
*/
-static void
+void
spa_load_spares(spa_t *spa)
{
nvlist_t **spares;
uint_t nspares;
int i;
vdev_t *vd, *tvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* First, close and free any existing spare vdevs.
*/
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
/* Undo the call to spa_activate() below */
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL && tvd->vdev_isspare)
spa_spare_remove(tvd);
vdev_close(vd);
vdev_free(vd);
}
if (spa->spa_spares.sav_vdevs)
kmem_free(spa->spa_spares.sav_vdevs,
spa->spa_spares.sav_count * sizeof (void *));
if (spa->spa_spares.sav_config == NULL)
nspares = 0;
else
VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
spa->spa_spares.sav_count = (int)nspares;
spa->spa_spares.sav_vdevs = NULL;
if (nspares == 0)
return;
/*
* Construct the array of vdevs, opening them to get status in the
* process. For each spare, there are potentially two different vdev_t
* structures associated with it: one in the list of spares (used only
* for basic validation purposes) and one in the active vdev
* configuration (if it's spared in). During this phase we open and
* validate each vdev on the spare list. If the vdev also exists in the
* active configuration, then we also mark this vdev as an active spare.
*/
spa->spa_spares.sav_vdevs = kmem_zalloc(nspares * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++) {
VERIFY(spa_config_parse(spa, &vd, spares[i], NULL, 0,
VDEV_ALLOC_SPARE) == 0);
ASSERT(vd != NULL);
spa->spa_spares.sav_vdevs[i] = vd;
if ((tvd = spa_lookup_by_guid(spa, vd->vdev_guid,
B_FALSE)) != NULL) {
if (!tvd->vdev_isspare)
spa_spare_add(tvd);
/*
* We only mark the spare active if we were successfully
* able to load the vdev. Otherwise, importing a pool
* with a bad active spare would result in strange
* behavior, because multiple pools would think the spare
* is actively in use.
*
* There is a vulnerability here to an equally bizarre
* circumstance, where a dead active spare is later
* brought back to life (onlined or otherwise). Given
* the rarity of this scenario, and the extra complexity
* it adds, we ignore the possibility.
*/
if (!vdev_is_dead(tvd))
spa_spare_activate(tvd);
}
vd->vdev_top = vd;
vd->vdev_aux = &spa->spa_spares;
if (vdev_open(vd) != 0)
continue;
if (vdev_validate_aux(vd) == 0)
spa_spare_add(vd);
}
/*
* Recompute the stashed list of spares, with status information
* this time.
*/
VERIFY(nvlist_remove(spa->spa_spares.sav_config, ZPOOL_CONFIG_SPARES,
DATA_TYPE_NVLIST_ARRAY) == 0);
spares = kmem_alloc(spa->spa_spares.sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < spa->spa_spares.sav_count; i++)
spares[i] = vdev_config_generate(spa,
spa->spa_spares.sav_vdevs[i], B_TRUE, VDEV_CONFIG_SPARE);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, spa->spa_spares.sav_count) == 0);
for (i = 0; i < spa->spa_spares.sav_count; i++)
nvlist_free(spares[i]);
kmem_free(spares, spa->spa_spares.sav_count * sizeof (void *));
}
/*
* Load (or re-load) the current list of vdevs describing the active l2cache for
* this pool. When this is called, we have some form of basic information in
* 'spa_l2cache.sav_config'. We parse this into vdevs, try to open them, and
* then re-generate a more complete list including status information.
* Devices which are already active have their details maintained, and are
* not re-opened.
*/
-static void
+void
spa_load_l2cache(spa_t *spa)
{
nvlist_t **l2cache = NULL;
uint_t nl2cache;
int i, j, oldnvdevs;
uint64_t guid;
vdev_t *vd, **oldvdevs, **newvdevs;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
oldvdevs = sav->sav_vdevs;
oldnvdevs = sav->sav_count;
sav->sav_vdevs = NULL;
sav->sav_count = 0;
if (sav->sav_config == NULL) {
nl2cache = 0;
newvdevs = NULL;
goto out;
}
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
newvdevs = kmem_alloc(nl2cache * sizeof (void *), KM_SLEEP);
/*
* Process new nvlist of vdevs.
*/
for (i = 0; i < nl2cache; i++) {
VERIFY(nvlist_lookup_uint64(l2cache[i], ZPOOL_CONFIG_GUID,
&guid) == 0);
newvdevs[i] = NULL;
for (j = 0; j < oldnvdevs; j++) {
vd = oldvdevs[j];
if (vd != NULL && guid == vd->vdev_guid) {
/*
* Retain previous vdev for add/remove ops.
*/
newvdevs[i] = vd;
oldvdevs[j] = NULL;
break;
}
}
if (newvdevs[i] == NULL) {
/*
* Create new vdev
*/
VERIFY(spa_config_parse(spa, &vd, l2cache[i], NULL, 0,
VDEV_ALLOC_L2CACHE) == 0);
ASSERT(vd != NULL);
newvdevs[i] = vd;
/*
* Commit this vdev as an l2cache device,
* even if it fails to open.
*/
spa_l2cache_add(vd);
vd->vdev_top = vd;
vd->vdev_aux = sav;
spa_l2cache_activate(vd);
if (vdev_open(vd) != 0)
continue;
(void) vdev_validate_aux(vd);
if (!vdev_is_dead(vd))
l2arc_add_vdev(spa, vd);
}
}
sav->sav_vdevs = newvdevs;
sav->sav_count = (int)nl2cache;
/*
* Recompute the stashed list of l2cache devices, with status
* information this time.
*/
VERIFY(nvlist_remove(sav->sav_config, ZPOOL_CONFIG_L2CACHE,
DATA_TYPE_NVLIST_ARRAY) == 0);
if (sav->sav_count > 0)
l2cache = kmem_alloc(sav->sav_count * sizeof (void *),
KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
l2cache[i] = vdev_config_generate(spa,
sav->sav_vdevs[i], B_TRUE, VDEV_CONFIG_L2CACHE);
VERIFY(nvlist_add_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, sav->sav_count) == 0);
out:
/*
* Purge vdevs that were dropped
*/
for (i = 0; i < oldnvdevs; i++) {
uint64_t pool;
vd = oldvdevs[i];
if (vd != NULL) {
ASSERT(vd->vdev_isl2cache);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
vdev_clear_stats(vd);
vdev_free(vd);
}
}
if (oldvdevs)
kmem_free(oldvdevs, oldnvdevs * sizeof (void *));
for (i = 0; i < sav->sav_count; i++)
nvlist_free(l2cache[i]);
if (sav->sav_count)
kmem_free(l2cache, sav->sav_count * sizeof (void *));
}
static int
load_nvlist(spa_t *spa, uint64_t obj, nvlist_t **value)
{
dmu_buf_t *db;
char *packed = NULL;
size_t nvsize = 0;
int error;
*value = NULL;
error = dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db);
if (error)
return (error);
nvsize = *(uint64_t *)db->db_data;
dmu_buf_rele(db, FTAG);
packed = vmem_alloc(nvsize, KM_SLEEP);
error = dmu_read(spa->spa_meta_objset, obj, 0, nvsize, packed,
DMU_READ_PREFETCH);
if (error == 0)
error = nvlist_unpack(packed, nvsize, value, 0);
vmem_free(packed, nvsize);
return (error);
}
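/*
 * Editorial illustration, not part of spa.c: load_nvlist() is the read side
 * of a packed-nvlist round trip.  Assuming user-space libnvpair with native
 * encoding, a minimal pack/unpack sketch looks like the following.  Link
 * with -lnvpair.
 */
#include <stdio.h>
#include <stdlib.h>
#include <libnvpair.h>

int
main(void)
{
	nvlist_t *nvl = NULL, *copy = NULL;
	char *packed = NULL;
	size_t nvsize = 0;
	uint64_t val = 0;

	(void) nvlist_alloc(&nvl, NV_UNIQUE_NAME, 0);
	(void) nvlist_add_uint64(nvl, "txg", 1234);

	/* Serialize the nvlist into a flat buffer (the stored form). */
	if (nvlist_pack(nvl, &packed, &nvsize, NV_ENCODE_NATIVE, 0) != 0)
		return (1);

	/* Deserialize, mirroring the nvlist_unpack() call in load_nvlist(). */
	if (nvlist_unpack(packed, nvsize, &copy, 0) != 0)
		return (1);

	(void) nvlist_lookup_uint64(copy, "txg", &val);
	(void) printf("unpacked txg = %llu\n", (unsigned long long)val);

	free(packed);
	nvlist_free(copy);
	nvlist_free(nvl);
	return (0);
}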
/*
* Checks to see if the given vdev could not be opened, in which case we post a
* sysevent to notify the autoreplace code that the device has been removed.
*/
static void
spa_check_removed(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++)
spa_check_removed(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf && vdev_is_dead(vd) &&
- !vd->vdev_ishole) {
+ vdev_is_concrete(vd)) {
zfs_post_autoreplace(vd->vdev_spa, vd);
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_CHECK);
}
}
static void
spa_config_valid_zaps(vdev_t *vd, vdev_t *mvd)
{
ASSERT3U(vd->vdev_children, ==, mvd->vdev_children);
vd->vdev_top_zap = mvd->vdev_top_zap;
vd->vdev_leaf_zap = mvd->vdev_leaf_zap;
for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_config_valid_zaps(vd->vdev_child[i], mvd->vdev_child[i]);
}
}
/*
* Validate the current config against the MOS config
*/
static boolean_t
spa_config_valid(spa_t *spa, nvlist_t *config)
{
vdev_t *mrvd, *rvd = spa->spa_root_vdev;
nvlist_t *nv;
VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nv) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
VERIFY(spa_config_parse(spa, &mrvd, nv, NULL, 0, VDEV_ALLOC_LOAD) == 0);
ASSERT3U(rvd->vdev_children, ==, mrvd->vdev_children);
/*
* If we're doing a normal import, then build up any additional
* diagnostic information about missing devices in this config.
* We'll pass this up to the user for further processing.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG)) {
nvlist_t **child, *nv;
uint64_t idx = 0;
child = kmem_alloc(rvd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
vdev_t *mtvd = mrvd->vdev_child[c];
if (tvd->vdev_ops == &vdev_missing_ops &&
mtvd->vdev_ops != &vdev_missing_ops &&
mtvd->vdev_islog)
child[idx++] = vdev_config_generate(spa, mtvd,
B_FALSE, 0);
}
if (idx) {
VERIFY(nvlist_add_nvlist_array(nv,
ZPOOL_CONFIG_CHILDREN, child, idx) == 0);
VERIFY(nvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_MISSING_DEVICES, nv) == 0);
for (int i = 0; i < idx; i++)
nvlist_free(child[i]);
}
nvlist_free(nv);
kmem_free(child, rvd->vdev_children * sizeof (char **));
}
/*
* Compare the root vdev tree with the information we have
* from the MOS config (mrvd). Check each top-level vdev
* with the corresponding MOS config top-level (mtvd).
*/
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
vdev_t *mtvd = mrvd->vdev_child[c];
/*
* Resolve any "missing" vdevs in the current configuration.
+ * Also trust the MOS config about any "indirect" vdevs.
* If we find that the MOS config has more accurate information
* about the top-level vdev then use that vdev instead.
*/
- if (tvd->vdev_ops == &vdev_missing_ops &&
- mtvd->vdev_ops != &vdev_missing_ops) {
-
- if (!(spa->spa_import_flags & ZFS_IMPORT_MISSING_LOG))
- continue;
+ if ((tvd->vdev_ops == &vdev_missing_ops &&
+ mtvd->vdev_ops != &vdev_missing_ops) ||
+ (mtvd->vdev_ops == &vdev_indirect_ops &&
+ tvd->vdev_ops != &vdev_indirect_ops)) {
/*
* Device specific actions.
*/
if (mtvd->vdev_islog) {
+ if (!(spa->spa_import_flags &
+ ZFS_IMPORT_MISSING_LOG)) {
+ continue;
+ }
+
spa_set_log_state(spa, SPA_LOG_CLEAR);
- } else {
- /*
- * XXX - once we have 'readonly' pool
- * support we should be able to handle
- * missing data devices by transitioning
- * the pool to readonly.
- */
+ } else if (mtvd->vdev_ops != &vdev_indirect_ops) {
continue;
}
/*
* Swap the missing vdev with the data we were
* able to obtain from the MOS config.
*/
vdev_remove_child(rvd, tvd);
vdev_remove_child(mrvd, mtvd);
vdev_add_child(rvd, mtvd);
vdev_add_child(mrvd, tvd);
- spa_config_exit(spa, SCL_ALL, FTAG);
- vdev_load(mtvd);
- spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
-
vdev_reopen(rvd);
} else {
if (mtvd->vdev_islog) {
/*
* Load the slog device's state from the MOS
* config since it's possible that the label
* does not contain the most up-to-date
* information.
*/
vdev_load_log_state(tvd, mtvd);
vdev_reopen(tvd);
}
/*
* Per-vdev ZAP info is stored exclusively in the MOS.
*/
spa_config_valid_zaps(tvd, mtvd);
}
+
+ /*
+ * Never trust this info from userland; always use what's
+ * in the MOS. This prevents it from getting out of sync
+ * with the rest of the info in the MOS.
+ */
+ tvd->vdev_removing = mtvd->vdev_removing;
+ tvd->vdev_indirect_config = mtvd->vdev_indirect_config;
}
vdev_free(mrvd);
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* Ensure we were able to validate the config.
*/
return (rvd->vdev_guid_sum == spa->spa_uberblock.ub_guid_sum);
}
/*
* Check for missing log devices
*/
static boolean_t
spa_check_logs(spa_t *spa)
{
boolean_t rv = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
switch (spa->spa_log_state) {
default:
break;
case SPA_LOG_MISSING:
/* need to recheck in case slog has been restored */
case SPA_LOG_UNKNOWN:
rv = (dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_check_log_chain, NULL, DS_FIND_CHILDREN) != 0);
if (rv)
spa_set_log_state(spa, SPA_LOG_MISSING);
break;
}
return (rv);
}
static boolean_t
spa_passivate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
boolean_t slog_found = B_FALSE;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
if (!spa_has_slogs(spa))
return (B_FALSE);
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (tvd->vdev_islog) {
metaslab_group_passivate(mg);
slog_found = B_TRUE;
}
}
return (slog_found);
}
static void
spa_activate_log(spa_t *spa)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALLOC, RW_WRITER));
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (tvd->vdev_islog)
metaslab_group_activate(mg);
}
}
int
-spa_offline_log(spa_t *spa)
+spa_reset_logs(spa_t *spa)
{
int error;
- error = dmu_objset_find(spa_name(spa), zil_vdev_offline,
+ error = dmu_objset_find(spa_name(spa), zil_reset,
NULL, DS_FIND_CHILDREN);
if (error == 0) {
/*
* We successfully offlined the log device; sync out the
* current txg so that the "stubby" block can be removed
* by zil_sync().
*/
txg_wait_synced(spa->spa_dsl_pool, 0);
}
return (error);
}
static void
spa_aux_check_removed(spa_aux_vdev_t *sav)
{
for (int i = 0; i < sav->sav_count; i++)
spa_check_removed(sav->sav_vdevs[i]);
}
void
spa_claim_notify(zio_t *zio)
{
spa_t *spa = zio->io_spa;
if (zio->io_error)
return;
mutex_enter(&spa->spa_props_lock); /* any mutex will do */
if (spa->spa_claim_max_txg < zio->io_bp->blk_birth)
spa->spa_claim_max_txg = zio->io_bp->blk_birth;
mutex_exit(&spa->spa_props_lock);
}
typedef struct spa_load_error {
uint64_t sle_meta_count;
uint64_t sle_data_count;
} spa_load_error_t;
static void
spa_load_verify_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
spa_load_error_t *sle = zio->io_private;
dmu_object_type_t type = BP_GET_TYPE(bp);
int error = zio->io_error;
spa_t *spa = zio->io_spa;
abd_free(zio->io_abd);
if (error) {
if ((BP_GET_LEVEL(bp) != 0 || DMU_OT_IS_METADATA(type)) &&
type != DMU_OT_INTENT_LOG)
atomic_inc_64(&sle->sle_meta_count);
else
atomic_inc_64(&sle->sle_data_count);
}
mutex_enter(&spa->spa_scrub_lock);
spa->spa_load_verify_ios--;
cv_broadcast(&spa->spa_scrub_io_cv);
mutex_exit(&spa->spa_scrub_lock);
}
/*
* Maximum number of concurrent scrub i/os to create while verifying
* a pool during import.
*/
int spa_load_verify_maxinflight = 10000;
int spa_load_verify_metadata = B_TRUE;
int spa_load_verify_data = B_TRUE;
/*ARGSUSED*/
static int
spa_load_verify_cb(spa_t *spa, zilog_t *zilog, const blkptr_t *bp,
const zbookmark_phys_t *zb, const dnode_phys_t *dnp, void *arg)
{
if (bp == NULL || BP_IS_HOLE(bp) || BP_IS_EMBEDDED(bp))
return (0);
/*
* Note: normally this routine will not be called if
* spa_load_verify_metadata is not set. However, it may be useful
* to manually set the flag after the traversal has begun.
*/
if (!spa_load_verify_metadata)
return (0);
if (!BP_IS_METADATA(bp) && !spa_load_verify_data)
return (0);
zio_t *rio = arg;
size_t size = BP_GET_PSIZE(bp);
mutex_enter(&spa->spa_scrub_lock);
while (spa->spa_load_verify_ios >= spa_load_verify_maxinflight)
cv_wait(&spa->spa_scrub_io_cv, &spa->spa_scrub_lock);
spa->spa_load_verify_ios++;
mutex_exit(&spa->spa_scrub_lock);
zio_nowait(zio_read(rio, spa, bp, abd_alloc_for_io(size, B_FALSE), size,
spa_load_verify_done, rio->io_private, ZIO_PRIORITY_SCRUB,
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SCRUB | ZIO_FLAG_RAW, zb));
return (0);
}
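/*
 * Editorial illustration, not part of spa.c: the in-flight throttle above
 * (increment a counter under spa_scrub_lock, cv_wait while it is at
 * spa_load_verify_maxinflight, decrement and broadcast in the done callback)
 * is a generic pattern.  A small pthreads sketch with made-up limits follows;
 * compile with -lpthread.
 */
#include <pthread.h>
#include <stdio.h>
#include <unistd.h>

#define	SK_MAXINFLIGHT	4
#define	SK_TOTAL_IOS	16

static pthread_mutex_t sk_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t sk_cv = PTHREAD_COND_INITIALIZER;
static int sk_inflight;

static void *
sk_io_done(void *arg)
{
	(void) arg;
	(void) usleep(1000);		/* stand-in for the async read */
	(void) pthread_mutex_lock(&sk_lock);
	sk_inflight--;			/* spa_load_verify_done() analogue */
	(void) pthread_cond_broadcast(&sk_cv);
	(void) pthread_mutex_unlock(&sk_lock);
	return (NULL);
}

int
main(void)
{
	pthread_t tids[SK_TOTAL_IOS];

	for (int i = 0; i < SK_TOTAL_IOS; i++) {
		(void) pthread_mutex_lock(&sk_lock);
		while (sk_inflight >= SK_MAXINFLIGHT)	/* issue-side throttle */
			(void) pthread_cond_wait(&sk_cv, &sk_lock);
		sk_inflight++;
		(void) pthread_mutex_unlock(&sk_lock);
		(void) pthread_create(&tids[i], NULL, sk_io_done, NULL);
	}
	for (int i = 0; i < SK_TOTAL_IOS; i++)
		(void) pthread_join(tids[i], NULL);
	(void) printf("all verification I/Os drained\n");
	return (0);
}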
/* ARGSUSED */
int
verify_dataset_name_len(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
{
if (dsl_dataset_namelen(ds) >= ZFS_MAX_DATASET_NAME_LEN)
return (SET_ERROR(ENAMETOOLONG));
return (0);
}
static int
spa_load_verify(spa_t *spa)
{
zio_t *rio;
spa_load_error_t sle = { 0 };
zpool_rewind_policy_t policy;
boolean_t verify_ok = B_FALSE;
int error = 0;
zpool_get_rewind_policy(spa->spa_config, &policy);
if (policy.zrp_request & ZPOOL_NEVER_REWIND)
return (0);
dsl_pool_config_enter(spa->spa_dsl_pool, FTAG);
error = dmu_objset_find_dp(spa->spa_dsl_pool,
spa->spa_dsl_pool->dp_root_dir_obj, verify_dataset_name_len, NULL,
DS_FIND_CHILDREN);
dsl_pool_config_exit(spa->spa_dsl_pool, FTAG);
if (error != 0)
return (error);
rio = zio_root(spa, NULL, &sle,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE);
if (spa_load_verify_metadata) {
error = traverse_pool(spa, spa->spa_verify_min_txg,
TRAVERSE_PRE | TRAVERSE_PREFETCH_METADATA |
TRAVERSE_NO_DECRYPT, spa_load_verify_cb, rio);
}
(void) zio_wait(rio);
spa->spa_load_meta_errors = sle.sle_meta_count;
spa->spa_load_data_errors = sle.sle_data_count;
if (!error && sle.sle_meta_count <= policy.zrp_maxmeta &&
sle.sle_data_count <= policy.zrp_maxdata) {
int64_t loss = 0;
verify_ok = B_TRUE;
spa->spa_load_txg = spa->spa_uberblock.ub_txg;
spa->spa_load_txg_ts = spa->spa_uberblock.ub_timestamp;
loss = spa->spa_last_ubsync_txg_ts - spa->spa_load_txg_ts;
VERIFY(nvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_TIME, spa->spa_load_txg_ts) == 0);
VERIFY(nvlist_add_int64(spa->spa_load_info,
ZPOOL_CONFIG_REWIND_TIME, loss) == 0);
VERIFY(nvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_LOAD_DATA_ERRORS, sle.sle_data_count) == 0);
} else {
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg;
}
if (error) {
if (error != ENXIO && error != EIO)
error = SET_ERROR(EIO);
return (error);
}
return (verify_ok ? 0 : EIO);
}
/*
* Find a value in the pool props object.
*/
static void
spa_prop_find(spa_t *spa, zpool_prop_t prop, uint64_t *val)
{
(void) zap_lookup(spa->spa_meta_objset, spa->spa_pool_props_object,
zpool_prop_to_name(prop), sizeof (uint64_t), 1, val);
}
/*
* Find a value in the pool directory object.
*/
static int
spa_dir_prop(spa_t *spa, const char *name, uint64_t *val)
{
return (zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
name, sizeof (uint64_t), 1, val));
}
static int
spa_vdev_err(vdev_t *vdev, vdev_aux_t aux, int err)
{
vdev_set_state(vdev, B_TRUE, VDEV_STATE_CANT_OPEN, aux);
- return (err);
+ return (SET_ERROR(err));
}
/*
* Fix up config after a partly-completed split. This is done with the
* ZPOOL_CONFIG_SPLIT nvlist. Both the splitting pool and the split-off
* pool have that entry in their config, but only the splitting one contains
* a list of all the guids of the vdevs that are being split off.
*
* This function determines what to do with that list: either rejoin
* all the disks to the pool, or complete the splitting process. To attempt
* the rejoin, each disk that is offlined is marked online again, and
* we do a reopen() call. If the vdev label for every disk that was
* marked online indicates it was successfully split off (VDEV_AUX_SPLIT_POOL)
* then we call vdev_split() on each disk, and complete the split.
*
* Otherwise we leave the config alone, with all the vdevs in place in
* the original pool.
*/
static void
spa_try_repair(spa_t *spa, nvlist_t *config)
{
uint_t extracted;
uint64_t *glist;
uint_t i, gcount;
nvlist_t *nvl;
vdev_t **vd;
boolean_t attempt_reopen;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT, &nvl) != 0)
return;
/* check that the config is complete */
if (nvlist_lookup_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
&glist, &gcount) != 0)
return;
vd = kmem_zalloc(gcount * sizeof (vdev_t *), KM_SLEEP);
/* attempt to online all the vdevs & validate */
attempt_reopen = B_TRUE;
for (i = 0; i < gcount; i++) {
if (glist[i] == 0) /* vdev is hole */
continue;
vd[i] = spa_lookup_by_guid(spa, glist[i], B_FALSE);
if (vd[i] == NULL) {
/*
* Don't bother attempting to reopen the disks;
* just do the split.
*/
attempt_reopen = B_FALSE;
} else {
/* attempt to re-online it */
vd[i]->vdev_offline = B_FALSE;
}
}
if (attempt_reopen) {
vdev_reopen(spa->spa_root_vdev);
/* check each device to see what state it's in */
for (extracted = 0, i = 0; i < gcount; i++) {
if (vd[i] != NULL &&
vd[i]->vdev_stat.vs_aux != VDEV_AUX_SPLIT_POOL)
break;
++extracted;
}
}
/*
* If every disk has been moved to the new pool, or if we never
* even attempted to look at them, then we split them off for
* good.
*/
if (!attempt_reopen || gcount == extracted) {
for (i = 0; i < gcount; i++)
if (vd[i] != NULL)
vdev_split(vd[i]);
vdev_reopen(spa->spa_root_vdev);
}
kmem_free(vd, gcount * sizeof (vdev_t *));
}
static int
spa_load(spa_t *spa, spa_load_state_t state, spa_import_type_t type,
boolean_t mosconfig)
{
nvlist_t *config = spa->spa_config;
char *ereport = FM_EREPORT_ZFS_POOL;
char *comment;
int error;
uint64_t pool_guid;
nvlist_t *nvl;
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &pool_guid))
return (SET_ERROR(EINVAL));
ASSERT(spa->spa_comment == NULL);
if (nvlist_lookup_string(config, ZPOOL_CONFIG_COMMENT, &comment) == 0)
spa->spa_comment = spa_strdup(comment);
/*
* Versioning wasn't explicitly added to the label until later, so if
* it's not present treat it as the initial version.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VERSION,
&spa->spa_ubsync.ub_version) != 0)
spa->spa_ubsync.ub_version = SPA_VERSION_INITIAL;
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_TXG,
&spa->spa_config_txg);
if ((state == SPA_LOAD_IMPORT || state == SPA_LOAD_TRYIMPORT) &&
spa_guid_exists(pool_guid, 0)) {
error = SET_ERROR(EEXIST);
} else {
spa->spa_config_guid = pool_guid;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_SPLIT,
&nvl) == 0) {
VERIFY(nvlist_dup(nvl, &spa->spa_config_splitting,
KM_SLEEP) == 0);
}
nvlist_free(spa->spa_load_info);
spa->spa_load_info = fnvlist_alloc();
gethrestime(&spa->spa_loaded_ts);
error = spa_load_impl(spa, pool_guid, config, state, type,
mosconfig, &ereport);
}
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = refcount_count(&spa->spa_refcount);
if (error) {
if (error != EEXIST) {
spa->spa_loaded_ts.tv_sec = 0;
spa->spa_loaded_ts.tv_nsec = 0;
}
if (error != EBADF) {
zfs_ereport_post(ereport, spa, NULL, NULL, NULL, 0, 0);
}
}
spa->spa_load_state = error ? SPA_LOAD_ERROR : SPA_LOAD_NONE;
spa->spa_ena = 0;
return (error);
}
#ifdef ZFS_DEBUG
/*
* Count the number of per-vdev ZAPs associated with all of the vdevs in the
* vdev tree rooted in the given vd, and ensure that each ZAP is present in the
* spa's per-vdev ZAP list.
*/
static uint64_t
vdev_count_verify_zaps(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
uint64_t total = 0;
if (vd->vdev_top_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_top_zap));
}
if (vd->vdev_leaf_zap != 0) {
total++;
ASSERT0(zap_lookup_int(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, vd->vdev_leaf_zap));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
total += vdev_count_verify_zaps(vd->vdev_child[i]);
}
return (total);
}
#endif
/*
* Determine whether the activity check is required.
*/
static boolean_t
spa_activity_check_required(spa_t *spa, uberblock_t *ub, nvlist_t *label,
nvlist_t *config)
{
uint64_t state = 0;
uint64_t hostid = 0;
uint64_t tryconfig_txg = 0;
uint64_t tryconfig_timestamp = 0;
nvlist_t *nvinfo;
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvinfo = fnvlist_lookup_nvlist(config, ZPOOL_CONFIG_LOAD_INFO);
(void) nvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG,
&tryconfig_txg);
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
&tryconfig_timestamp);
}
(void) nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_STATE, &state);
/*
* Skip the activity check when ZFS_IMPORT_SKIP_MMP is set - this is
* used by zdb, which is intended to be used on potentially active pools.
*/
if (spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP)
return (B_FALSE);
/*
* Skip the activity check when the MMP feature is disabled.
*/
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay == 0)
return (B_FALSE);
/*
* If the tryconfig_* values are nonzero, they are the results of an
* earlier tryimport. If they match the uberblock we just found, then
* the pool has not changed and we return false so we do not test a
* second time.
*/
if (tryconfig_txg && tryconfig_txg == ub->ub_txg &&
tryconfig_timestamp && tryconfig_timestamp == ub->ub_timestamp)
return (B_FALSE);
/*
* Allow the activity check to be skipped when importing the pool
* on the same host which last imported it. Since the hostid from
* configuration may be stale, use the one read from the label.
*/
if (nvlist_exists(label, ZPOOL_CONFIG_HOSTID))
hostid = fnvlist_lookup_uint64(label, ZPOOL_CONFIG_HOSTID);
if (hostid == spa_get_hostid())
return (B_FALSE);
/*
* Skip the activity test when the pool was cleanly exported.
*/
if (state != POOL_STATE_ACTIVE)
return (B_FALSE);
return (B_TRUE);
}
/*
* Perform the import activity check. If the user canceled the import or
* we detected activity then fail.
*/
static int
spa_activity_check(spa_t *spa, uberblock_t *ub, nvlist_t *config)
{
uint64_t import_intervals = MAX(zfs_multihost_import_intervals, 1);
uint64_t txg = ub->ub_txg;
uint64_t timestamp = ub->ub_timestamp;
uint64_t import_delay = NANOSEC;
hrtime_t import_expire;
nvlist_t *mmp_label = NULL;
vdev_t *rvd = spa->spa_root_vdev;
kcondvar_t cv;
kmutex_t mtx;
int error = 0;
cv_init(&cv, NULL, CV_DEFAULT, NULL);
mutex_init(&mtx, NULL, MUTEX_DEFAULT, NULL);
mutex_enter(&mtx);
/*
* If ZPOOL_CONFIG_MMP_TXG is present, an activity check was performed
* during the earlier tryimport. If the txg recorded there is 0 then
* the pool is known to be active on another host.
*
* Otherwise, the pool might be in use on another node. Check for
* changes in the uberblocks on disk if necessary.
*/
if (nvlist_exists(config, ZPOOL_CONFIG_LOAD_INFO)) {
nvlist_t *nvinfo = fnvlist_lookup_nvlist(config,
ZPOOL_CONFIG_LOAD_INFO);
if (nvlist_exists(nvinfo, ZPOOL_CONFIG_MMP_TXG) &&
fnvlist_lookup_uint64(nvinfo, ZPOOL_CONFIG_MMP_TXG) == 0) {
vdev_uberblock_load(rvd, ub, &mmp_label);
error = SET_ERROR(EREMOTEIO);
goto out;
}
}
/*
* Preferentially use the zfs_multihost_interval from the node which
* last imported the pool. This value is stored in an MMP uberblock as:
*
* ub_mmp_delay * vdev_count_leaves() == zfs_multihost_interval
*/
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay)
import_delay = MAX(import_delay, import_intervals *
ub->ub_mmp_delay * MAX(vdev_count_leaves(spa), 1));
/* Apply a floor using the local default values. */
import_delay = MAX(import_delay, import_intervals *
MSEC2NSEC(MAX(zfs_multihost_interval, MMP_MIN_INTERVAL)));
zfs_dbgmsg("import_delay=%llu ub_mmp_delay=%llu import_intervals=%u "
"leaves=%u", import_delay, ub->ub_mmp_delay, import_intervals,
vdev_count_leaves(spa));
/* Add a small random factor in case of simultaneous imports (0-25%) */
import_expire = gethrtime() + import_delay +
(import_delay * spa_get_random(250) / 1000);
while (gethrtime() < import_expire) {
vdev_uberblock_load(rvd, ub, &mmp_label);
if (txg != ub->ub_txg || timestamp != ub->ub_timestamp) {
error = SET_ERROR(EREMOTEIO);
break;
}
if (mmp_label) {
nvlist_free(mmp_label);
mmp_label = NULL;
}
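/*
 * Sleep for one second between uberblock checks. cv_timedwait_sig()
 * returns -1 only when the timeout expires; any other return value means
 * the wait was interrupted (e.g. by a signal), so treat it as the user
 * canceling the import.
 */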
error = cv_timedwait_sig(&cv, &mtx, ddi_get_lbolt() + hz);
if (error != -1) {
error = SET_ERROR(EINTR);
break;
}
error = 0;
}
out:
mutex_exit(&mtx);
mutex_destroy(&mtx);
cv_destroy(&cv);
/*
* If the pool is determined to be active, store the status in the
* spa->spa_load_info nvlist. If the remote hostname or hostid are
* available from the configuration read from disk, store them as well.
* This allows 'zpool import' to generate a more useful message.
*
* ZPOOL_CONFIG_MMP_STATE - observed pool status (mandatory)
* ZPOOL_CONFIG_MMP_HOSTNAME - hostname from the active pool
* ZPOOL_CONFIG_MMP_HOSTID - hostid from the active pool
*/
if (error == EREMOTEIO) {
char *hostname = "<unknown>";
uint64_t hostid = 0;
if (mmp_label) {
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTNAME)) {
hostname = fnvlist_lookup_string(mmp_label,
ZPOOL_CONFIG_HOSTNAME);
fnvlist_add_string(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTNAME, hostname);
}
if (nvlist_exists(mmp_label, ZPOOL_CONFIG_HOSTID)) {
hostid = fnvlist_lookup_uint64(mmp_label,
ZPOOL_CONFIG_HOSTID);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_HOSTID, hostid);
}
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_ACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, 0);
error = spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO);
}
if (mmp_label)
nvlist_free(mmp_label);
return (error);
}
/*
* Load an existing storage pool, using the pool's builtin spa_config as a
* source of configuration information.
*/
__attribute__((always_inline))
static inline int
spa_load_impl(spa_t *spa, uint64_t pool_guid, nvlist_t *config,
- spa_load_state_t state, spa_import_type_t type, boolean_t mosconfig,
+ spa_load_state_t state, spa_import_type_t type, boolean_t trust_config,
char **ereport)
{
int error = 0;
nvlist_t *nvroot = NULL;
nvlist_t *label;
vdev_t *rvd;
uberblock_t *ub = &spa->spa_uberblock;
uint64_t children, config_cache_txg = spa->spa_config_txg;
int orig_mode = spa->spa_mode;
int parse;
uint64_t obj;
boolean_t missing_feat_write = B_FALSE;
boolean_t activity_check = B_FALSE;
/*
* If this is an untrusted config, access the pool in read-only mode.
* This prevents things like resilvering recently removed devices.
*/
- if (!mosconfig)
+ if (!trust_config)
spa->spa_mode = FREAD;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa->spa_load_state = state;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvroot))
return (SET_ERROR(EINVAL));
parse = (type == SPA_IMPORT_EXISTING ?
VDEV_ALLOC_LOAD : VDEV_ALLOC_SPLIT);
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Parse the configuration into a vdev tree. We explicitly set the
* value that will be returned by spa_version() since parsing the
* configuration requires knowing the version number.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, parse);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0)
return (error);
ASSERT(spa->spa_root_vdev == rvd);
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
ASSERT3U(spa->spa_max_ashift, <=, SPA_MAXBLOCKSHIFT);
if (type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_guid(spa) == pool_guid);
}
/*
* Try to open all vdevs, loading each label in the process.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = vdev_open(rvd);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0)
return (error);
/*
* We need to validate the vdev labels against the configuration that
* we have in hand, which is dependent on the setting of trust_config. If
* trust_config is true then we're validating the vdev labels based on
* that config. Otherwise, we're validating against the cached config
* (zpool.cache) that was read when we loaded the zfs module, and then
* later we will recursively call spa_load() and validate against
* the vdev config.
*
* If we're assembling a new pool that's been split off from an
* existing pool, the labels haven't yet been updated so we skip
* validation for now.
*/
if (type != SPA_IMPORT_ASSEMBLE) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
- error = vdev_validate(rvd, mosconfig);
+ error = vdev_validate(rvd, trust_config);
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0)
return (error);
if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
return (SET_ERROR(ENXIO));
}
/*
* Find the best uberblock.
*/
vdev_uberblock_load(rvd, ub, &label);
/*
* If we weren't able to find a single valid uberblock, return failure.
*/
if (ub->ub_txg == 0) {
nvlist_free(label);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, ENXIO));
}
/*
* For pools which have the multihost property on, determine if the
* pool is truly inactive and can be safely imported. Prevent
* hosts which don't have a hostid set from importing the pool.
*/
activity_check = spa_activity_check_required(spa, ub, label, config);
if (activity_check) {
if (ub->ub_mmp_magic == MMP_MAGIC && ub->ub_mmp_delay &&
spa_get_hostid() == 0) {
nvlist_free(label);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
error = spa_activity_check(spa, ub, config);
if (error) {
nvlist_free(label);
return (error);
}
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_INACTIVE);
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_TXG, ub->ub_txg);
}
/*
* If the pool has an unsupported version we can't open it.
*/
if (!SPA_VERSION_IS_SUPPORTED(ub->ub_version)) {
nvlist_free(label);
return (spa_vdev_err(rvd, VDEV_AUX_VERSION_NEWER, ENOTSUP));
}
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *features;
/*
* If we weren't able to find what's necessary for reading the
* MOS in the label, return failure.
*/
if (label == NULL || nvlist_lookup_nvlist(label,
ZPOOL_CONFIG_FEATURES_FOR_READ, &features) != 0) {
nvlist_free(label);
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
ENXIO));
}
/*
* Update our in-core representation with the definitive values
* from the label.
*/
nvlist_free(spa->spa_label_features);
VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0);
}
nvlist_free(label);
/*
* Look through entries in the label nvlist's features_for_read. If
* there is a feature listed there which we don't understand then we
* cannot open a pool.
*/
if (ub->ub_version >= SPA_VERSION_FEATURES) {
nvlist_t *unsup_feat;
VERIFY(nvlist_alloc(&unsup_feat, NV_UNIQUE_NAME, KM_SLEEP) ==
0);
for (nvpair_t *nvp = nvlist_next_nvpair(spa->spa_label_features,
NULL); nvp != NULL;
nvp = nvlist_next_nvpair(spa->spa_label_features, nvp)) {
if (!zfeature_is_supported(nvpair_name(nvp))) {
VERIFY(nvlist_add_string(unsup_feat,
nvpair_name(nvp), "") == 0);
}
}
if (!nvlist_empty(unsup_feat)) {
VERIFY(nvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat) == 0);
nvlist_free(unsup_feat);
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
nvlist_free(unsup_feat);
}
/*
* If the vdev guid sum doesn't match the uberblock, we have an
* incomplete configuration. We first check to see if the pool
* is aware of the complete config (i.e. ZPOOL_CONFIG_VDEV_CHILDREN).
* If it is, defer the vdev_guid_sum check till later so we
* can handle missing vdevs.
*/
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
- &children) != 0 && mosconfig && type != SPA_IMPORT_ASSEMBLE &&
+ &children) != 0 && trust_config && type != SPA_IMPORT_ASSEMBLE &&
rvd->vdev_guid_sum != ub->ub_guid_sum)
return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM, ENXIO));
if (type != SPA_IMPORT_ASSEMBLE && spa->spa_config_splitting) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_try_repair(spa, config);
spa_config_exit(spa, SCL_ALL, FTAG);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
}
/*
* Initialize internal SPA structures.
*/
spa->spa_state = POOL_STATE_ACTIVE;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_verify_min_txg = spa->spa_extreme_rewind ?
TXG_INITIAL - 1 : spa_last_synced_txg(spa) - TXG_DEFER_SIZE - 1;
spa->spa_first_txg = spa->spa_last_ubsync_txg ?
spa->spa_last_ubsync_txg : spa_last_synced_txg(spa) + 1;
spa->spa_claim_max_txg = spa->spa_first_txg;
spa->spa_prev_software_version = ub->ub_software_version;
+ /*
+ * Everything that we read before we do spa_remove_init() must
+ * have been rewritten after the last device removal was initiated.
+ * Otherwise we could be reading from indirect vdevs before
+ * we have loaded their mappings.
+ */
+
error = dsl_pool_init(spa, spa->spa_first_txg, &spa->spa_dsl_pool);
if (error)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
spa->spa_meta_objset = spa->spa_dsl_pool->dp_meta_objset;
if (spa_dir_prop(spa, DMU_POOL_CONFIG, &spa->spa_config_object) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+ /*
+ * Validate the config, using the MOS config to fill in any
+ * information which might be missing. If we fail to validate
+ * the config then declare the pool unfit for use. If we're
+ * assembling a pool from a split, the log is not transferred
+ * over.
+ */
+ if (type != SPA_IMPORT_ASSEMBLE) {
+ nvlist_t *mos_config;
+ if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
+ return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+
+ if (!spa_config_valid(spa, mos_config)) {
+ nvlist_free(mos_config);
+ return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
+ ENXIO));
+ }
+ nvlist_free(mos_config);
+
+ /*
+ * Now that we've validated the config, check the state of the
+ * root vdev. If it can't be opened, it indicates one or
+ * more toplevel vdevs are faulted.
+ */
+ if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
+ return (SET_ERROR(ENXIO));
+ }
+
+ /*
+ * Everything that we read before spa_remove_init() must be stored
+ * on concreted vdevs. Therefore we do this as early as possible.
+ */
+ if (spa_remove_init(spa) != 0)
+ return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
+
if (spa_version(spa) >= SPA_VERSION_FEATURES) {
boolean_t missing_feat_read = B_FALSE;
nvlist_t *unsup_feat, *enabled_feat;
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_READ,
&spa->spa_feat_for_read_obj) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURES_FOR_WRITE,
&spa->spa_feat_for_write_obj) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_FEATURE_DESCRIPTIONS,
&spa->spa_feat_desc_obj) != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
enabled_feat = fnvlist_alloc();
unsup_feat = fnvlist_alloc();
if (!spa_features_check(spa, B_FALSE,
unsup_feat, enabled_feat))
missing_feat_read = B_TRUE;
if (spa_writeable(spa) || state == SPA_LOAD_TRYIMPORT) {
if (!spa_features_check(spa, B_TRUE,
unsup_feat, enabled_feat)) {
missing_feat_write = B_TRUE;
}
}
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_ENABLED_FEAT, enabled_feat);
if (!nvlist_empty(unsup_feat)) {
fnvlist_add_nvlist(spa->spa_load_info,
ZPOOL_CONFIG_UNSUP_FEAT, unsup_feat);
}
fnvlist_free(enabled_feat);
fnvlist_free(unsup_feat);
if (!missing_feat_read) {
fnvlist_add_boolean(spa->spa_load_info,
ZPOOL_CONFIG_CAN_RDONLY);
}
/*
* If the state is SPA_LOAD_TRYIMPORT, our objective is
* twofold: to determine whether the pool is available for
* import in read-write mode and (if it is not) whether the
* pool is available for import in read-only mode. If the pool
* is available for import in read-write mode, it is displayed
* as available in userland; if it is not available for import
* in read-only mode, it is displayed as unavailable in
* userland. If the pool is available for import in read-only
* mode but not read-write mode, it is displayed as unavailable
* in userland with a special note that the pool is actually
* available for open in read-only mode.
*
* As a result, if the state is SPA_LOAD_TRYIMPORT and we are
* missing a feature for write, we must first determine whether
* the pool can be opened read-only before returning to
* userland in order to know whether to display the
* abovementioned note.
*/
if (missing_feat_read || (missing_feat_write &&
spa_writeable(spa))) {
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT,
ENOTSUP));
}
/*
* Load refcounts for ZFS features from disk into an in-memory
* cache during SPA initialization.
*/
for (spa_feature_t i = 0; i < SPA_FEATURES; i++) {
uint64_t refcount;
error = feature_get_refcount_from_disk(spa,
&spa_feature_table[i], &refcount);
if (error == 0) {
spa->spa_feat_refcount_cache[i] = refcount;
} else if (error == ENOTSUP) {
spa->spa_feat_refcount_cache[i] =
SPA_FEATURE_DISABLED;
} else {
return (spa_vdev_err(rvd,
VDEV_AUX_CORRUPT_DATA, EIO));
}
}
}
if (spa_feature_is_active(spa, SPA_FEATURE_ENABLED_TXG)) {
if (spa_dir_prop(spa, DMU_POOL_FEATURE_ENABLED_TXG,
&spa->spa_feat_enabled_txg_obj) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
spa->spa_is_initializing = B_TRUE;
error = dsl_pool_open(spa->spa_dsl_pool);
spa->spa_is_initializing = B_FALSE;
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
- if (!mosconfig) {
+ if (!trust_config) {
uint64_t hostid;
- nvlist_t *policy = NULL, *nvconfig;
+ nvlist_t *policy = NULL;
+ nvlist_t *mos_config;
- if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
+ if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
- if (!spa_is_root(spa) && nvlist_lookup_uint64(nvconfig,
+ if (!spa_is_root(spa) && nvlist_lookup_uint64(mos_config,
ZPOOL_CONFIG_HOSTID, &hostid) == 0) {
char *hostname;
unsigned long myhostid = 0;
- VERIFY(nvlist_lookup_string(nvconfig,
+ VERIFY(nvlist_lookup_string(mos_config,
ZPOOL_CONFIG_HOSTNAME, &hostname) == 0);
myhostid = spa_get_hostid();
if (hostid && myhostid && hostid != myhostid) {
- nvlist_free(nvconfig);
+ nvlist_free(mos_config);
return (SET_ERROR(EBADF));
}
}
if (nvlist_lookup_nvlist(spa->spa_config,
ZPOOL_REWIND_POLICY, &policy) == 0)
- VERIFY(nvlist_add_nvlist(nvconfig,
+ VERIFY(nvlist_add_nvlist(mos_config,
ZPOOL_REWIND_POLICY, policy) == 0);
- spa_config_set(spa, nvconfig);
+ spa_config_set(spa, mos_config);
spa_unload(spa);
spa_deactivate(spa);
spa_activate(spa, orig_mode);
return (spa_load(spa, state, SPA_IMPORT_EXISTING, B_TRUE));
}
/* Grab the checksum salt from the MOS. */
error = zap_lookup(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes);
if (error == ENOENT) {
/* Generate a new salt for subsequent use */
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
} else if (error != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
}
if (spa_dir_prop(spa, DMU_POOL_SYNC_BPOBJ, &obj) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = bpobj_open(&spa->spa_deferred_bpobj, spa->spa_meta_objset, obj);
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the bit that tells us to use the new accounting function
* (raid-z deflation). If we have an older pool, this will not
* be present.
*/
error = spa_dir_prop(spa, DMU_POOL_DEFLATE, &spa->spa_deflate);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_CREATION_VERSION,
&spa->spa_creation_version);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the persistent error log. If we have an older pool, this will
* not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_LAST, &spa->spa_errlog_last);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_ERRLOG_SCRUB,
&spa->spa_errlog_scrub);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the history object. If we have an older pool, this
* will not be present.
*/
error = spa_dir_prop(spa, DMU_POOL_HISTORY, &spa->spa_history);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
/*
* Load the per-vdev ZAP map. If we have an older pool, this will not
* be present; in this case, defer its creation to a later time to
* avoid dirtying the MOS this early (i.e. outside of sync context). See
* spa_sync_config_object.
*/
/* The sentinel is only available in the MOS config. */
nvlist_t *mos_config;
if (load_nvlist(spa, spa->spa_config_object, &mos_config) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
error = spa_dir_prop(spa, DMU_POOL_VDEV_ZAP_MAP,
&spa->spa_all_vdev_zaps);
if (error == ENOENT) {
VERIFY(!nvlist_exists(mos_config,
ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
spa->spa_avz_action = AVZ_ACTION_INITIALIZE;
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
} else if (error != 0) {
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
} else if (!nvlist_exists(mos_config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS)) {
/*
* An older version of ZFS overwrote the sentinel value, so
* we have orphaned per-vdev ZAPs in the MOS. Defer their
* destruction to later; see spa_sync_config_object.
*/
spa->spa_avz_action = AVZ_ACTION_DESTROY;
/*
* We're assuming that no vdevs have had their ZAPs created
* before this. Better be sure of it.
*/
ASSERT0(vdev_count_verify_zaps(spa->spa_root_vdev));
}
nvlist_free(mos_config);
/*
* If we're assembling the pool from the split-off vdevs of
* an existing pool, we don't want to attach the spares & cache
* devices.
*/
/*
* Load any hot spares for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_SPARES, &spa->spa_spares.sav_object);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_SPARES);
if (load_nvlist(spa, spa->spa_spares.sav_object,
&spa->spa_spares.sav_config) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Load any level 2 ARC devices for this pool.
*/
error = spa_dir_prop(spa, DMU_POOL_L2CACHE,
&spa->spa_l2cache.sav_object);
if (error != 0 && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0 && type != SPA_IMPORT_ASSEMBLE) {
ASSERT(spa_version(spa) >= SPA_VERSION_L2CACHE);
if (load_nvlist(spa, spa->spa_l2cache.sav_object,
&spa->spa_l2cache.sav_config) != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
} else if (error == 0) {
spa->spa_l2cache.sav_sync = B_TRUE;
}
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
error = spa_dir_prop(spa, DMU_POOL_PROPS, &spa->spa_pool_props_object);
if (error && error != ENOENT)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
if (error == 0) {
uint64_t autoreplace = 0;
spa_prop_find(spa, ZPOOL_PROP_BOOTFS, &spa->spa_bootfs);
spa_prop_find(spa, ZPOOL_PROP_AUTOREPLACE, &autoreplace);
spa_prop_find(spa, ZPOOL_PROP_DELEGATION, &spa->spa_delegation);
spa_prop_find(spa, ZPOOL_PROP_FAILUREMODE, &spa->spa_failmode);
spa_prop_find(spa, ZPOOL_PROP_AUTOEXPAND, &spa->spa_autoexpand);
spa_prop_find(spa, ZPOOL_PROP_MULTIHOST, &spa->spa_multihost);
spa_prop_find(spa, ZPOOL_PROP_DEDUPDITTO,
&spa->spa_dedup_ditto);
spa->spa_autoreplace = (autoreplace != 0);
}
/*
* If the 'multihost' property is set, then never allow a pool to
* be imported when the system hostid is zero. The exception to
* this rule is zdb which is always allowed to access pools.
*/
if (spa_multihost(spa) && spa_get_hostid() == 0 &&
(spa->spa_import_flags & ZFS_IMPORT_SKIP_MMP) == 0) {
fnvlist_add_uint64(spa->spa_load_info,
ZPOOL_CONFIG_MMP_STATE, MMP_STATE_NO_HOSTID);
return (spa_vdev_err(rvd, VDEV_AUX_ACTIVE, EREMOTEIO));
}
/*
* If the 'autoreplace' property is set, then post a resource notifying
* the ZFS DE that it should not issue any faults for unopenable
* devices. We also iterate over the vdevs, and post a sysevent for any
* unopenable vdevs so that the normal autoreplace handler can take
* over.
*/
if (spa->spa_autoreplace && state != SPA_LOAD_TRYIMPORT) {
spa_check_removed(spa->spa_root_vdev);
/*
* For the import case, this is done in spa_import(), because
* at this point we're using the spare definitions from
* the MOS config, not necessarily from the userland config.
*/
if (state != SPA_LOAD_IMPORT) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
}
/*
* Load the vdev state for all toplevel vdevs.
*/
- vdev_load(rvd);
+ error = vdev_load(rvd);
+ if (error != 0) {
+ return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
+ }
+
+ error = spa_condense_init(spa);
+ if (error != 0) {
+ return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, error));
+ }
/*
* Propagate the leaf DTLs we just loaded all the way up the tree.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
vdev_dtl_reassess(rvd, 0, 0, B_FALSE);
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* Load the DDTs (dedup tables).
*/
error = ddt_load(spa);
if (error != 0)
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
spa_update_dspace(spa);
- /*
- * Validate the config, using the MOS config to fill in any
- * information which might be missing. If we fail to validate
- * the config then declare the pool unfit for use. If we're
- * assembling a pool from a split, the log is not transferred
- * over.
- */
- if (type != SPA_IMPORT_ASSEMBLE) {
- nvlist_t *nvconfig;
-
- if (load_nvlist(spa, spa->spa_config_object, &nvconfig) != 0)
- return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA, EIO));
-
- if (!spa_config_valid(spa, nvconfig)) {
- nvlist_free(nvconfig);
- return (spa_vdev_err(rvd, VDEV_AUX_BAD_GUID_SUM,
- ENXIO));
- }
- nvlist_free(nvconfig);
-
- /*
- * Now that we've validated the config, check the state of the
- * root vdev. If it can't be opened, it indicates one or
- * more toplevel vdevs are faulted.
- */
- if (rvd->vdev_state <= VDEV_STATE_CANT_OPEN)
- return (SET_ERROR(ENXIO));
-
- if (spa_writeable(spa) && spa_check_logs(spa)) {
- *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
- return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
- }
+ if (type != SPA_IMPORT_ASSEMBLE && spa_writeable(spa) &&
+ spa_check_logs(spa)) {
+ *ereport = FM_EREPORT_ZFS_LOG_REPLAY;
+ return (spa_vdev_err(rvd, VDEV_AUX_BAD_LOG, ENXIO));
}
if (missing_feat_write) {
ASSERT(state == SPA_LOAD_TRYIMPORT);
/*
* At this point, we know that we can open the pool in
* read-only mode but not read-write mode. We now have enough
* information and can return to userland.
*/
return (spa_vdev_err(rvd, VDEV_AUX_UNSUP_FEAT, ENOTSUP));
}
/*
* We've successfully opened the pool, verify that we're ready
* to start pushing transactions.
*/
if (state != SPA_LOAD_TRYIMPORT) {
if ((error = spa_load_verify(spa)))
return (spa_vdev_err(rvd, VDEV_AUX_CORRUPT_DATA,
error));
}
if (spa_writeable(spa) && (state == SPA_LOAD_RECOVER ||
spa->spa_load_max_txg == UINT64_MAX)) {
dmu_tx_t *tx;
int need_update = B_FALSE;
dsl_pool_t *dp = spa_get_dsl(spa);
+ /*
+ * We must check this before we start the sync thread, because
+ * we only want to start a condense thread for condense
+ * operations that were in progress when the pool was
+ * imported. Once we start syncing, spa_sync() could
+ * initiate a condense (and start a thread for it). In
+ * that case it would be wrong to start a second
+ * condense thread.
+ */
+ boolean_t condense_in_progress =
+ (spa->spa_condensing_indirect != NULL);
+
ASSERT(state != SPA_LOAD_TRYIMPORT);
/*
* Claim log blocks that haven't been committed yet.
* This must all happen in a single txg.
* Note: spa_claim_max_txg is updated by spa_claim_notify(),
* invoked from zil_claim_log_block()'s i/o done callback.
* Price of rollback is that we abandon the log.
*/
spa->spa_claiming = B_TRUE;
tx = dmu_tx_create_assigned(dp, spa_first_txg(spa));
(void) dmu_objset_find_dp(dp, dp->dp_root_dir_obj,
zil_claim, tx, DS_FIND_CHILDREN);
dmu_tx_commit(tx);
spa->spa_claiming = B_FALSE;
spa_set_log_state(spa, SPA_LOG_GOOD);
spa->spa_sync_on = B_TRUE;
txg_sync_start(spa->spa_dsl_pool);
mmp_thread_start(spa);
/*
* Wait for all claims to sync. We sync up to the highest
* claimed log block birth time so that claimed log blocks
* don't appear to be from the future. spa_claim_max_txg
* will have been set for us by either zil_check_log_chain()
* (invoked from spa_check_logs()) or zil_claim() above.
*/
txg_wait_synced(spa->spa_dsl_pool, spa->spa_claim_max_txg);
/*
* If the config cache is stale, or we have uninitialized
* metaslabs (see spa_vdev_add()), then update the config.
*
* If this is a verbatim import, trust the current
* in-core spa_config and update the disk labels.
*/
if (config_cache_txg != spa->spa_config_txg ||
state == SPA_LOAD_IMPORT ||
state == SPA_LOAD_RECOVER ||
(spa->spa_import_flags & ZFS_IMPORT_VERBATIM))
need_update = B_TRUE;
for (int c = 0; c < rvd->vdev_children; c++)
if (rvd->vdev_child[c]->vdev_ms_array == 0)
need_update = B_TRUE;
/*
* Update the config cache asynchronously in case we're the
* root pool, in which case the config cache isn't writable yet.
*/
if (need_update)
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
/*
* Check all DTLs to see if anything needs resilvering.
*/
if (!dsl_scan_resilvering(spa->spa_dsl_pool) &&
vdev_resilver_needed(rvd, NULL, NULL))
spa_async_request(spa, SPA_ASYNC_RESILVER);
/*
* Log the fact that we booted up (so that we can detect if
* we rebooted in the middle of an operation).
*/
spa_history_log_version(spa, "open", NULL);
/*
* Delete any inconsistent datasets.
*/
(void) dmu_objset_find(spa_name(spa),
dsl_destroy_inconsistent, NULL, DS_FIND_CHILDREN);
/*
* Clean up any stale temporary dataset userrefs.
*/
dsl_pool_clean_tmp_userrefs(spa->spa_dsl_pool);
+
+ /*
+ * Note: unlike condensing, we don't need an analogous
+ * "removal_in_progress" dance because no other thread
+ * can start a removal while we hold the spa_namespace_lock.
+ */
+ spa_restart_removal(spa);
+
+ if (condense_in_progress)
+ spa_condense_indirect_restart(spa);
}
return (0);
}
static int
spa_load_retry(spa_t *spa, spa_load_state_t state, int mosconfig)
{
int mode = spa->spa_mode;
spa_unload(spa);
spa_deactivate(spa);
spa->spa_load_max_txg = spa->spa_uberblock.ub_txg - 1;
spa_activate(spa, mode);
spa_async_suspend(spa);
return (spa_load(spa, state, SPA_IMPORT_EXISTING, mosconfig));
}
/*
* If spa_load() fails this function will try loading prior txg's. If
* 'state' is SPA_LOAD_RECOVER and one of these loads succeeds the pool
* will be rewound to that txg. If 'state' is not SPA_LOAD_RECOVER this
* function will not rewind the pool and will return the same error as
* spa_load().
*/
static int
spa_load_best(spa_t *spa, spa_load_state_t state, int mosconfig,
uint64_t max_request, int rewind_flags)
{
nvlist_t *loadinfo = NULL;
nvlist_t *config = NULL;
int load_error, rewind_error;
uint64_t safe_rewind_txg;
uint64_t min_txg;
if (spa->spa_load_txg && state == SPA_LOAD_RECOVER) {
spa->spa_load_max_txg = spa->spa_load_txg;
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
spa->spa_load_max_txg = max_request;
if (max_request != UINT64_MAX)
spa->spa_extreme_rewind = B_TRUE;
}
load_error = rewind_error = spa_load(spa, state, SPA_IMPORT_EXISTING,
mosconfig);
if (load_error == 0)
return (0);
if (spa->spa_root_vdev != NULL)
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
spa->spa_last_ubsync_txg = spa->spa_uberblock.ub_txg;
spa->spa_last_ubsync_txg_ts = spa->spa_uberblock.ub_timestamp;
if (rewind_flags & ZPOOL_NEVER_REWIND) {
nvlist_free(config);
return (load_error);
}
if (state == SPA_LOAD_RECOVER) {
/* Price of rolling back is discarding txgs, including log */
spa_set_log_state(spa, SPA_LOG_CLEAR);
} else {
/*
* If we aren't rolling back save the load info from our first
* import attempt so that we can restore it after attempting
* to rewind.
*/
loadinfo = spa->spa_load_info;
spa->spa_load_info = fnvlist_alloc();
}
spa->spa_load_max_txg = spa->spa_last_ubsync_txg;
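/*
 * Frees are deferred for TXG_DEFER_SIZE txgs before blocks are reused,
 * so uberblocks within that window still reference intact data and are
 * safe to rewind to. Rewinding further than safe_rewind_txg requires
 * ZPOOL_EXTREME_REWIND.
 */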
safe_rewind_txg = spa->spa_last_ubsync_txg - TXG_DEFER_SIZE;
min_txg = (rewind_flags & ZPOOL_EXTREME_REWIND) ?
TXG_INITIAL : safe_rewind_txg;
/*
* Continue as long as we're finding errors, we're still within
* the acceptable rewind range, and we're still finding uberblocks
*/
while (rewind_error && spa->spa_uberblock.ub_txg >= min_txg &&
spa->spa_uberblock.ub_txg <= spa->spa_load_max_txg) {
if (spa->spa_load_max_txg < safe_rewind_txg)
spa->spa_extreme_rewind = B_TRUE;
rewind_error = spa_load_retry(spa, state, mosconfig);
}
spa->spa_extreme_rewind = B_FALSE;
spa->spa_load_max_txg = UINT64_MAX;
if (config && (rewind_error || state != SPA_LOAD_RECOVER))
spa_config_set(spa, config);
else
nvlist_free(config);
if (state == SPA_LOAD_RECOVER) {
ASSERT3P(loadinfo, ==, NULL);
return (rewind_error);
} else {
/* Store the rewind info as part of the initial load info */
fnvlist_add_nvlist(loadinfo, ZPOOL_CONFIG_REWIND_INFO,
spa->spa_load_info);
/* Restore the initial load info */
fnvlist_free(spa->spa_load_info);
spa->spa_load_info = loadinfo;
return (load_error);
}
}
/*
* Pool Open/Import
*
* The import case is identical to an open except that the configuration is sent
* down from userland, instead of grabbed from the configuration cache. For the
* case of an open, the pool configuration will exist in the
* POOL_STATE_UNINITIALIZED state.
*
* The stats information (gen/count/ustats) is used to gather vdev statistics at
* the same time as opening the pool, without having to keep around the spa_t in some
* ambiguous state.
*/
static int
spa_open_common(const char *pool, spa_t **spapp, void *tag, nvlist_t *nvpolicy,
nvlist_t **config)
{
spa_t *spa;
spa_load_state_t state = SPA_LOAD_OPEN;
int error;
int locked = B_FALSE;
int firstopen = B_FALSE;
*spapp = NULL;
/*
* As disgusting as this is, we need to support recursive calls to this
* function because dsl_dir_open() is called during spa_load(), and ends
* up calling spa_open() again. The real fix is to figure out how to
* avoid dsl_dir_open() calling this in the first place.
*/
if (MUTEX_NOT_HELD(&spa_namespace_lock)) {
mutex_enter(&spa_namespace_lock);
locked = B_TRUE;
}
if ((spa = spa_lookup(pool)) == NULL) {
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (spa->spa_state == POOL_STATE_UNINITIALIZED) {
zpool_rewind_policy_t policy;
firstopen = B_TRUE;
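/*
 * Remember that this is the first open of this pool so that zvol minor
 * nodes are created exactly once, below, after the load succeeds.
 */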
zpool_get_rewind_policy(nvpolicy ? nvpolicy : spa->spa_config,
&policy);
if (policy.zrp_request & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
spa_activate(spa, spa_mode_global);
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
error = spa_load_best(spa, state, B_FALSE, policy.zrp_txg,
policy.zrp_request);
if (error == EBADF) {
/*
* If vdev_validate() returns failure (indicated by
* EBADF), one of the vdev labels reports that the
* pool has been exported or destroyed. If
* this is the case, the config cache is out of sync and
* we should remove the pool from the namespace.
*/
spa_unload(spa);
spa_deactivate(spa);
- spa_config_sync(spa, B_TRUE, B_TRUE);
+ spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
if (locked)
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
if (error) {
/*
* We can't open the pool, but we still have useful
* information: the state of each vdev after the
* attempted vdev_open(). Return this to the user.
*/
if (config != NULL && spa->spa_config) {
VERIFY(nvlist_dup(spa->spa_config, config,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist(*config,
ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
}
spa_unload(spa);
spa_deactivate(spa);
spa->spa_last_open_failed = error;
if (locked)
mutex_exit(&spa_namespace_lock);
*spapp = NULL;
return (error);
}
}
spa_open_ref(spa, tag);
if (config != NULL)
*config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
/*
* If we've recovered the pool, pass back any information we
* gathered while doing the load.
*/
if (state == SPA_LOAD_RECOVER) {
VERIFY(nvlist_add_nvlist(*config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
}
if (locked) {
spa->spa_last_open_failed = 0;
spa->spa_last_ubsync_txg = 0;
spa->spa_load_txg = 0;
mutex_exit(&spa_namespace_lock);
}
if (firstopen)
zvol_create_minors(spa, spa_name(spa), B_TRUE);
*spapp = spa;
return (0);
}
int
spa_open_rewind(const char *name, spa_t **spapp, void *tag, nvlist_t *policy,
nvlist_t **config)
{
return (spa_open_common(name, spapp, tag, policy, config));
}
int
spa_open(const char *name, spa_t **spapp, void *tag)
{
return (spa_open_common(name, spapp, tag, NULL, NULL));
}
/*
* Lookup the given spa_t, incrementing the inject count in the process,
* preventing it from being exported or destroyed.
*/
spa_t *
spa_inject_addref(char *name)
{
spa_t *spa;
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(name)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (NULL);
}
spa->spa_inject_ref++;
mutex_exit(&spa_namespace_lock);
return (spa);
}
void
spa_inject_delref(spa_t *spa)
{
mutex_enter(&spa_namespace_lock);
spa->spa_inject_ref--;
mutex_exit(&spa_namespace_lock);
}
/*
* Add spares device information to the nvlist.
*/
static void
spa_add_spares(spa_t *spa, nvlist_t *config)
{
nvlist_t **spares;
uint_t i, nspares;
nvlist_t *nvroot;
uint64_t guid;
vdev_stat_t *vs;
uint_t vsc;
uint64_t pool;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_spares.sav_count == 0)
return;
VERIFY(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
VERIFY(nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
if (nspares != 0) {
VERIFY(nvlist_add_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
VERIFY(nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0);
/*
* Go through and find any spares which have since been
* repurposed as an active spare. If this is the case, update
* their status appropriately.
*/
for (i = 0; i < nspares; i++) {
VERIFY(nvlist_lookup_uint64(spares[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
if (spa_spare_exists(guid, &pool, NULL) &&
pool != 0ULL) {
VERIFY(nvlist_lookup_uint64_array(
spares[i], ZPOOL_CONFIG_VDEV_STATS,
(uint64_t **)&vs, &vsc) == 0);
vs->vs_state = VDEV_STATE_CANT_OPEN;
vs->vs_aux = VDEV_AUX_SPARED;
}
}
}
}
/*
* Add l2cache device information to the nvlist, including vdev stats.
*/
static void
spa_add_l2cache(spa_t *spa, nvlist_t *config)
{
nvlist_t **l2cache;
uint_t i, j, nl2cache;
nvlist_t *nvroot;
uint64_t guid;
vdev_t *vd;
vdev_stat_t *vs;
uint_t vsc;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
if (spa->spa_l2cache.sav_count == 0)
return;
VERIFY(nvlist_lookup_nvlist(config,
ZPOOL_CONFIG_VDEV_TREE, &nvroot) == 0);
VERIFY(nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
if (nl2cache != 0) {
VERIFY(nvlist_add_nvlist_array(nvroot,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
VERIFY(nvlist_lookup_nvlist_array(nvroot,
ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0);
/*
* Update level 2 cache device stats.
*/
for (i = 0; i < nl2cache; i++) {
VERIFY(nvlist_lookup_uint64(l2cache[i],
ZPOOL_CONFIG_GUID, &guid) == 0);
vd = NULL;
for (j = 0; j < spa->spa_l2cache.sav_count; j++) {
if (guid ==
spa->spa_l2cache.sav_vdevs[j]->vdev_guid) {
vd = spa->spa_l2cache.sav_vdevs[j];
break;
}
}
ASSERT(vd != NULL);
VERIFY(nvlist_lookup_uint64_array(l2cache[i],
ZPOOL_CONFIG_VDEV_STATS, (uint64_t **)&vs, &vsc)
== 0);
vdev_get_stats(vd, vs);
vdev_config_generate_stats(vd, l2cache[i]);
}
}
}
static void
spa_feature_stats_from_disk(spa_t *spa, nvlist_t *features)
{
zap_cursor_t zc;
zap_attribute_t za;
if (spa->spa_feat_for_read_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_read_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
if (spa->spa_feat_for_write_obj != 0) {
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_feat_for_write_obj);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
ASSERT(za.za_integer_length == sizeof (uint64_t) &&
za.za_num_integers == 1);
VERIFY0(nvlist_add_uint64(features, za.za_name,
za.za_first_integer));
}
zap_cursor_fini(&zc);
}
}
static void
spa_feature_stats_from_cache(spa_t *spa, nvlist_t *features)
{
int i;
for (i = 0; i < SPA_FEATURES; i++) {
zfeature_info_t feature = spa_feature_table[i];
uint64_t refcount;
if (feature_get_refcount(spa, &feature, &refcount) != 0)
continue;
VERIFY0(nvlist_add_uint64(features, feature.fi_guid, refcount));
}
}
/*
* Store a list of pool features and their reference counts in the
* config.
*
* The first time this is called on a spa, allocate a new nvlist, fetch
* the pool features and reference counts from disk, then save the list
* in the spa. In subsequent calls on the same spa use the saved nvlist
* and refresh its values from the cached reference counts. This
* ensures we don't block here on I/O on a suspended pool so 'zpool
* clear' can resume the pool.
*/
static void
spa_add_feature_stats(spa_t *spa, nvlist_t *config)
{
nvlist_t *features;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_READER));
mutex_enter(&spa->spa_feat_stats_lock);
features = spa->spa_feat_stats;
if (features != NULL) {
spa_feature_stats_from_cache(spa, features);
} else {
VERIFY0(nvlist_alloc(&features, NV_UNIQUE_NAME, KM_SLEEP));
spa->spa_feat_stats = features;
spa_feature_stats_from_disk(spa, features);
}
VERIFY0(nvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURE_STATS,
features));
mutex_exit(&spa->spa_feat_stats_lock);
}
int
spa_get_stats(const char *name, nvlist_t **config,
char *altroot, size_t buflen)
{
int error;
spa_t *spa;
*config = NULL;
error = spa_open_common(name, &spa, FTAG, NULL, config);
if (spa != NULL) {
/*
* This still leaves a window of inconsistency where the spares
* or l2cache devices could change and the config would be
* self-inconsistent.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
if (*config != NULL) {
uint64_t loadtimes[2];
loadtimes[0] = spa->spa_loaded_ts.tv_sec;
loadtimes[1] = spa->spa_loaded_ts.tv_nsec;
VERIFY(nvlist_add_uint64_array(*config,
ZPOOL_CONFIG_LOADED_TIME, loadtimes, 2) == 0);
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_ERRCOUNT,
spa_get_errlog_size(spa)) == 0);
if (spa_suspended(spa)) {
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED,
spa->spa_failmode) == 0);
VERIFY(nvlist_add_uint64(*config,
ZPOOL_CONFIG_SUSPENDED_REASON,
spa->spa_suspended) == 0);
}
spa_add_spares(spa, *config);
spa_add_l2cache(spa, *config);
spa_add_feature_stats(spa, *config);
}
}
/*
* We want to get the alternate root even for faulted pools, so we cheat
* and call spa_lookup() directly.
*/
if (altroot) {
if (spa == NULL) {
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(name);
if (spa)
spa_altroot(spa, altroot, buflen);
else
altroot[0] = '\0';
spa = NULL;
mutex_exit(&spa_namespace_lock);
} else {
spa_altroot(spa, altroot, buflen);
}
}
if (spa != NULL) {
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_close(spa, FTAG);
}
return (error);
}
/*
* Validate that the auxiliary device array is well formed. We must have an
* array of nvlists, each of which describes a valid leaf vdev. If this is an
* import (mode is VDEV_ALLOC_SPARE), then we allow corrupted spares to be
* specified, as long as they are well-formed.
*/
static int
spa_validate_aux_devs(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode,
spa_aux_vdev_t *sav, const char *config, uint64_t version,
vdev_labeltype_t label)
{
nvlist_t **dev;
uint_t i, ndev;
vdev_t *vd;
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
/*
* It's acceptable to have no devs specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, config, &dev, &ndev) != 0)
return (0);
if (ndev == 0)
return (SET_ERROR(EINVAL));
/*
* Make sure the pool is formatted with a version that supports this
* device type.
*/
if (spa_version(spa) < version)
return (SET_ERROR(ENOTSUP));
/*
* Set the pending device list so we correctly handle device in-use
* checking.
*/
sav->sav_pending = dev;
sav->sav_npending = ndev;
for (i = 0; i < ndev; i++) {
if ((error = spa_config_parse(spa, &vd, dev[i], NULL, 0,
mode)) != 0)
goto out;
if (!vd->vdev_ops->vdev_op_leaf) {
vdev_free(vd);
error = SET_ERROR(EINVAL);
goto out;
}
vd->vdev_top = vd;
if ((error = vdev_open(vd)) == 0 &&
(error = vdev_label_init(vd, crtxg, label)) == 0) {
VERIFY(nvlist_add_uint64(dev[i], ZPOOL_CONFIG_GUID,
vd->vdev_guid) == 0);
}
vdev_free(vd);
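/*
 * Open or label failures on spares and l2cache devices
 * (VDEV_ALLOC_SPARE / VDEV_ALLOC_L2CACHE) are tolerated so that a
 * damaged auxiliary device does not block the whole operation.
 */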
if (error &&
(mode != VDEV_ALLOC_SPARE && mode != VDEV_ALLOC_L2CACHE))
goto out;
else
error = 0;
}
out:
sav->sav_pending = NULL;
sav->sav_npending = 0;
return (error);
}
static int
spa_validate_aux(spa_t *spa, nvlist_t *nvroot, uint64_t crtxg, int mode)
{
int error;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if ((error = spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_spares, ZPOOL_CONFIG_SPARES, SPA_VERSION_SPARES,
VDEV_LABEL_SPARE)) != 0) {
return (error);
}
return (spa_validate_aux_devs(spa, nvroot, crtxg, mode,
&spa->spa_l2cache, ZPOOL_CONFIG_L2CACHE, SPA_VERSION_L2CACHE,
VDEV_LABEL_L2CACHE));
}
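/*
 * Merge the given list of auxiliary devices (spares or l2cache) into the
 * existing sav_config under 'config', creating the nvlist if necessary.
 */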
static void
spa_set_aux_vdevs(spa_aux_vdev_t *sav, nvlist_t **devs, int ndevs,
const char *config)
{
int i;
if (sav->sav_config != NULL) {
nvlist_t **olddevs;
uint_t oldndevs;
nvlist_t **newdevs;
/*
* Generate new dev list by concatenating with the
* current dev list.
*/
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config, config,
&olddevs, &oldndevs) == 0);
newdevs = kmem_alloc(sizeof (void *) *
(ndevs + oldndevs), KM_SLEEP);
for (i = 0; i < oldndevs; i++)
VERIFY(nvlist_dup(olddevs[i], &newdevs[i],
KM_SLEEP) == 0);
for (i = 0; i < ndevs; i++)
VERIFY(nvlist_dup(devs[i], &newdevs[i + oldndevs],
KM_SLEEP) == 0);
VERIFY(nvlist_remove(sav->sav_config, config,
DATA_TYPE_NVLIST_ARRAY) == 0);
VERIFY(nvlist_add_nvlist_array(sav->sav_config,
config, newdevs, ndevs + oldndevs) == 0);
for (i = 0; i < oldndevs + ndevs; i++)
nvlist_free(newdevs[i]);
kmem_free(newdevs, (oldndevs + ndevs) * sizeof (void *));
} else {
/*
* Generate a new dev list.
*/
VERIFY(nvlist_alloc(&sav->sav_config, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(sav->sav_config, config,
devs, ndevs) == 0);
}
}
/*
* Stop and drop level 2 ARC devices
*/
void
spa_l2cache_drop(spa_t *spa)
{
vdev_t *vd;
int i;
spa_aux_vdev_t *sav = &spa->spa_l2cache;
for (i = 0; i < sav->sav_count; i++) {
uint64_t pool;
vd = sav->sav_vdevs[i];
ASSERT(vd != NULL);
if (spa_l2cache_exists(vd->vdev_guid, &pool) &&
pool != 0ULL && l2arc_vdev_present(vd))
l2arc_remove_vdev(vd);
}
}
/*
* Verify encryption parameters for spa creation. If we are encrypting, we must
* have the encryption feature flag enabled.
*/
static int
spa_create_check_encryption_params(dsl_crypto_params_t *dcp,
boolean_t has_encryption)
{
if (dcp->cp_crypt != ZIO_CRYPT_OFF &&
dcp->cp_crypt != ZIO_CRYPT_INHERIT &&
!has_encryption)
return (SET_ERROR(ENOTSUP));
return (dmu_objset_create_crypt_check(NULL, dcp));
}
/*
* Pool Creation
*/
int
spa_create(const char *pool, nvlist_t *nvroot, nvlist_t *props,
nvlist_t *zplprops, dsl_crypto_params_t *dcp)
{
spa_t *spa;
char *altroot = NULL;
vdev_t *rvd;
dsl_pool_t *dp;
dmu_tx_t *tx;
int error = 0;
uint64_t txg = TXG_INITIAL;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
uint64_t version, obj, root_dsobj = 0;
boolean_t has_features;
boolean_t has_encryption;
spa_feature_t feat;
char *feat_name;
char *poolname;
nvlist_t *nvl;
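/*
 * If a temporary name is supplied via the "tname" property, the pool is
 * created in-core under that name while the permanent name is recorded
 * in the config (see ZFS_IMPORT_TEMP_NAME below).
 */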
if (nvlist_lookup_string(props, "tname", &poolname) != 0)
poolname = (char *)pool;
/*
* If this pool already exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(poolname) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Allocate a new spa_t structure.
*/
nvl = fnvlist_alloc();
fnvlist_add_string(nvl, ZPOOL_CONFIG_POOL_NAME, pool);
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
spa = spa_add(poolname, nvl, altroot);
fnvlist_free(nvl);
spa_activate(spa, spa_mode_global);
if (props && (error = spa_prop_validate(spa, props))) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Temporary pool names should never be written to disk.
*/
if (poolname != pool)
spa->spa_import_flags |= ZFS_IMPORT_TEMP_NAME;
has_features = B_FALSE;
has_encryption = B_FALSE;
for (nvpair_t *elem = nvlist_next_nvpair(props, NULL);
elem != NULL; elem = nvlist_next_nvpair(props, elem)) {
if (zpool_prop_feature(nvpair_name(elem))) {
has_features = B_TRUE;
feat_name = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(feat_name, &feat));
if (feat == SPA_FEATURE_ENCRYPTION)
has_encryption = B_TRUE;
}
}
/* verify encryption params, if they were provided */
if (dcp != NULL) {
error = spa_create_check_encryption_params(dcp, has_encryption);
if (error != 0) {
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
}
if (has_features || nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version) != 0) {
version = SPA_VERSION;
}
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
spa->spa_first_txg = txg;
spa->spa_uberblock.ub_txg = txg - 1;
spa->spa_uberblock.ub_version = version;
spa->spa_ubsync = spa->spa_uberblock;
spa->spa_load_state = SPA_LOAD_CREATE;
+ spa->spa_removing_phys.sr_state = DSS_NONE;
+ spa->spa_removing_phys.sr_removing_vdev = -1;
+ spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
/*
* Create "The Godfather" zio to hold all async IOs
*/
spa->spa_async_zio_root = kmem_alloc(max_ncpus * sizeof (void *),
KM_SLEEP);
for (int i = 0; i < max_ncpus; i++) {
spa->spa_async_zio_root[i] = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
}
/*
* Create the root vdev.
*/
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
error = spa_config_parse(spa, &rvd, nvroot, NULL, 0, VDEV_ALLOC_ADD);
ASSERT(error != 0 || rvd != NULL);
ASSERT(error != 0 || spa->spa_root_vdev == rvd);
if (error == 0 && !zfs_allocatable_devs(nvroot))
error = SET_ERROR(EINVAL);
if (error == 0 &&
(error = vdev_create(rvd, txg, B_FALSE)) == 0 &&
(error = spa_validate_aux(spa, nvroot, txg,
VDEV_ALLOC_ADD)) == 0) {
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_metaslab_set_size(rvd->vdev_child[c]);
vdev_expand(rvd->vdev_child[c], txg);
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (error != 0) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Get the list of spares, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
/*
* Get the list of level 2 cache devices, if specified.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
spa->spa_is_initializing = B_TRUE;
spa->spa_dsl_pool = dp = dsl_pool_create(spa, zplprops, dcp, txg);
spa->spa_is_initializing = B_FALSE;
/*
* Create DDTs (dedup tables).
*/
ddt_create(spa);
spa_update_dspace(spa);
tx = dmu_tx_create_assigned(dp, txg);
/*
* Create the pool's history object.
*/
if (version >= SPA_VERSION_ZPOOL_HISTORY && !spa->spa_history)
spa_history_create_obj(spa, tx);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_CREATE);
spa_history_log_version(spa, "create", tx);
/*
* Create the pool config object.
*/
spa->spa_config_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, SPA_CONFIG_BLOCKSIZE,
DMU_OT_PACKED_NVLIST_SIZE, sizeof (uint64_t), tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CONFIG,
sizeof (uint64_t), 1, &spa->spa_config_object, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool config");
}
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CREATION_VERSION,
sizeof (uint64_t), 1, &version, tx) != 0) {
cmn_err(CE_PANIC, "failed to add pool version");
}
/* Newly created pools with the right version are always deflated. */
if (version >= SPA_VERSION_RAIDZ_DEFLATE) {
spa->spa_deflate = TRUE;
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx) != 0) {
cmn_err(CE_PANIC, "failed to add deflate");
}
}
/*
* Create the deferred-free bpobj. Turn off compression
* because sync-to-convergence takes longer if the blocksize
* keeps changing.
*/
obj = bpobj_alloc(spa->spa_meta_objset, 1 << 14, tx);
dmu_object_set_compress(spa->spa_meta_objset, obj,
ZIO_COMPRESS_OFF, tx);
if (zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_SYNC_BPOBJ,
sizeof (uint64_t), 1, &obj, tx) != 0) {
cmn_err(CE_PANIC, "failed to add bpobj");
}
VERIFY3U(0, ==, bpobj_open(&spa->spa_deferred_bpobj,
spa->spa_meta_objset, obj));
/*
* Generate some random noise for salted checksums to operate on.
*/
(void) random_get_pseudo_bytes(spa->spa_cksum_salt.zcs_bytes,
sizeof (spa->spa_cksum_salt.zcs_bytes));
/*
* Set pool properties.
*/
spa->spa_bootfs = zpool_prop_default_numeric(ZPOOL_PROP_BOOTFS);
spa->spa_delegation = zpool_prop_default_numeric(ZPOOL_PROP_DELEGATION);
spa->spa_failmode = zpool_prop_default_numeric(ZPOOL_PROP_FAILUREMODE);
spa->spa_autoexpand = zpool_prop_default_numeric(ZPOOL_PROP_AUTOEXPAND);
spa->spa_multihost = zpool_prop_default_numeric(ZPOOL_PROP_MULTIHOST);
if (props != NULL) {
spa_configfile_set(spa, props, B_FALSE);
spa_sync_props(props, tx);
}
dmu_tx_commit(tx);
/*
* If the root dataset is encrypted we will need to create key mappings
* for the zio layer before we start to write any data to disk and hold
* them until after the first txg has been synced. Waiting for the first
* transaction to complete also ensures that our bean counters are
* appropriately updated.
*/
if (dp->dp_root_dir->dd_crypto_obj != 0) {
root_dsobj = dsl_dir_phys(dp->dp_root_dir)->dd_head_dataset_obj;
VERIFY0(spa_keystore_create_mapping_impl(spa, root_dsobj,
dp->dp_root_dir, FTAG));
}
spa->spa_sync_on = B_TRUE;
txg_sync_start(dp);
mmp_thread_start(spa);
txg_wait_synced(dp, txg);
if (dp->dp_root_dir->dd_crypto_obj != 0)
VERIFY0(spa_keystore_remove_mapping(spa, root_dsobj, FTAG));
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
/*
* Don't count references from objsets that are already closed
* and are making their way through the eviction process.
*/
spa_evicting_os_wait(spa);
spa->spa_minref = refcount_count(&spa->spa_refcount);
spa->spa_load_state = SPA_LOAD_NONE;
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Import a non-root pool into the system.
*/
int
spa_import(char *pool, nvlist_t *config, nvlist_t *props, uint64_t flags)
{
spa_t *spa;
char *altroot = NULL;
spa_load_state_t state = SPA_LOAD_IMPORT;
zpool_rewind_policy_t policy;
uint64_t mode = spa_mode_global;
uint64_t readonly = B_FALSE;
int error;
nvlist_t *nvroot;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
/*
* If a pool with this name exists, return failure.
*/
mutex_enter(&spa_namespace_lock);
if (spa_lookup(pool) != NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EEXIST));
}
/*
* Create and initialize the spa structure.
*/
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_READONLY), &readonly);
if (readonly)
mode = FREAD;
spa = spa_add(pool, config, altroot);
spa->spa_import_flags = flags;
/*
* Verbatim import - Take a pool and insert it into the namespace
* as if it had been loaded at boot.
*/
if (spa->spa_import_flags & ZFS_IMPORT_VERBATIM) {
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
mutex_exit(&spa_namespace_lock);
return (0);
}
spa_activate(spa, mode);
/*
* Don't start async tasks until we know everything is healthy.
*/
spa_async_suspend(spa);
zpool_get_rewind_policy(config, &policy);
if (policy.zrp_request & ZPOOL_DO_REWIND)
state = SPA_LOAD_RECOVER;
/*
* Pass off the heavy lifting to spa_load(). Pass TRUE for mosconfig
* because the user-supplied config is actually the one to trust when
* doing an import.
*/
if (state != SPA_LOAD_RECOVER)
spa->spa_last_ubsync_txg = spa->spa_load_txg = 0;
error = spa_load_best(spa, state, B_TRUE, policy.zrp_txg,
policy.zrp_request);
/*
* Propagate anything learned while loading the pool and pass it
* back to caller (i.e. rewind info, missing devices, etc).
*/
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* Toss any existing sparelist, as it doesn't have any validity
* anymore, and conflicts with spa_has_spare().
*/
if (spa->spa_spares.sav_config) {
nvlist_free(spa->spa_spares.sav_config);
spa->spa_spares.sav_config = NULL;
spa_load_spares(spa);
}
if (spa->spa_l2cache.sav_config) {
nvlist_free(spa->spa_l2cache.sav_config);
spa->spa_l2cache.sav_config = NULL;
spa_load_l2cache(spa);
}
VERIFY(nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE,
&nvroot) == 0);
spa_config_exit(spa, SCL_ALL, FTAG);
if (props != NULL)
spa_configfile_set(spa, props, B_FALSE);
if (error != 0 || (props && spa_writeable(spa) &&
(error = spa_prop_set(spa, props)))) {
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (error);
}
spa_async_resume(spa);
/*
* Override any spares and level 2 cache devices as specified by
* the user, as these may have correct device names/devids, etc.
*/
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES,
&spares, &nspares) == 0) {
if (spa->spa_spares.sav_config)
VERIFY(nvlist_remove(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_spares.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_spares.sav_config,
ZPOOL_CONFIG_SPARES, spares, nspares) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_spares(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE,
&l2cache, &nl2cache) == 0) {
if (spa->spa_l2cache.sav_config)
VERIFY(nvlist_remove(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, DATA_TYPE_NVLIST_ARRAY) == 0);
else
VERIFY(nvlist_alloc(&spa->spa_l2cache.sav_config,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_nvlist_array(spa->spa_l2cache.sav_config,
ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache) == 0);
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa_load_l2cache(spa);
spa_config_exit(spa, SCL_ALL, FTAG);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* Check for any removed devices.
*/
if (spa->spa_autoreplace) {
spa_aux_check_removed(&spa->spa_spares);
spa_aux_check_removed(&spa->spa_l2cache);
}
if (spa_writeable(spa)) {
/*
* Update the config cache to include the newly-imported pool.
*/
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
}
/*
* It's possible that the pool was expanded while it was exported.
* We kick off an async task to handle this for us.
*/
spa_async_request(spa, SPA_ASYNC_AUTOEXPAND);
spa_history_log_version(spa, "import", NULL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_IMPORT);
zvol_create_minors(spa, pool, B_TRUE);
mutex_exit(&spa_namespace_lock);
return (0);
}
nvlist_t *
spa_tryimport(nvlist_t *tryconfig)
{
nvlist_t *config = NULL;
char *poolname;
spa_t *spa;
uint64_t state;
int error;
if (nvlist_lookup_string(tryconfig, ZPOOL_CONFIG_POOL_NAME, &poolname))
return (NULL);
if (nvlist_lookup_uint64(tryconfig, ZPOOL_CONFIG_POOL_STATE, &state))
return (NULL);
/*
* Create and initialize the spa structure.
*/
mutex_enter(&spa_namespace_lock);
spa = spa_add(TRYIMPORT_NAME, tryconfig, NULL);
spa_activate(spa, FREAD);
/*
* Pass off the heavy lifting to spa_load().
* Pass TRUE for mosconfig because the user-supplied config
* is actually the one to trust when doing an import.
*/
error = spa_load(spa, SPA_LOAD_TRYIMPORT, SPA_IMPORT_EXISTING, B_TRUE);
/*
* If 'tryconfig' was at least parsable, return the current config.
*/
if (spa->spa_root_vdev != NULL) {
config = spa_config_generate(spa, NULL, -1ULL, B_TRUE);
VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME,
poolname) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
state) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_TIMESTAMP,
spa->spa_uberblock.ub_timestamp) == 0);
VERIFY(nvlist_add_nvlist(config, ZPOOL_CONFIG_LOAD_INFO,
spa->spa_load_info) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA,
spa->spa_errata) == 0);
/*
* If the bootfs property exists on this pool then we
* copy it out so that external consumers can tell which
* pools are bootable.
*/
if ((!error || error == EEXIST) && spa->spa_bootfs) {
char *tmpname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
/*
* We have to play games with the name since the
* pool was opened as TRYIMPORT_NAME.
*/
if (dsl_dsobj_to_dsname(spa_name(spa),
spa->spa_bootfs, tmpname) == 0) {
char *cp;
char *dsname;
dsname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
cp = strchr(tmpname, '/');
if (cp == NULL) {
(void) strlcpy(dsname, tmpname,
MAXPATHLEN);
} else {
(void) snprintf(dsname, MAXPATHLEN,
"%s/%s", poolname, ++cp);
}
VERIFY(nvlist_add_string(config,
ZPOOL_CONFIG_BOOTFS, dsname) == 0);
kmem_free(dsname, MAXPATHLEN);
}
kmem_free(tmpname, MAXPATHLEN);
}
/*
* Add the list of hot spares and level 2 cache devices.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_add_spares(spa, config);
spa_add_l2cache(spa, config);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
spa_unload(spa);
spa_deactivate(spa);
spa_remove(spa);
mutex_exit(&spa_namespace_lock);
return (config);
}
/*
* Pool export/destroy
*
* The act of destroying or exporting a pool is very simple. We make sure there
* is no more pending I/O and any references to the pool are gone. Then, we
* update the pool state and sync all the labels to disk, removing the
* configuration from the cache afterwards. If the 'hardforce' flag is set, then
* we don't sync the labels or remove the configuration cache.
*/
static int
spa_export_common(char *pool, int new_state, nvlist_t **oldconfig,
boolean_t force, boolean_t hardforce)
{
spa_t *spa;
if (oldconfig)
*oldconfig = NULL;
if (!(spa_mode_global & FWRITE))
return (SET_ERROR(EROFS));
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(pool)) == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(ENOENT));
}
/*
* Put a hold on the pool, drop the namespace lock, stop async tasks,
* reacquire the namespace lock, and see if we can export.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
if (spa->spa_zvol_taskq) {
zvol_remove_minors(spa, spa_name(spa), B_TRUE);
taskq_wait(spa->spa_zvol_taskq);
}
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
goto export_spa;
/*
* The pool will be in core if it's openable, in which case we can
* modify its state. Objsets may be open only because they're dirty,
* so we have to force it to sync before checking spa_refcnt.
*/
if (spa->spa_sync_on) {
txg_wait_synced(spa->spa_dsl_pool, 0);
spa_evicting_os_wait(spa);
}
/*
* A pool cannot be exported or destroyed if there are active
* references. If we are resetting a pool, allow references by
* fault injection handlers.
*/
if (!spa_refcount_zero(spa) ||
(spa->spa_inject_ref != 0 &&
new_state != POOL_STATE_UNINITIALIZED)) {
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EBUSY));
}
if (spa->spa_sync_on) {
/*
* A pool cannot be exported if it has an active shared spare.
 * This is to prevent other pools from stealing the active spare
 * from an exported pool. The user can, however, force the export
 * of such a pool.
*/
if (!force && new_state == POOL_STATE_EXPORTED &&
spa_has_active_shared_spare(spa)) {
spa_async_resume(spa);
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EXDEV));
}
/*
* We want this to be reflected on every label,
* so mark them all dirty. spa_unload() will do the
* final sync that pushes these changes out.
*/
if (new_state != POOL_STATE_UNINITIALIZED && !hardforce) {
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
spa->spa_state = new_state;
spa->spa_final_txg = spa_last_synced_txg(spa) +
TXG_DEFER_SIZE + 1;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
}
}
export_spa:
if (new_state == POOL_STATE_DESTROYED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_DESTROY);
else if (new_state == POOL_STATE_EXPORTED)
spa_event_notify(spa, NULL, NULL, ESC_ZFS_POOL_EXPORT);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
if (oldconfig && spa->spa_config)
VERIFY(nvlist_dup(spa->spa_config, oldconfig, 0) == 0);
if (new_state != POOL_STATE_UNINITIALIZED) {
if (!hardforce)
- spa_config_sync(spa, B_TRUE, B_TRUE);
+ spa_write_cachefile(spa, B_TRUE, B_TRUE);
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Destroy a storage pool.
*/
int
spa_destroy(char *pool)
{
return (spa_export_common(pool, POOL_STATE_DESTROYED, NULL,
B_FALSE, B_FALSE));
}
/*
* Export a storage pool.
*/
int
spa_export(char *pool, nvlist_t **oldconfig, boolean_t force,
boolean_t hardforce)
{
return (spa_export_common(pool, POOL_STATE_EXPORTED, oldconfig,
force, hardforce));
}
/*
* Similar to spa_export(), this unloads the spa_t without actually removing it
* from the namespace in any way.
*/
int
spa_reset(char *pool)
{
return (spa_export_common(pool, POOL_STATE_UNINITIALIZED, NULL,
B_FALSE, B_FALSE));
}
/*
* ==========================================================================
* Device manipulation
* ==========================================================================
*/
/*
* Add a device to a storage pool.
*/
int
spa_vdev_add(spa_t *spa, nvlist_t *nvroot)
{
uint64_t txg, id;
int error;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd, *tvd;
nvlist_t **spares, **l2cache;
uint_t nspares, nl2cache;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
if ((error = spa_config_parse(spa, &vd, nvroot, NULL, 0,
VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
spa->spa_pending_vdev = vd; /* spa_vdev_exit() will clear this */
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_SPARES, &spares,
&nspares) != 0)
nspares = 0;
if (nvlist_lookup_nvlist_array(nvroot, ZPOOL_CONFIG_L2CACHE, &l2cache,
&nl2cache) != 0)
nl2cache = 0;
if (vd->vdev_children == 0 && nspares == 0 && nl2cache == 0)
return (spa_vdev_exit(spa, vd, txg, EINVAL));
if (vd->vdev_children != 0 &&
(error = vdev_create(vd, txg, B_FALSE)) != 0)
return (spa_vdev_exit(spa, vd, txg, error));
/*
* We must validate the spares and l2cache devices after checking the
* children. Otherwise, vdev_inuse() will blindly overwrite the spare.
*/
if ((error = spa_validate_aux(spa, nvroot, txg, VDEV_ALLOC_ADD)) != 0)
return (spa_vdev_exit(spa, vd, txg, error));
/*
- * Transfer each new top-level vdev from vd to rvd.
+ * If we are in the middle of a device removal, we can only add
+ * devices which match the existing devices in the pool.
+ * If we are in the middle of a removal, or have any indirect
+ * vdevs, we cannot add raidz top-level vdevs.
*/
+ if (spa->spa_vdev_removal != NULL ||
+ spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
+ for (int c = 0; c < vd->vdev_children; c++) {
+ tvd = vd->vdev_child[c];
+ if (spa->spa_vdev_removal != NULL &&
+ tvd->vdev_ashift !=
+ spa->spa_vdev_removal->svr_vdev->vdev_ashift) {
+ return (spa_vdev_exit(spa, vd, txg, EINVAL));
+ }
+ /* Fail if top level vdev is raidz */
+ if (tvd->vdev_ops == &vdev_raidz_ops) {
+ return (spa_vdev_exit(spa, vd, txg, EINVAL));
+ }
+ /*
+ * The top-level mirror must be composed
+ * of leaf vdevs only.
+ */
+ if (tvd->vdev_ops == &vdev_mirror_ops) {
+ for (uint64_t cid = 0;
+ cid < tvd->vdev_children; cid++) {
+ vdev_t *cvd = tvd->vdev_child[cid];
+ if (!cvd->vdev_ops->vdev_op_leaf) {
+ return (spa_vdev_exit(spa, vd,
+ txg, EINVAL));
+ }
+ }
+ }
+ }
+ }
+
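+ /*
+ * Transfer each new top-level vdev from vd to rvd.
+ */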
for (int c = 0; c < vd->vdev_children; c++) {
/*
* Set the vdev id to the first hole, if one exists.
*/
for (id = 0; id < rvd->vdev_children; id++) {
if (rvd->vdev_child[id]->vdev_ishole) {
vdev_free(rvd->vdev_child[id]);
break;
}
}
tvd = vd->vdev_child[c];
vdev_remove_child(vd, tvd);
tvd->vdev_id = id;
vdev_add_child(rvd, tvd);
vdev_config_dirty(tvd);
}
if (nspares != 0) {
spa_set_aux_vdevs(&spa->spa_spares, spares, nspares,
ZPOOL_CONFIG_SPARES);
spa_load_spares(spa);
spa->spa_spares.sav_sync = B_TRUE;
}
if (nl2cache != 0) {
spa_set_aux_vdevs(&spa->spa_l2cache, l2cache, nl2cache,
ZPOOL_CONFIG_L2CACHE);
spa_load_l2cache(spa);
spa->spa_l2cache.sav_sync = B_TRUE;
}
/*
* We have to be careful when adding new vdevs to an existing pool.
* If other threads start allocating from these vdevs before we
* sync the config cache, and we lose power, then upon reboot we may
* fail to open the pool because there are DVAs that the config cache
* can't translate. Therefore, we first add the vdevs without
* initializing metaslabs; sync the config cache (via spa_vdev_exit());
* and then let spa_config_update() initialize the new metaslabs.
*
* spa_load() checks for added-but-not-initialized vdevs, so that
* if we lose power at any point in this sequence, the remaining
* steps will be completed the next time we load the pool.
*/
(void) spa_vdev_exit(spa, vd, txg, 0);
mutex_enter(&spa_namespace_lock);
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
spa_event_notify(spa, NULL, NULL, ESC_ZFS_VDEV_ADD);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
 * Attach a device to a mirror. The arguments are the guid of any device
 * in the mirror, and the nvroot for the new device. If the guid specifies
 * a device that is not mirrored, we automatically insert the mirror vdev.
*
* If 'replacing' is specified, the new device is intended to replace the
* existing device; in this case the two devices are made into their own
* mirror using the 'replacing' vdev, which is functionally identical to
* the mirror vdev (it actually reuses all the same ops) but has a few
* extra rules: you can't attach to it after it's been created, and upon
* completion of resilvering, the first disk (the one being replaced)
* is automatically detached.
*/
int
spa_vdev_attach(spa_t *spa, uint64_t guid, nvlist_t *nvroot, int replacing)
{
uint64_t txg, dtl_max_txg;
ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
vdev_t *oldvd, *newvd, *newrootvd, *pvd, *tvd;
vdev_ops_t *pvops;
char *oldvdpath, *newvdpath;
int newvd_isspare;
int error;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
oldvd = spa_lookup_by_guid(spa, guid, B_FALSE);
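+ /*
+ * Attaching or replacing a device is not supported while a device
+ * removal is in progress, or once any top-level vdev has been
+ * removed and replaced by an indirect vdev.
+ */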
+ if (spa->spa_vdev_removal != NULL ||
+ spa->spa_removing_phys.sr_prev_indirect_vdev != -1) {
+ return (spa_vdev_exit(spa, NULL, txg, EBUSY));
+ }
+
if (oldvd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!oldvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = oldvd->vdev_parent;
if ((error = spa_config_parse(spa, &newrootvd, nvroot, NULL, 0,
VDEV_ALLOC_ATTACH)) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
if (newrootvd->vdev_children != 1)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
newvd = newrootvd->vdev_child[0];
if (!newvd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, newrootvd, txg, EINVAL));
if ((error = vdev_create(newrootvd, txg, replacing)) != 0)
return (spa_vdev_exit(spa, newrootvd, txg, error));
/*
* Spares can't replace logs
*/
if (oldvd->vdev_top->vdev_islog && newvd->vdev_isspare)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
if (!replacing) {
/*
* For attach, the only allowable parent is a mirror or the root
* vdev.
*/
if (pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_root_ops)
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
pvops = &vdev_mirror_ops;
} else {
/*
* Active hot spares can only be replaced by inactive hot
* spares.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
oldvd->vdev_isspare &&
!spa_has_spare(spa, newvd->vdev_guid))
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
/*
* If the source is a hot spare, and the parent isn't already a
* spare, then we want to create a new hot spare. Otherwise, we
* want to create a replacing vdev. The user is not allowed to
* attach to a spared vdev child unless the 'isspare' state is
* the same (spare replaces spare, non-spare replaces
* non-spare).
*/
if (pvd->vdev_ops == &vdev_replacing_ops &&
spa_version(spa) < SPA_VERSION_MULTI_REPLACE) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
} else if (pvd->vdev_ops == &vdev_spare_ops &&
newvd->vdev_isspare != oldvd->vdev_isspare) {
return (spa_vdev_exit(spa, newrootvd, txg, ENOTSUP));
}
if (newvd->vdev_isspare)
pvops = &vdev_spare_ops;
else
pvops = &vdev_replacing_ops;
}
/*
* Make sure the new device is big enough.
*/
if (newvd->vdev_asize < vdev_get_min_asize(oldvd))
return (spa_vdev_exit(spa, newrootvd, txg, EOVERFLOW));
/*
* The new device cannot have a higher alignment requirement
* than the top-level vdev.
*/
if (newvd->vdev_ashift > oldvd->vdev_top->vdev_ashift)
return (spa_vdev_exit(spa, newrootvd, txg, EDOM));
/*
* If this is an in-place replacement, update oldvd's path and devid
* to make it distinguishable from newvd, and unopenable from now on.
*/
if (strcmp(oldvd->vdev_path, newvd->vdev_path) == 0) {
spa_strfree(oldvd->vdev_path);
oldvd->vdev_path = kmem_alloc(strlen(newvd->vdev_path) + 5,
KM_SLEEP);
(void) sprintf(oldvd->vdev_path, "%s/%s",
newvd->vdev_path, "old");
if (oldvd->vdev_devid != NULL) {
spa_strfree(oldvd->vdev_devid);
oldvd->vdev_devid = NULL;
}
}
/* mark the device being resilvered */
newvd->vdev_resilver_txg = txg;
/*
* If the parent is not a mirror, or if we're replacing, insert the new
* mirror/replacing/spare vdev above oldvd.
*/
if (pvd->vdev_ops != pvops)
pvd = vdev_add_parent(oldvd, pvops);
ASSERT(pvd->vdev_top->vdev_parent == rvd);
ASSERT(pvd->vdev_ops == pvops);
ASSERT(oldvd->vdev_parent == pvd);
/*
* Extract the new device from its root and add it to pvd.
*/
vdev_remove_child(newrootvd, newvd);
newvd->vdev_id = pvd->vdev_children;
newvd->vdev_crtxg = oldvd->vdev_crtxg;
vdev_add_child(pvd, newvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(pvd);
tvd = newvd->vdev_top;
ASSERT(pvd->vdev_top == tvd);
ASSERT(tvd->vdev_parent == rvd);
vdev_config_dirty(tvd);
/*
* Set newvd's DTL to [TXG_INITIAL, dtl_max_txg) so that we account
* for any dmu_sync-ed blocks. It will propagate upward when
* spa_vdev_exit() calls vdev_dtl_reassess().
*/
dtl_max_txg = txg + TXG_CONCURRENT_STATES;
vdev_dtl_dirty(newvd, DTL_MISSING, TXG_INITIAL,
dtl_max_txg - TXG_INITIAL);
if (newvd->vdev_isspare) {
spa_spare_activate(newvd);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_SPARE);
}
oldvdpath = spa_strdup(oldvd->vdev_path);
newvdpath = spa_strdup(newvd->vdev_path);
newvd_isspare = newvd->vdev_isspare;
/*
* Mark newvd's DTL dirty in this txg.
*/
vdev_dirty(tvd, VDD_DTL, newvd, txg);
/*
* Schedule the resilver to restart in the future. We do this to
* ensure that dmu_sync-ed blocks have been stitched into the
* respective datasets.
*/
dsl_resilver_restart(spa->spa_dsl_pool, dtl_max_txg);
if (spa->spa_bootfs)
spa_event_notify(spa, newvd, NULL, ESC_ZFS_BOOTFS_VDEV_ATTACH);
spa_event_notify(spa, newvd, NULL, ESC_ZFS_VDEV_ATTACH);
/*
* Commit the config
*/
(void) spa_vdev_exit(spa, newrootvd, dtl_max_txg, 0);
spa_history_log_internal(spa, "vdev attach", NULL,
"%s vdev=%s %s vdev=%s",
replacing && newvd_isspare ? "spare in" :
replacing ? "replace" : "attach", newvdpath,
replacing ? "for" : "to", oldvdpath);
spa_strfree(oldvdpath);
spa_strfree(newvdpath);
return (0);
}
/*
* Detach a device from a mirror or replacing vdev.
*
* If 'replace_done' is specified, only detach if the parent
* is a replacing vdev.
*/
int
spa_vdev_detach(spa_t *spa, uint64_t guid, uint64_t pguid, int replace_done)
{
uint64_t txg;
int error;
ASSERTV(vdev_t *rvd = spa->spa_root_vdev);
vdev_t *vd, *pvd, *cvd, *tvd;
boolean_t unspare = B_FALSE;
uint64_t unspare_guid = 0;
char *vdpath;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
vd = spa_lookup_by_guid(spa, guid, B_FALSE);
if (vd == NULL)
return (spa_vdev_exit(spa, NULL, txg, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
pvd = vd->vdev_parent;
/*
* If the parent/child relationship is not as expected, don't do it.
* Consider M(A,R(B,C)) -- that is, a mirror of A with a replacing
* vdev that's replacing B with C. The user's intent in replacing
* is to go from M(A,B) to M(A,C). If the user decides to cancel
* the replace by detaching C, the expected behavior is to end up
* M(A,B). But suppose that right after deciding to detach C,
* the replacement of B completes. We would have M(A,C), and then
* ask to detach C, which would leave us with just A -- not what
* the user wanted. To prevent this, we make sure that the
* parent/child relationship hasn't changed -- in this example,
* that C's parent is still the replacing vdev R.
*/
if (pvd->vdev_guid != pguid && pguid != 0)
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
/*
* Only 'replacing' or 'spare' vdevs can be replaced.
*/
if (replace_done && pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
ASSERT(pvd->vdev_ops != &vdev_spare_ops ||
spa_version(spa) >= SPA_VERSION_SPARES);
/*
* Only mirror, replacing, and spare vdevs support detach.
*/
if (pvd->vdev_ops != &vdev_replacing_ops &&
pvd->vdev_ops != &vdev_mirror_ops &&
pvd->vdev_ops != &vdev_spare_ops)
return (spa_vdev_exit(spa, NULL, txg, ENOTSUP));
/*
* If this device has the only valid copy of some data,
* we cannot safely detach it.
*/
if (vdev_dtl_required(vd))
return (spa_vdev_exit(spa, NULL, txg, EBUSY));
ASSERT(pvd->vdev_children >= 2);
/*
* If we are detaching the second disk from a replacing vdev, then
* check to see if we changed the original vdev's path to have "/old"
* at the end in spa_vdev_attach(). If so, undo that change now.
*/
if (pvd->vdev_ops == &vdev_replacing_ops && vd->vdev_id > 0 &&
vd->vdev_path != NULL) {
size_t len = strlen(vd->vdev_path);
for (int c = 0; c < pvd->vdev_children; c++) {
cvd = pvd->vdev_child[c];
if (cvd == vd || cvd->vdev_path == NULL)
continue;
if (strncmp(cvd->vdev_path, vd->vdev_path, len) == 0 &&
strcmp(cvd->vdev_path + len, "/old") == 0) {
spa_strfree(cvd->vdev_path);
cvd->vdev_path = spa_strdup(vd->vdev_path);
break;
}
}
}
/*
* If we are detaching the original disk from a spare, then it implies
* that the spare should become a real disk, and be removed from the
* active spare list for the pool.
*/
if (pvd->vdev_ops == &vdev_spare_ops &&
vd->vdev_id == 0 &&
pvd->vdev_child[pvd->vdev_children - 1]->vdev_isspare)
unspare = B_TRUE;
/*
* Erase the disk labels so the disk can be used for other things.
* This must be done after all other error cases are handled,
* but before we disembowel vd (so we can still do I/O to it).
* But if we can't do it, don't treat the error as fatal --
* it may be that the unwritability of the disk is the reason
* it's being detached!
*/
error = vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
/*
* Remove vd from its parent and compact the parent's children.
*/
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
/*
* Remember one of the remaining children so we can get tvd below.
*/
cvd = pvd->vdev_child[pvd->vdev_children - 1];
/*
* If we need to remove the remaining child from the list of hot spares,
* do it now, marking the vdev as no longer a spare in the process.
* We must do this before vdev_remove_parent(), because that can
* change the GUID if it creates a new toplevel GUID. For a similar
* reason, we must remove the spare now, in the same txg as the detach;
* otherwise someone could attach a new sibling, change the GUID, and
* the subsequent attempt to spa_vdev_remove(unspare_guid) would fail.
*/
if (unspare) {
ASSERT(cvd->vdev_isspare);
spa_spare_remove(cvd);
unspare_guid = cvd->vdev_guid;
(void) spa_vdev_remove(spa, unspare_guid, B_TRUE);
cvd->vdev_unspare = B_TRUE;
}
/*
* If the parent mirror/replacing vdev only has one child,
* the parent is no longer needed. Remove it from the tree.
*/
if (pvd->vdev_children == 1) {
if (pvd->vdev_ops == &vdev_spare_ops)
cvd->vdev_unspare = B_FALSE;
vdev_remove_parent(cvd);
}
/*
* We don't set tvd until now because the parent we just removed
* may have been the previous top-level vdev.
*/
tvd = cvd->vdev_top;
ASSERT(tvd->vdev_parent == rvd);
/*
* Reevaluate the parent vdev state.
*/
vdev_propagate_state(cvd);
/*
* If the 'autoexpand' property is set on the pool then automatically
* try to expand the size of the pool. For example if the device we
* just detached was smaller than the others, it may be possible to
* add metaslabs (i.e. grow the pool). We need to reopen the vdev
* first so that we can obtain the updated sizes of the leaf vdevs.
*/
if (spa->spa_autoexpand) {
vdev_reopen(tvd);
vdev_expand(tvd, txg);
}
vdev_config_dirty(tvd);
/*
* Mark vd's DTL as dirty in this txg. vdev_dtl_sync() will see that
* vd->vdev_detached is set and free vd's DTL object in syncing context.
* But first make sure we're not on any *other* txg's DTL list, to
* prevent vd from being accessed after it's freed.
*/
vdpath = spa_strdup(vd->vdev_path ? vd->vdev_path : "none");
for (int t = 0; t < TXG_SIZE; t++)
(void) txg_list_remove_this(&tvd->vdev_dtl_list, vd, t);
vd->vdev_detached = B_TRUE;
vdev_dirty(tvd, VDD_DTL, vd, txg);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE);
/* hang on to the spa before we release the lock */
spa_open_ref(spa, FTAG);
error = spa_vdev_exit(spa, vd, txg, 0);
spa_history_log_internal(spa, "detach", NULL,
"vdev=%s", vdpath);
spa_strfree(vdpath);
/*
* If this was the removal of the original device in a hot spare vdev,
* then we want to go through and remove the device from the hot spare
* list of every other pool.
*/
if (unspare) {
spa_t *altspa = NULL;
mutex_enter(&spa_namespace_lock);
while ((altspa = spa_next(altspa)) != NULL) {
if (altspa->spa_state != POOL_STATE_ACTIVE ||
altspa == spa)
continue;
spa_open_ref(altspa, FTAG);
mutex_exit(&spa_namespace_lock);
(void) spa_vdev_remove(altspa, unspare_guid, B_TRUE);
mutex_enter(&spa_namespace_lock);
spa_close(altspa, FTAG);
}
mutex_exit(&spa_namespace_lock);
/* search the rest of the vdevs for spares to remove */
spa_vdev_resilver_done(spa);
}
/* all done with the spa; OK to release */
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
return (error);
}
/*
* Split a set of devices from their mirrors, and create a new pool from them.
*/
int
spa_vdev_split_mirror(spa_t *spa, char *newname, nvlist_t *config,
nvlist_t *props, boolean_t exp)
{
int error = 0;
uint64_t txg, *glist;
spa_t *newspa;
uint_t c, children, lastlog;
nvlist_t **child, *nvl, *tmp;
dmu_tx_t *tx;
char *altroot = NULL;
vdev_t *rvd, **vml = NULL; /* vdev modify list */
boolean_t activate_slog;
ASSERT(spa_writeable(spa));
txg = spa_vdev_enter(spa);
/* clear the log and flush everything up to now */
activate_slog = spa_passivate_log(spa);
(void) spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
- error = spa_offline_log(spa);
+ error = spa_reset_logs(spa);
txg = spa_vdev_config_enter(spa);
if (activate_slog)
spa_activate_log(spa);
if (error != 0)
return (spa_vdev_exit(spa, NULL, txg, error));
/* check new spa name before going any further */
if (spa_lookup(newname) != NULL)
return (spa_vdev_exit(spa, NULL, txg, EEXIST));
/*
* scan through all the children to ensure they're all mirrors
*/
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, &nvl) != 0 ||
nvlist_lookup_nvlist_array(nvl, ZPOOL_CONFIG_CHILDREN, &child,
&children) != 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* first, check to ensure we've got the right child count */
rvd = spa->spa_root_vdev;
lastlog = 0;
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *vd = rvd->vdev_child[c];
/* don't count the logs and other non-concrete vdevs as children */
- if (vd->vdev_islog || vd->vdev_ishole) {
+ if (vd->vdev_islog || !vdev_is_concrete(vd)) {
if (lastlog == 0)
lastlog = c;
continue;
}
lastlog = 0;
}
if (children != (lastlog != 0 ? lastlog : rvd->vdev_children))
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
/* next, ensure no spare or cache devices are part of the split */
if (nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_SPARES, &tmp) == 0 ||
nvlist_lookup_nvlist(nvl, ZPOOL_CONFIG_L2CACHE, &tmp) == 0)
return (spa_vdev_exit(spa, NULL, txg, EINVAL));
vml = kmem_zalloc(children * sizeof (vdev_t *), KM_SLEEP);
glist = kmem_zalloc(children * sizeof (uint64_t), KM_SLEEP);
/* then, loop over each vdev and validate it */
for (c = 0; c < children; c++) {
uint64_t is_hole = 0;
(void) nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_IS_HOLE,
&is_hole);
if (is_hole != 0) {
if (spa->spa_root_vdev->vdev_child[c]->vdev_ishole ||
spa->spa_root_vdev->vdev_child[c]->vdev_islog) {
continue;
} else {
error = SET_ERROR(EINVAL);
break;
}
}
/* which disk is going to be split? */
if (nvlist_lookup_uint64(child[c], ZPOOL_CONFIG_GUID,
&glist[c]) != 0) {
error = SET_ERROR(EINVAL);
break;
}
/* look it up in the spa */
vml[c] = spa_lookup_by_guid(spa, glist[c], B_FALSE);
if (vml[c] == NULL) {
error = SET_ERROR(ENODEV);
break;
}
/* make sure there's nothing stopping the split */
if (vml[c]->vdev_parent->vdev_ops != &vdev_mirror_ops ||
vml[c]->vdev_islog ||
- vml[c]->vdev_ishole ||
+ !vdev_is_concrete(vml[c]) ||
vml[c]->vdev_isspare ||
vml[c]->vdev_isl2cache ||
!vdev_writeable(vml[c]) ||
vml[c]->vdev_children != 0 ||
vml[c]->vdev_state != VDEV_STATE_HEALTHY ||
c != spa->spa_root_vdev->vdev_child[c]->vdev_id) {
error = SET_ERROR(EINVAL);
break;
}
if (vdev_dtl_required(vml[c])) {
error = SET_ERROR(EBUSY);
break;
}
/* we need certain info from the top level */
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_ARRAY,
vml[c]->vdev_top->vdev_ms_array) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_METASLAB_SHIFT,
vml[c]->vdev_top->vdev_ms_shift) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASIZE,
vml[c]->vdev_top->vdev_asize) == 0);
VERIFY(nvlist_add_uint64(child[c], ZPOOL_CONFIG_ASHIFT,
vml[c]->vdev_top->vdev_ashift) == 0);
/* transfer per-vdev ZAPs */
ASSERT3U(vml[c]->vdev_leaf_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_LEAF_ZAP, vml[c]->vdev_leaf_zap));
ASSERT3U(vml[c]->vdev_top->vdev_top_zap, !=, 0);
VERIFY0(nvlist_add_uint64(child[c],
ZPOOL_CONFIG_VDEV_TOP_ZAP,
vml[c]->vdev_parent->vdev_top_zap));
}
if (error != 0) {
kmem_free(vml, children * sizeof (vdev_t *));
kmem_free(glist, children * sizeof (uint64_t));
return (spa_vdev_exit(spa, NULL, txg, error));
}
/* stop writers from using the disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_TRUE;
}
vdev_reopen(spa->spa_root_vdev);
/*
* Temporarily record the splitting vdevs in the spa config. This
* will disappear once the config is regenerated.
*/
VERIFY(nvlist_alloc(&nvl, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64_array(nvl, ZPOOL_CONFIG_SPLIT_LIST,
glist, children) == 0);
kmem_free(glist, children * sizeof (uint64_t));
mutex_enter(&spa->spa_props_lock);
VERIFY(nvlist_add_nvlist(spa->spa_config, ZPOOL_CONFIG_SPLIT,
nvl) == 0);
mutex_exit(&spa->spa_props_lock);
spa->spa_config_splitting = nvl;
vdev_config_dirty(spa->spa_root_vdev);
/* configure and create the new pool */
VERIFY(nvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, newname) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE,
exp ? POOL_STATE_EXPORTED : POOL_STATE_ACTIVE) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa_version(spa)) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG,
spa->spa_config_txg) == 0);
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID,
spa_generate_guid(NULL)) == 0);
VERIFY0(nvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS));
(void) nvlist_lookup_string(props,
zpool_prop_to_name(ZPOOL_PROP_ALTROOT), &altroot);
/* add the new pool to the namespace */
newspa = spa_add(newname, config, altroot);
newspa->spa_avz_action = AVZ_ACTION_REBUILD;
newspa->spa_config_txg = spa->spa_config_txg;
spa_set_log_state(newspa, SPA_LOG_CLEAR);
/* release the spa config lock, retaining the namespace lock */
spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 1);
spa_activate(newspa, spa_mode_global);
spa_async_suspend(newspa);
/* create the new pool from the disks of the original pool */
error = spa_load(newspa, SPA_LOAD_IMPORT, SPA_IMPORT_ASSEMBLE, B_TRUE);
if (error)
goto out;
/* if that worked, generate a real config for the new pool */
if (newspa->spa_root_vdev != NULL) {
VERIFY(nvlist_alloc(&newspa->spa_config_splitting,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(newspa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, spa_guid(spa)) == 0);
spa_config_set(newspa, spa_config_generate(newspa, NULL, -1ULL,
B_TRUE));
}
/* set the props */
if (props != NULL) {
spa_configfile_set(newspa, props, B_FALSE);
error = spa_prop_set(newspa, props);
if (error)
goto out;
}
/* flush everything */
txg = spa_vdev_config_enter(newspa);
vdev_config_dirty(newspa->spa_root_vdev);
(void) spa_vdev_config_exit(newspa, NULL, txg, 0, FTAG);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 2);
spa_async_resume(newspa);
/* finally, update the original pool's config */
txg = spa_vdev_config_enter(spa);
tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0)
dmu_tx_abort(tx);
for (c = 0; c < children; c++) {
if (vml[c] != NULL) {
vdev_split(vml[c]);
if (error == 0)
spa_history_log_internal(spa, "detach", tx,
"vdev=%s", vml[c]->vdev_path);
vdev_free(vml[c]);
}
}
spa->spa_avz_action = AVZ_ACTION_REBUILD;
vdev_config_dirty(spa->spa_root_vdev);
spa->spa_config_splitting = NULL;
nvlist_free(nvl);
if (error == 0)
dmu_tx_commit(tx);
(void) spa_vdev_exit(spa, NULL, txg, 0);
if (zio_injection_enabled)
zio_handle_panic_injection(spa, FTAG, 3);
/* split is complete; log a history record */
spa_history_log_internal(newspa, "split", NULL,
"from pool %s", spa_name(spa));
kmem_free(vml, children * sizeof (vdev_t *));
/* if we're not going to mount the filesystems in userland, export */
if (exp)
error = spa_export_common(newname, POOL_STATE_EXPORTED, NULL,
B_FALSE, B_FALSE);
return (error);
out:
spa_unload(newspa);
spa_deactivate(newspa);
spa_remove(newspa);
txg = spa_vdev_config_enter(spa);
/* re-online all offlined disks */
for (c = 0; c < children; c++) {
if (vml[c] != NULL)
vml[c]->vdev_offline = B_FALSE;
}
vdev_reopen(spa->spa_root_vdev);
nvlist_free(spa->spa_config_splitting);
spa->spa_config_splitting = NULL;
(void) spa_vdev_exit(spa, NULL, txg, error);
kmem_free(vml, children * sizeof (vdev_t *));
return (error);
}
-static nvlist_t *
-spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
-{
- for (int i = 0; i < count; i++) {
- uint64_t guid;
-
- VERIFY(nvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID,
- &guid) == 0);
-
- if (guid == target_guid)
- return (nvpp[i]);
- }
-
- return (NULL);
-}
-
-static void
-spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
- nvlist_t *dev_to_remove)
-{
- nvlist_t **newdev = NULL;
-
- if (count > 1)
- newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
-
- for (int i = 0, j = 0; i < count; i++) {
- if (dev[i] == dev_to_remove)
- continue;
- VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
- }
-
- VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
- VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
-
- for (int i = 0; i < count - 1; i++)
- nvlist_free(newdev[i]);
-
- if (count > 1)
- kmem_free(newdev, (count - 1) * sizeof (void *));
-}
-
-/*
- * Evacuate the device.
- */
-static int
-spa_vdev_remove_evacuate(spa_t *spa, vdev_t *vd)
-{
- uint64_t txg;
- int error = 0;
-
- ASSERT(MUTEX_HELD(&spa_namespace_lock));
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
- ASSERT(vd == vd->vdev_top);
-
- /*
- * Evacuate the device. We don't hold the config lock as writer
- * since we need to do I/O but we do keep the
- * spa_namespace_lock held. Once this completes the device
- * should no longer have any blocks allocated on it.
- */
- if (vd->vdev_islog) {
- if (vd->vdev_stat.vs_alloc != 0)
- error = spa_offline_log(spa);
- } else {
- error = SET_ERROR(ENOTSUP);
- }
-
- if (error)
- return (error);
-
- /*
- * The evacuation succeeded. Remove any remaining MOS metadata
- * associated with this vdev, and wait for these changes to sync.
- */
- ASSERT0(vd->vdev_stat.vs_alloc);
- txg = spa_vdev_config_enter(spa);
- vd->vdev_removing = B_TRUE;
- vdev_dirty_leaves(vd, VDD_DTL, txg);
- vdev_config_dirty(vd);
- spa_vdev_config_exit(spa, NULL, txg, 0, FTAG);
-
- return (0);
-}
-
-/*
- * Complete the removal by cleaning up the namespace.
- */
-static void
-spa_vdev_remove_from_namespace(spa_t *spa, vdev_t *vd)
-{
- vdev_t *rvd = spa->spa_root_vdev;
- uint64_t id = vd->vdev_id;
- boolean_t last_vdev = (id == (rvd->vdev_children - 1));
-
- ASSERT(MUTEX_HELD(&spa_namespace_lock));
- ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
- ASSERT(vd == vd->vdev_top);
-
- /*
- * Only remove any devices which are empty.
- */
- if (vd->vdev_stat.vs_alloc != 0)
- return;
-
- (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
-
- if (list_link_active(&vd->vdev_state_dirty_node))
- vdev_state_clean(vd);
- if (list_link_active(&vd->vdev_config_dirty_node))
- vdev_config_clean(vd);
-
- vdev_free(vd);
-
- if (last_vdev) {
- vdev_compact_children(rvd);
- } else {
- vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
- vdev_add_child(rvd, vd);
- }
- vdev_config_dirty(rvd);
-
- /*
- * Reassess the health of our root vdev.
- */
- vdev_reopen(rvd);
-}
-
-/*
- * Remove a device from the pool -
- *
- * Removing a device from the vdev namespace requires several steps
- * and can take a significant amount of time. As a result we use
- * the spa_vdev_config_[enter/exit] functions which allow us to
- * grab and release the spa_config_lock while still holding the namespace
- * lock. During each step the configuration is synced out.
- *
- * Currently, this supports removing only hot spares, slogs, and level 2 ARC
- * devices.
- */
-int
-spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
-{
- vdev_t *vd;
- sysevent_t *ev = NULL;
- metaslab_group_t *mg;
- nvlist_t **spares, **l2cache, *nv;
- uint64_t txg = 0;
- uint_t nspares, nl2cache;
- int error = 0;
- boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
-
- ASSERT(spa_writeable(spa));
-
- if (!locked)
- txg = spa_vdev_enter(spa);
-
- vd = spa_lookup_by_guid(spa, guid, B_FALSE);
-
- if (spa->spa_spares.sav_vdevs != NULL &&
- nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
- ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
- (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
- /*
- * Only remove the hot spare if it's not currently in use
- * in this pool.
- */
- if (vd == NULL || unspare) {
- if (vd == NULL)
- vd = spa_lookup_by_guid(spa, guid, B_TRUE);
- ev = spa_event_create(spa, vd, NULL,
- ESC_ZFS_VDEV_REMOVE_AUX);
- spa_vdev_remove_aux(spa->spa_spares.sav_config,
- ZPOOL_CONFIG_SPARES, spares, nspares, nv);
- spa_load_spares(spa);
- spa->spa_spares.sav_sync = B_TRUE;
- } else {
- error = SET_ERROR(EBUSY);
- }
- } else if (spa->spa_l2cache.sav_vdevs != NULL &&
- nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
- ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
- (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
- /*
- * Cache devices can always be removed.
- */
- vd = spa_lookup_by_guid(spa, guid, B_TRUE);
- ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
- spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
- ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
- spa_load_l2cache(spa);
- spa->spa_l2cache.sav_sync = B_TRUE;
- } else if (vd != NULL && vd->vdev_islog) {
- ASSERT(!locked);
- ASSERT(vd == vd->vdev_top);
-
- mg = vd->vdev_mg;
-
- /*
- * Stop allocating from this vdev.
- */
- metaslab_group_passivate(mg);
-
- /*
- * Wait for the youngest allocations and frees to sync,
- * and then wait for the deferral of those frees to finish.
- */
- spa_vdev_config_exit(spa, NULL,
- txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
-
- /*
- * Attempt to evacuate the vdev.
- */
- error = spa_vdev_remove_evacuate(spa, vd);
-
- txg = spa_vdev_config_enter(spa);
-
- /*
- * If we couldn't evacuate the vdev, unwind.
- */
- if (error) {
- metaslab_group_activate(mg);
- return (spa_vdev_exit(spa, NULL, txg, error));
- }
-
- /*
- * Clean up the vdev namespace.
- */
- ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_DEV);
- spa_vdev_remove_from_namespace(spa, vd);
-
- } else if (vd != NULL) {
- /*
- * Normal vdevs cannot be removed (yet).
- */
- error = SET_ERROR(ENOTSUP);
- } else {
- /*
- * There is no vdev of any kind with the specified guid.
- */
- error = SET_ERROR(ENOENT);
- }
-
- if (!locked)
- error = spa_vdev_exit(spa, NULL, txg, error);
-
- if (ev)
- spa_event_post(ev);
-
- return (error);
-}
-
/*
* Find any device that's done replacing, or a vdev marked 'unspare' that's
* currently spared, so we can detach it.
*/
static vdev_t *
spa_vdev_resilver_done_hunt(vdev_t *vd)
{
vdev_t *newvd, *oldvd;
for (int c = 0; c < vd->vdev_children; c++) {
oldvd = spa_vdev_resilver_done_hunt(vd->vdev_child[c]);
if (oldvd != NULL)
return (oldvd);
}
/*
* Check for a completed replacement. We always consider the first
* vdev in the list to be the oldest vdev, and the last one to be
* the newest (see spa_vdev_attach() for how that works). In
* the case where the newest vdev is faulted, we will not automatically
* remove it after a resilver completes. This is OK as it will require
* user intervention to determine which disk the admin wishes to keep.
*/
if (vd->vdev_ops == &vdev_replacing_ops) {
ASSERT(vd->vdev_children > 1);
newvd = vd->vdev_child[vd->vdev_children - 1];
oldvd = vd->vdev_child[0];
if (vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
}
/*
* Check for a completed resilver with the 'unspare' flag set.
*/
if (vd->vdev_ops == &vdev_spare_ops) {
vdev_t *first = vd->vdev_child[0];
vdev_t *last = vd->vdev_child[vd->vdev_children - 1];
if (last->vdev_unspare) {
oldvd = first;
newvd = last;
} else if (first->vdev_unspare) {
oldvd = last;
newvd = first;
} else {
oldvd = NULL;
}
if (oldvd != NULL &&
vdev_dtl_empty(newvd, DTL_MISSING) &&
vdev_dtl_empty(newvd, DTL_OUTAGE) &&
!vdev_dtl_required(oldvd))
return (oldvd);
/*
* If there are more than two spares attached to a disk,
* and those spares are not required, then we want to
* attempt to free them up now so that they can be used
* by other pools. Once we're back down to a single
* disk+spare, we stop removing them.
*/
if (vd->vdev_children > 2) {
newvd = vd->vdev_child[1];
if (newvd->vdev_isspare && last->vdev_isspare &&
vdev_dtl_empty(last, DTL_MISSING) &&
vdev_dtl_empty(last, DTL_OUTAGE) &&
!vdev_dtl_required(newvd))
return (newvd);
}
}
return (NULL);
}
static void
spa_vdev_resilver_done(spa_t *spa)
{
vdev_t *vd, *pvd, *ppvd;
uint64_t guid, sguid, pguid, ppguid;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
while ((vd = spa_vdev_resilver_done_hunt(spa->spa_root_vdev)) != NULL) {
pvd = vd->vdev_parent;
ppvd = pvd->vdev_parent;
guid = vd->vdev_guid;
pguid = pvd->vdev_guid;
ppguid = ppvd->vdev_guid;
sguid = 0;
/*
* If we have just finished replacing a hot spared device, then
* we need to detach the parent's first child (the original hot
* spare) as well.
*/
if (ppvd->vdev_ops == &vdev_spare_ops && pvd->vdev_id == 0 &&
ppvd->vdev_children == 2) {
ASSERT(pvd->vdev_ops == &vdev_replacing_ops);
sguid = ppvd->vdev_child[1]->vdev_guid;
}
ASSERT(vd->vdev_resilver_txg == 0 || !vdev_dtl_required(vd));
spa_config_exit(spa, SCL_ALL, FTAG);
if (spa_vdev_detach(spa, guid, pguid, B_TRUE) != 0)
return;
if (sguid && spa_vdev_detach(spa, sguid, ppguid, B_TRUE) != 0)
return;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
}
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* Update the stored path or FRU for this vdev.
*/
int
spa_vdev_set_common(spa_t *spa, uint64_t guid, const char *value,
boolean_t ispath)
{
vdev_t *vd;
boolean_t sync = B_FALSE;
ASSERT(spa_writeable(spa));
spa_vdev_state_enter(spa, SCL_ALL);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENOENT));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
if (ispath) {
if (strcmp(value, vd->vdev_path) != 0) {
spa_strfree(vd->vdev_path);
vd->vdev_path = spa_strdup(value);
sync = B_TRUE;
}
} else {
if (vd->vdev_fru == NULL) {
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
} else if (strcmp(value, vd->vdev_fru) != 0) {
spa_strfree(vd->vdev_fru);
vd->vdev_fru = spa_strdup(value);
sync = B_TRUE;
}
}
return (spa_vdev_state_exit(spa, sync ? vd : NULL, 0));
}
int
spa_vdev_setpath(spa_t *spa, uint64_t guid, const char *newpath)
{
return (spa_vdev_set_common(spa, guid, newpath, B_TRUE));
}
int
spa_vdev_setfru(spa_t *spa, uint64_t guid, const char *newfru)
{
return (spa_vdev_set_common(spa, guid, newfru, B_FALSE));
}
/*
* ==========================================================================
* SPA Scanning
* ==========================================================================
*/
int
spa_scrub_pause_resume(spa_t *spa, pool_scrub_cmd_t cmd)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
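/* A resilver in progress cannot be paused or resumed. */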
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scrub_set_pause_resume(spa->spa_dsl_pool, cmd));
}
int
spa_scan_stop(spa_t *spa)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (dsl_scan_resilvering(spa->spa_dsl_pool))
return (SET_ERROR(EBUSY));
return (dsl_scan_cancel(spa->spa_dsl_pool));
}
int
spa_scan(spa_t *spa, pool_scan_func_t func)
{
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == 0);
if (func >= POOL_SCAN_FUNCS || func == POOL_SCAN_NONE)
return (SET_ERROR(ENOTSUP));
/*
* If a resilver was requested, but there is no DTL on a
* writeable leaf device, we have nothing to do.
*/
if (func == POOL_SCAN_RESILVER &&
!vdev_resilver_needed(spa->spa_root_vdev, NULL, NULL)) {
spa_async_request(spa, SPA_ASYNC_RESILVER_DONE);
return (0);
}
return (dsl_scan(spa->spa_dsl_pool, func));
}
/*
* ==========================================================================
* SPA async task processing
* ==========================================================================
*/
static void
spa_async_remove(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_remove_wanted) {
vd->vdev_remove_wanted = B_FALSE;
vd->vdev_delayed_close = B_FALSE;
vdev_set_state(vd, B_FALSE, VDEV_STATE_REMOVED, VDEV_AUX_NONE);
/*
* We want to clear the stats, but we don't want to do a full
* vdev_clear() as that will cause us to throw away
* degraded/faulted state as well as attempt to reopen the
* device, all of which is a waste.
*/
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
vdev_state_dirty(vd->vdev_top);
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_remove(spa, vd->vdev_child[c]);
}
static void
spa_async_probe(spa_t *spa, vdev_t *vd)
{
if (vd->vdev_probe_wanted) {
vd->vdev_probe_wanted = B_FALSE;
vdev_reopen(vd); /* vdev_open() does the actual probe */
}
for (int c = 0; c < vd->vdev_children; c++)
spa_async_probe(spa, vd->vdev_child[c]);
}
static void
spa_async_autoexpand(spa_t *spa, vdev_t *vd)
{
if (!spa->spa_autoexpand)
return;
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
spa_async_autoexpand(spa, cvd);
}
if (!vd->vdev_ops->vdev_op_leaf || vd->vdev_physpath == NULL)
return;
spa_event_notify(vd->vdev_spa, vd, NULL, ESC_ZFS_VDEV_AUTOEXPAND);
}
static void
spa_async_thread(void *arg)
{
spa_t *spa = (spa_t *)arg;
int tasks;
ASSERT(spa->spa_sync_on);
mutex_enter(&spa->spa_async_lock);
tasks = spa->spa_async_tasks;
spa->spa_async_tasks = 0;
mutex_exit(&spa->spa_async_lock);
/*
* See if the config needs to be updated.
*/
if (tasks & SPA_ASYNC_CONFIG_UPDATE) {
uint64_t old_space, new_space;
mutex_enter(&spa_namespace_lock);
old_space = metaslab_class_get_space(spa_normal_class(spa));
spa_config_update(spa, SPA_CONFIG_UPDATE_POOL);
new_space = metaslab_class_get_space(spa_normal_class(spa));
mutex_exit(&spa_namespace_lock);
/*
* If the pool grew as a result of the config update,
* then log an internal history event.
*/
if (new_space != old_space) {
spa_history_log_internal(spa, "vdev online", NULL,
"pool '%s' size: %llu(+%llu)",
spa_name(spa), new_space, new_space - old_space);
}
}
/*
* See if any devices need to be marked REMOVED.
*/
if (tasks & SPA_ASYNC_REMOVE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_remove(spa, spa->spa_root_vdev);
for (int i = 0; i < spa->spa_l2cache.sav_count; i++)
spa_async_remove(spa, spa->spa_l2cache.sav_vdevs[i]);
for (int i = 0; i < spa->spa_spares.sav_count; i++)
spa_async_remove(spa, spa->spa_spares.sav_vdevs[i]);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
if ((tasks & SPA_ASYNC_AUTOEXPAND) && !spa_suspended(spa)) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa_async_autoexpand(spa, spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
/*
* See if any devices need to be probed.
*/
if (tasks & SPA_ASYNC_PROBE) {
spa_vdev_state_enter(spa, SCL_NONE);
spa_async_probe(spa, spa->spa_root_vdev);
(void) spa_vdev_state_exit(spa, NULL, 0);
}
/*
* If any devices are done replacing, detach them.
*/
if (tasks & SPA_ASYNC_RESILVER_DONE)
spa_vdev_resilver_done(spa);
/*
* Kick off a resilver.
*/
if (tasks & SPA_ASYNC_RESILVER)
dsl_resilver_restart(spa->spa_dsl_pool, 0);
/*
* Let the world know that we're done.
*/
mutex_enter(&spa->spa_async_lock);
spa->spa_async_thread = NULL;
cv_broadcast(&spa->spa_async_cv);
mutex_exit(&spa->spa_async_lock);
thread_exit();
}
void
spa_async_suspend(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
spa->spa_async_suspended++;
- while (spa->spa_async_thread != NULL)
+ while (spa->spa_async_thread != NULL ||
+ spa->spa_condense_thread != NULL)
cv_wait(&spa->spa_async_cv, &spa->spa_async_lock);
mutex_exit(&spa->spa_async_lock);
+
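+ /* Also pause any in-progress device removal. */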
+ spa_vdev_remove_suspend(spa);
}
void
spa_async_resume(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
ASSERT(spa->spa_async_suspended != 0);
spa->spa_async_suspended--;
mutex_exit(&spa->spa_async_lock);
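+ /* Resume a device removal that was paused by spa_async_suspend(). */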
+ spa_restart_removal(spa);
}
static boolean_t
spa_async_tasks_pending(spa_t *spa)
{
uint_t non_config_tasks;
uint_t config_task;
boolean_t config_task_suspended;
non_config_tasks = spa->spa_async_tasks & ~SPA_ASYNC_CONFIG_UPDATE;
config_task = spa->spa_async_tasks & SPA_ASYNC_CONFIG_UPDATE;
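/*
 * After a config cache write (ccw) failure, retry the write only
 * once zfs_ccw_retry_interval seconds have elapsed.
 */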
if (spa->spa_ccw_fail_time == 0) {
config_task_suspended = B_FALSE;
} else {
config_task_suspended =
(gethrtime() - spa->spa_ccw_fail_time) <
((hrtime_t)zfs_ccw_retry_interval * NANOSEC);
}
return (non_config_tasks || (config_task && !config_task_suspended));
}
static void
spa_async_dispatch(spa_t *spa)
{
mutex_enter(&spa->spa_async_lock);
if (spa_async_tasks_pending(spa) &&
!spa->spa_async_suspended &&
spa->spa_async_thread == NULL &&
rootdir != NULL)
spa->spa_async_thread = thread_create(NULL, 0,
spa_async_thread, spa, 0, &p0, TS_RUN, maxclsyspri);
mutex_exit(&spa->spa_async_lock);
}
void
spa_async_request(spa_t *spa, int task)
{
zfs_dbgmsg("spa=%s async request task=%u", spa->spa_name, task);
mutex_enter(&spa->spa_async_lock);
spa->spa_async_tasks |= task;
mutex_exit(&spa->spa_async_lock);
}
/*
* ==========================================================================
* SPA syncing routines
* ==========================================================================
*/
static int
bpobj_enqueue_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
bpobj_t *bpo = arg;
bpobj_enqueue(bpo, bp, tx);
return (0);
}
static int
spa_free_sync_cb(void *arg, const blkptr_t *bp, dmu_tx_t *tx)
{
zio_t *zio = arg;
zio_nowait(zio_free_sync(zio, zio->io_spa, dmu_tx_get_txg(tx), bp,
zio->io_flags));
return (0);
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing frees.
*/
static void
spa_sync_frees(spa_t *spa, bplist_t *bpl, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
bplist_iterate(bpl, spa_free_sync_cb, zio, tx);
VERIFY(zio_wait(zio) == 0);
}
/*
* Note: this simple function is not inlined to make it easier to dtrace the
* amount of time spent syncing deferred frees.
*/
static void
spa_sync_deferred_frees(spa_t *spa, dmu_tx_t *tx)
{
zio_t *zio = zio_root(spa, NULL, NULL, 0);
VERIFY3U(bpobj_iterate(&spa->spa_deferred_bpobj,
spa_free_sync_cb, zio, tx), ==, 0);
VERIFY0(zio_wait(zio));
}
static void
spa_sync_nvlist(spa_t *spa, uint64_t obj, nvlist_t *nv, dmu_tx_t *tx)
{
char *packed = NULL;
size_t bufsize;
size_t nvsize = 0;
dmu_buf_t *db;
VERIFY(nvlist_size(nv, &nvsize, NV_ENCODE_XDR) == 0);
/*
* Write full (SPA_CONFIG_BLOCKSIZE) blocks of configuration
* information. This avoids the dmu_buf_will_dirty() path and
* saves us a pre-read to get data we don't actually care about.
*/
bufsize = P2ROUNDUP((uint64_t)nvsize, SPA_CONFIG_BLOCKSIZE);
packed = vmem_alloc(bufsize, KM_SLEEP);
VERIFY(nvlist_pack(nv, &packed, &nvsize, NV_ENCODE_XDR,
KM_SLEEP) == 0);
bzero(packed + nvsize, bufsize - nvsize);
dmu_write(spa->spa_meta_objset, obj, 0, bufsize, packed, tx);
vmem_free(packed, bufsize);
VERIFY(0 == dmu_bonus_hold(spa->spa_meta_objset, obj, FTAG, &db));
dmu_buf_will_dirty(db, tx);
*(uint64_t *)db->db_data = nvsize;
dmu_buf_rele(db, FTAG);
}
static void
spa_sync_aux_dev(spa_t *spa, spa_aux_vdev_t *sav, dmu_tx_t *tx,
const char *config, const char *entry)
{
nvlist_t *nvroot;
nvlist_t **list;
int i;
if (!sav->sav_sync)
return;
/*
* Update the MOS nvlist describing the list of available devices.
* spa_validate_aux() will have already made sure this nvlist is
* valid and the vdevs are labeled appropriately.
*/
if (sav->sav_object == 0) {
sav->sav_object = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_PACKED_NVLIST, 1 << 14, DMU_OT_PACKED_NVLIST_SIZE,
sizeof (uint64_t), tx);
VERIFY(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, entry, sizeof (uint64_t), 1,
&sav->sav_object, tx) == 0);
}
VERIFY(nvlist_alloc(&nvroot, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if (sav->sav_count == 0) {
VERIFY(nvlist_add_nvlist_array(nvroot, config, NULL, 0) == 0);
} else {
list = kmem_alloc(sav->sav_count*sizeof (void *), KM_SLEEP);
for (i = 0; i < sav->sav_count; i++)
list[i] = vdev_config_generate(spa, sav->sav_vdevs[i],
B_FALSE, VDEV_CONFIG_L2CACHE);
VERIFY(nvlist_add_nvlist_array(nvroot, config, list,
sav->sav_count) == 0);
for (i = 0; i < sav->sav_count; i++)
nvlist_free(list[i]);
kmem_free(list, sav->sav_count * sizeof (void *));
}
spa_sync_nvlist(spa, sav->sav_object, nvroot, tx);
nvlist_free(nvroot);
sav->sav_sync = B_FALSE;
}
/*
* Rebuild spa's all-vdev ZAP from the vdev ZAPs indicated in each vdev_t.
* The all-vdev ZAP must be empty.
*/
static void
spa_avz_build(vdev_t *vd, uint64_t avz, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
if (vd->vdev_top_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_top_zap, tx));
}
if (vd->vdev_leaf_zap != 0) {
VERIFY0(zap_add_int(spa->spa_meta_objset, avz,
vd->vdev_leaf_zap, tx));
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
spa_avz_build(vd->vdev_child[i], avz, tx);
}
}
static void
spa_sync_config_object(spa_t *spa, dmu_tx_t *tx)
{
nvlist_t *config;
/*
* If the pool is being imported from a pre-per-vdev-ZAP version of ZFS,
* its config may not be dirty but we still need to build per-vdev ZAPs.
* Similarly, if the pool is being assembled (e.g. after a split), we
* need to rebuild the AVZ although the config may not be dirty.
*/
if (list_is_empty(&spa->spa_config_dirty_list) &&
spa->spa_avz_action == AVZ_ACTION_NONE)
return;
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
ASSERT(spa->spa_avz_action == AVZ_ACTION_NONE ||
spa->spa_avz_action == AVZ_ACTION_INITIALIZE ||
spa->spa_all_vdev_zaps != 0);
if (spa->spa_avz_action == AVZ_ACTION_REBUILD) {
/* Make and build the new AVZ */
uint64_t new_avz = zap_create(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_OT_NONE, 0, tx);
spa_avz_build(spa->spa_root_vdev, new_avz, tx);
/* Diff old AVZ with new one */
zap_cursor_t zc;
zap_attribute_t za;
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t vdzap = za.za_first_integer;
if (zap_lookup_int(spa->spa_meta_objset, new_avz,
vdzap) == ENOENT) {
/*
* ZAP is listed in old AVZ but not in new one;
* destroy it
*/
VERIFY0(zap_destroy(spa->spa_meta_objset, vdzap,
tx));
}
}
zap_cursor_fini(&zc);
/* Destroy the old AVZ */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
/* Replace the old AVZ in the dir obj with the new one */
VERIFY0(zap_update(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP,
sizeof (new_avz), 1, &new_avz, tx));
spa->spa_all_vdev_zaps = new_avz;
} else if (spa->spa_avz_action == AVZ_ACTION_DESTROY) {
zap_cursor_t zc;
zap_attribute_t za;
/* Walk through the AVZ and destroy all listed ZAPs */
for (zap_cursor_init(&zc, spa->spa_meta_objset,
spa->spa_all_vdev_zaps);
zap_cursor_retrieve(&zc, &za) == 0;
zap_cursor_advance(&zc)) {
uint64_t zap = za.za_first_integer;
VERIFY0(zap_destroy(spa->spa_meta_objset, zap, tx));
}
zap_cursor_fini(&zc);
/* Destroy and unlink the AVZ itself */
VERIFY0(zap_destroy(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, tx));
VERIFY0(zap_remove(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_VDEV_ZAP_MAP, tx));
spa->spa_all_vdev_zaps = 0;
}
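/*
 * At this point there is either an existing AVZ (possibly just rebuilt
 * above) or none at all (never created, or just destroyed).  In the
 * latter case, create a fresh, empty AVZ and link it into the MOS
 * directory object.
 */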
if (spa->spa_all_vdev_zaps == 0) {
spa->spa_all_vdev_zaps = zap_create_link(spa->spa_meta_objset,
DMU_OTN_ZAP_METADATA, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_VDEV_ZAP_MAP, tx);
}
spa->spa_avz_action = AVZ_ACTION_NONE;
/* Create ZAPs for vdevs that don't have them. */
vdev_construct_zaps(spa->spa_root_vdev, tx);
config = spa_config_generate(spa, spa->spa_root_vdev,
dmu_tx_get_txg(tx), B_FALSE);
/*
* If we're upgrading the spa version then make sure that
* the config object gets updated with the correct version.
*/
if (spa->spa_ubsync.ub_version < spa->spa_uberblock.ub_version)
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION,
spa->spa_uberblock.ub_version);
spa_config_exit(spa, SCL_STATE, FTAG);
nvlist_free(spa->spa_config_syncing);
spa->spa_config_syncing = config;
spa_sync_nvlist(spa, spa->spa_config_object, config, tx);
}
static void
spa_sync_version(void *arg, dmu_tx_t *tx)
{
uint64_t *versionp = arg;
uint64_t version = *versionp;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
/*
* Setting the version is special cased when first creating the pool.
*/
ASSERT(tx->tx_txg != TXG_INITIAL);
ASSERT(SPA_VERSION_IS_SUPPORTED(version));
ASSERT(version >= spa_version(spa));
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_history_log_internal(spa, "set", tx, "version=%lld", version);
}
/*
* Set zpool properties.
*/
static void
spa_sync_props(void *arg, dmu_tx_t *tx)
{
nvlist_t *nvp = arg;
spa_t *spa = dmu_tx_pool(tx)->dp_spa;
objset_t *mos = spa->spa_meta_objset;
nvpair_t *elem = NULL;
mutex_enter(&spa->spa_props_lock);
while ((elem = nvlist_next_nvpair(nvp, elem))) {
uint64_t intval;
char *strval, *fname;
zpool_prop_t prop;
const char *propname;
zprop_type_t proptype;
spa_feature_t fid;
switch (prop = zpool_name_to_prop(nvpair_name(elem))) {
case ZPOOL_PROP_INVAL:
/*
* We checked this earlier in spa_prop_validate().
*/
ASSERT(zpool_prop_feature(nvpair_name(elem)));
fname = strchr(nvpair_name(elem), '@') + 1;
VERIFY0(zfeature_lookup_name(fname, &fid));
spa_feature_enable(spa, fid, tx);
spa_history_log_internal(spa, "set", tx,
"%s=enabled", nvpair_name(elem));
break;
case ZPOOL_PROP_VERSION:
intval = fnvpair_value_uint64(elem);
/*
* The version is synced separately before other
* properties and should be correct by now.
*/
ASSERT3U(spa_version(spa), >=, intval);
break;
case ZPOOL_PROP_ALTROOT:
/*
* 'altroot' is a non-persistent property. It should
* have been set temporarily at creation or import time.
*/
ASSERT(spa->spa_root != NULL);
break;
case ZPOOL_PROP_READONLY:
case ZPOOL_PROP_CACHEFILE:
/*
 * 'readonly' and 'cachefile' are also non-persistent
* properties.
*/
break;
case ZPOOL_PROP_COMMENT:
strval = fnvpair_value_string(elem);
if (spa->spa_comment != NULL)
spa_strfree(spa->spa_comment);
spa->spa_comment = spa_strdup(strval);
/*
* We need to dirty the configuration on all the vdevs
* so that their labels get updated. It's unnecessary
* to do this for pool creation since the vdev's
* configuration has already been dirtied.
*/
if (tx->tx_txg != TXG_INITIAL)
vdev_config_dirty(spa->spa_root_vdev);
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
break;
default:
/*
* Set pool property values in the poolprops mos object.
*/
if (spa->spa_pool_props_object == 0) {
spa->spa_pool_props_object =
zap_create_link(mos, DMU_OT_POOL_PROPS,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_PROPS,
tx);
}
/* normalize the property name */
propname = zpool_prop_to_name(prop);
proptype = zpool_prop_get_type(prop);
if (nvpair_type(elem) == DATA_TYPE_STRING) {
ASSERT(proptype == PROP_TYPE_STRING);
strval = fnvpair_value_string(elem);
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
1, strlen(strval) + 1, strval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%s", nvpair_name(elem), strval);
} else if (nvpair_type(elem) == DATA_TYPE_UINT64) {
intval = fnvpair_value_uint64(elem);
if (proptype == PROP_TYPE_INDEX) {
const char *unused;
VERIFY0(zpool_prop_index_to_string(
prop, intval, &unused));
}
VERIFY0(zap_update(mos,
spa->spa_pool_props_object, propname,
8, 1, &intval, tx));
spa_history_log_internal(spa, "set", tx,
"%s=%lld", nvpair_name(elem), intval);
} else {
ASSERT(0); /* not allowed */
}
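/*
 * Mirror the new value into the in-core spa_t for the properties
 * we cache there.
 */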
switch (prop) {
case ZPOOL_PROP_DELEGATION:
spa->spa_delegation = intval;
break;
case ZPOOL_PROP_BOOTFS:
spa->spa_bootfs = intval;
break;
case ZPOOL_PROP_FAILUREMODE:
spa->spa_failmode = intval;
break;
case ZPOOL_PROP_AUTOEXPAND:
spa->spa_autoexpand = intval;
if (tx->tx_txg != TXG_INITIAL)
spa_async_request(spa,
SPA_ASYNC_AUTOEXPAND);
break;
case ZPOOL_PROP_MULTIHOST:
spa->spa_multihost = intval;
break;
case ZPOOL_PROP_DEDUPDITTO:
spa->spa_dedup_ditto = intval;
break;
default:
break;
}
}
}
mutex_exit(&spa->spa_props_lock);
}
/*
* Perform one-time upgrade on-disk changes. spa_version() does not
* reflect the new version this txg, so there must be no changes this
* txg to anything that the upgrade code depends on after it executes.
* Therefore this must be called after dsl_pool_sync() does the sync
* tasks.
*/
static void
spa_sync_upgrades(spa_t *spa, dmu_tx_t *tx)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
ASSERT(spa->spa_sync_pass == 1);
rrw_enter(&dp->dp_config_rwlock, RW_WRITER, FTAG);
if (spa->spa_ubsync.ub_version < SPA_VERSION_ORIGIN &&
spa->spa_uberblock.ub_version >= SPA_VERSION_ORIGIN) {
dsl_pool_create_origin(dp, tx);
/* Keeping the origin open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_NEXT_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_NEXT_CLONES) {
dsl_pool_upgrade_clones(dp, tx);
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_DIR_CLONES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_DIR_CLONES) {
dsl_pool_upgrade_dir_clones(dp, tx);
/* Keeping the freedir open increases spa_minref */
spa->spa_minref += 3;
}
if (spa->spa_ubsync.ub_version < SPA_VERSION_FEATURES &&
spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
spa_feature_create_zap_objects(spa, tx);
}
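/*
 * Each of the version checks above fires exactly once, in the txg
 * during which the pool first crosses the corresponding on-disk
 * version.
 */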
/*
 * The LZ4_COMPRESS feature's behaviour was changed to activate_on_enable
 * when the ability to use lz4 compression for metadata was added.
 * Old pools that have this feature enabled must be upgraded to have
 * this feature active.
*/
if (spa->spa_uberblock.ub_version >= SPA_VERSION_FEATURES) {
boolean_t lz4_en = spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS);
boolean_t lz4_ac = spa_feature_is_active(spa,
SPA_FEATURE_LZ4_COMPRESS);
if (lz4_en && !lz4_ac)
spa_feature_incr(spa, SPA_FEATURE_LZ4_COMPRESS, tx);
}
/*
* If we haven't written the salt, do so now. Note that the
* feature may not be activated yet, but that's fine since
* the presence of this ZAP entry is backwards compatible.
*/
if (zap_contains(spa->spa_meta_objset, DMU_POOL_DIRECTORY_OBJECT,
DMU_POOL_CHECKSUM_SALT) == ENOENT) {
VERIFY0(zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_CHECKSUM_SALT, 1,
sizeof (spa->spa_cksum_salt.zcs_bytes),
spa->spa_cksum_salt.zcs_bytes, tx));
}
rrw_exit(&dp->dp_config_rwlock, FTAG);
}
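+/*
+ * Sanity-check an indirect vdev's in-core state (indirect mapping, births,
+ * obsolete space map and obsolete segments tree) before it is synced out
+ * for this txg.  All of the checks are assertions.
+ */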
+static void
+vdev_indirect_state_sync_verify(vdev_t *vd)
+{
+ ASSERTV(vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping);
+ ASSERTV(vdev_indirect_births_t *vib = vd->vdev_indirect_births);
+
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ ASSERT(vim != NULL);
+ ASSERT(vib != NULL);
+ }
+
+ if (vdev_obsolete_sm_object(vd) != 0) {
+ ASSERT(vd->vdev_obsolete_sm != NULL);
+ ASSERT(vd->vdev_removing ||
+ vd->vdev_ops == &vdev_indirect_ops);
+ ASSERT(vdev_indirect_mapping_num_entries(vim) > 0);
+ ASSERT(vdev_indirect_mapping_bytes_mapped(vim) > 0);
+
+ ASSERT3U(vdev_obsolete_sm_object(vd), ==,
+ space_map_object(vd->vdev_obsolete_sm));
+ ASSERT3U(vdev_indirect_mapping_bytes_mapped(vim), >=,
+ space_map_allocated(vd->vdev_obsolete_sm));
+ }
+ ASSERT(vd->vdev_obsolete_segments != NULL);
+
+ /*
+ * Since frees / remaps to an indirect vdev can only
+ * happen in syncing context, the obsolete segments
+ * tree must be empty when we start syncing.
+ */
+ ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
+}
+
/*
* Sync the specified transaction group. New blocks may be dirtied as
* part of the process, so we iterate until it converges.
*/
void
spa_sync(spa_t *spa, uint64_t txg)
{
dsl_pool_t *dp = spa->spa_dsl_pool;
objset_t *mos = spa->spa_meta_objset;
bplist_t *free_bpl = &spa->spa_free_bplist[txg & TXG_MASK];
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd;
dmu_tx_t *tx;
int error;
uint32_t max_queue_depth = zfs_vdev_async_write_max_active *
zfs_vdev_queue_depth_pct / 100;
VERIFY(spa_writeable(spa));
+ /*
+ * Wait for i/os issued in open context that need to complete
+ * before this txg syncs.
+ */
+ VERIFY0(zio_wait(spa->spa_txg_zio[txg & TXG_MASK]));
+ spa->spa_txg_zio[txg & TXG_MASK] = zio_root(spa, NULL, NULL, 0);
+
/*
* Lock out configuration changes.
*/
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_READER);
spa->spa_syncing_txg = txg;
spa->spa_sync_pass = 0;
mutex_enter(&spa->spa_alloc_lock);
VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
mutex_exit(&spa->spa_alloc_lock);
/*
* If there are any pending vdev state changes, convert them
* into config changes that go out with this transaction group.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
while (list_head(&spa->spa_state_dirty_list) != NULL) {
/*
* We need the write lock here because, for aux vdevs,
* calling vdev_config_dirty() modifies sav_config.
* This is ugly and will become unnecessary when we
* eliminate the aux vdev wart by integrating all vdevs
* into the root vdev tree.
*/
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_WRITER);
while ((vd = list_head(&spa->spa_state_dirty_list)) != NULL) {
vdev_state_clean(vd);
vdev_config_dirty(vd);
}
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
spa_config_exit(spa, SCL_STATE, FTAG);
tx = dmu_tx_create_assigned(dp, txg);
spa->spa_sync_starttime = gethrtime();
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
NSEC_TO_TICK(spa->spa_deadman_synctime));
/*
* If we are upgrading to SPA_VERSION_RAIDZ_DEFLATE this txg,
* set spa_deflate if we have no raid-z vdevs.
*/
if (spa->spa_ubsync.ub_version < SPA_VERSION_RAIDZ_DEFLATE &&
spa->spa_uberblock.ub_version >= SPA_VERSION_RAIDZ_DEFLATE) {
int i;
for (i = 0; i < rvd->vdev_children; i++) {
vd = rvd->vdev_child[i];
if (vd->vdev_deflate_ratio != SPA_MINBLOCKSIZE)
break;
}
if (i == rvd->vdev_children) {
spa->spa_deflate = TRUE;
VERIFY(0 == zap_add(spa->spa_meta_objset,
DMU_POOL_DIRECTORY_OBJECT, DMU_POOL_DEFLATE,
sizeof (uint64_t), 1, &spa->spa_deflate, tx));
}
}
/*
* Set the top-level vdev's max queue depth. Evaluate each
* top-level's async write queue depth in case it changed.
* The max queue depth will not change in the middle of syncing
* out this txg.
*/
uint64_t queue_depth_total = 0;
for (int c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
metaslab_group_t *mg = tvd->vdev_mg;
if (mg == NULL || mg->mg_class != spa_normal_class(spa) ||
!metaslab_group_initialized(mg))
continue;
/*
* It is safe to do a lock-free check here because only async
* allocations look at mg_max_alloc_queue_depth, and async
* allocations all happen from spa_sync().
*/
ASSERT0(refcount_count(&mg->mg_alloc_queue_depth));
mg->mg_max_alloc_queue_depth = max_queue_depth;
queue_depth_total += mg->mg_max_alloc_queue_depth;
}
metaslab_class_t *mc = spa_normal_class(spa);
ASSERT0(refcount_count(&mc->mc_alloc_slots));
mc->mc_alloc_max_slots = queue_depth_total;
mc->mc_alloc_throttle_enabled = zio_dva_throttle_enabled;
ASSERT3U(mc->mc_alloc_max_slots, <=,
max_queue_depth * rvd->vdev_children);
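+ /*
+ * Verify each top-level vdev's in-core indirect state and, if
+ * vdev_indirect_should_condense() says a mapping is worth condensing,
+ * start at most one condense for this txg.
+ */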
+ for (int c = 0; c < rvd->vdev_children; c++) {
+ vdev_t *vd = rvd->vdev_child[c];
+ vdev_indirect_state_sync_verify(vd);
+
+ if (vdev_indirect_should_condense(vd)) {
+ spa_condense_indirect_start_sync(vd, tx);
+ break;
+ }
+ }
+
/*
* Iterate to convergence.
*/
do {
int pass = ++spa->spa_sync_pass;
spa_sync_config_object(spa, tx);
spa_sync_aux_dev(spa, &spa->spa_spares, tx,
ZPOOL_CONFIG_SPARES, DMU_POOL_SPARES);
spa_sync_aux_dev(spa, &spa->spa_l2cache, tx,
ZPOOL_CONFIG_L2CACHE, DMU_POOL_L2CACHE);
spa_errlog_sync(spa, txg);
dsl_pool_sync(dp, txg);
if (pass < zfs_sync_pass_deferred_free) {
spa_sync_frees(spa, free_bpl, tx);
} else {
/*
* We can not defer frees in pass 1, because
* we sync the deferred frees later in pass 1.
*/
ASSERT3U(pass, >, 1);
bplist_iterate(free_bpl, bpobj_enqueue_cb,
&spa->spa_deferred_bpobj, tx);
}
ddt_sync(spa, txg);
dsl_scan_sync(dp, tx);
- while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg)))
+ if (spa->spa_vdev_removal != NULL)
+ svr_sync(spa, tx);
+
+ while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, txg))
+ != NULL)
vdev_sync(vd, txg);
if (pass == 1) {
spa_sync_upgrades(spa, tx);
ASSERT3U(txg, >=,
spa->spa_uberblock.ub_rootbp.blk_birth);
/*
* Note: We need to check if the MOS is dirty
* because we could have marked the MOS dirty
* without updating the uberblock (e.g. if we
* have sync tasks but no dirty user data). We
* need to check the uberblock's rootbp because
* it is updated if we have synced out dirty
* data (though in this case the MOS will most
* likely also be dirty due to second order
* effects, we don't want to rely on that here).
*/
if (spa->spa_uberblock.ub_rootbp.blk_birth < txg &&
!dmu_objset_is_dirty(mos, txg)) {
/*
* Nothing changed on the first pass,
* therefore this TXG is a no-op. Avoid
* syncing deferred frees, so that we
* can keep this TXG as a no-op.
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets,
txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&dp->dp_sync_tasks, txg));
break;
}
spa_sync_deferred_frees(spa, tx);
}
} while (dmu_objset_is_dirty(mos, txg));
#ifdef ZFS_DEBUG
if (!list_is_empty(&spa->spa_config_dirty_list)) {
/*
* Make sure that the number of ZAPs for all the vdevs matches
* the number of ZAPs in the per-vdev ZAP list. This only gets
* called if the config is dirty; otherwise there may be
* outstanding AVZ operations that weren't completed in
* spa_sync_config_object.
*/
uint64_t all_vdev_zap_entry_count;
ASSERT0(zap_count(spa->spa_meta_objset,
spa->spa_all_vdev_zaps, &all_vdev_zap_entry_count));
ASSERT3U(vdev_count_verify_zaps(spa->spa_root_vdev), ==,
all_vdev_zap_entry_count);
}
#endif
+ if (spa->spa_vdev_removal != NULL) {
+ ASSERT0(spa->spa_vdev_removal->svr_bytes_done[txg & TXG_MASK]);
+ }
+
/*
* Rewrite the vdev configuration (which includes the uberblock)
* to commit the transaction group.
*
* If there are no dirty vdevs, we sync the uberblock to a few
* random top-level vdevs that are known to be visible in the
* config cache (see spa_vdev_add() for a complete description).
* If there *are* dirty vdevs, sync the uberblock to all vdevs.
*/
for (;;) {
/*
* We hold SCL_STATE to prevent vdev open/close/etc.
* while we're attempting to write the vdev labels.
*/
spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
if (list_is_empty(&spa->spa_config_dirty_list)) {
vdev_t *svd[SPA_DVAS_PER_BP];
int svdcount = 0;
int children = rvd->vdev_children;
int c0 = spa_get_random(children);
for (int c = 0; c < children; c++) {
vd = rvd->vdev_child[(c0 + c) % children];
- if (vd->vdev_ms_array == 0 || vd->vdev_islog)
+ if (vd->vdev_ms_array == 0 || vd->vdev_islog ||
+ !vdev_is_concrete(vd))
continue;
svd[svdcount++] = vd;
if (svdcount == SPA_DVAS_PER_BP)
break;
}
error = vdev_config_sync(svd, svdcount, txg);
} else {
error = vdev_config_sync(rvd->vdev_child,
rvd->vdev_children, txg);
}
if (error == 0)
spa->spa_last_synced_guid = rvd->vdev_guid;
spa_config_exit(spa, SCL_STATE, FTAG);
if (error == 0)
break;
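/*
 * The config/uberblock write failed; suspend the pool, wait until
 * it is resumed, then retry.
 */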
zio_suspend(spa, NULL, ZIO_SUSPEND_IOERR);
zio_resume_wait(spa);
}
dmu_tx_commit(tx);
taskq_cancel_id(system_delay_taskq, spa->spa_deadman_tqid);
spa->spa_deadman_tqid = 0;
/*
* Clear the dirty config list.
*/
while ((vd = list_head(&spa->spa_config_dirty_list)) != NULL)
vdev_config_clean(vd);
/*
* Now that the new config has synced transactionally,
* let it become visible to the config cache.
*/
if (spa->spa_config_syncing != NULL) {
spa_config_set(spa, spa->spa_config_syncing);
spa->spa_config_txg = txg;
spa->spa_config_syncing = NULL;
}
dsl_pool_sync_done(dp, txg);
mutex_enter(&spa->spa_alloc_lock);
VERIFY0(avl_numnodes(&spa->spa_alloc_tree));
mutex_exit(&spa->spa_alloc_lock);
/*
* Update usable space statistics.
*/
while ((vd = txg_list_remove(&spa->spa_vdev_txg_list, TXG_CLEAN(txg))))
vdev_sync_done(vd, txg);
spa_update_dspace(spa);
/*
* It had better be the case that we didn't dirty anything
* since vdev_config_sync().
*/
ASSERT(txg_list_empty(&dp->dp_dirty_datasets, txg));
ASSERT(txg_list_empty(&dp->dp_dirty_dirs, txg));
ASSERT(txg_list_empty(&spa->spa_vdev_txg_list, txg));
spa->spa_sync_pass = 0;
/*
* Update the last synced uberblock here. We want to do this at
* the end of spa_sync() so that consumers of spa_last_synced_txg()
* will be guaranteed that all the processing associated with
* that txg has been completed.
*/
spa->spa_ubsync = spa->spa_uberblock;
spa_config_exit(spa, SCL_CONFIG, FTAG);
spa_handle_ignored_writes(spa);
/*
* If any async tasks have been requested, kick them off.
*/
spa_async_dispatch(spa);
}
/*
* Sync all pools. We don't want to hold the namespace lock across these
* operations, so we take a reference on the spa_t and drop the lock during the
* sync.
*/
void
spa_sync_allpools(void)
{
spa_t *spa = NULL;
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
txg_wait_synced(spa_get_dsl(spa), 0);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
}
/*
* ==========================================================================
* Miscellaneous routines
* ==========================================================================
*/
/*
* Remove all pools in the system.
*/
void
spa_evict_all(void)
{
spa_t *spa;
/*
* Remove all cached state. All pools should be closed now,
* so every spa in the AVL tree should be unreferenced.
*/
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(NULL)) != NULL) {
/*
* Stop async tasks. The async thread may need to detach
* a device that's been replaced, which requires grabbing
* spa_namespace_lock, so we must drop it here.
*/
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
spa_async_suspend(spa);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
if (spa->spa_state != POOL_STATE_UNINITIALIZED) {
spa_unload(spa);
spa_deactivate(spa);
}
spa_remove(spa);
}
mutex_exit(&spa_namespace_lock);
}
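/*
 * Look up a vdev by guid anywhere in the pool's vdev tree; if 'aux' is
 * set, also search the L2ARC and spare auxiliary vdev lists.
 */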
vdev_t *
spa_lookup_by_guid(spa_t *spa, uint64_t guid, boolean_t aux)
{
vdev_t *vd;
int i;
if ((vd = vdev_lookup_by_guid(spa->spa_root_vdev, guid)) != NULL)
return (vd);
if (aux) {
for (i = 0; i < spa->spa_l2cache.sav_count; i++) {
vd = spa->spa_l2cache.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
for (i = 0; i < spa->spa_spares.sav_count; i++) {
vd = spa->spa_spares.sav_vdevs[i];
if (vd->vdev_guid == guid)
return (vd);
}
}
return (NULL);
}
void
spa_upgrade(spa_t *spa, uint64_t version)
{
ASSERT(spa_writeable(spa));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
/*
* This should only be called for a non-faulted pool, and since a
* future version would result in an unopenable pool, this shouldn't be
* possible.
*/
ASSERT(SPA_VERSION_IS_SUPPORTED(spa->spa_uberblock.ub_version));
ASSERT3U(version, >=, spa->spa_uberblock.ub_version);
spa->spa_uberblock.ub_version = version;
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
txg_wait_synced(spa_get_dsl(spa), 0);
}
boolean_t
spa_has_spare(spa_t *spa, uint64_t guid)
{
int i;
uint64_t spareguid;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++)
if (sav->sav_vdevs[i]->vdev_guid == guid)
return (B_TRUE);
for (i = 0; i < sav->sav_npending; i++) {
if (nvlist_lookup_uint64(sav->sav_pending[i], ZPOOL_CONFIG_GUID,
&spareguid) == 0 && spareguid == guid)
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Check if a pool has an active shared spare device.
 * Note: reference count of an active spare is 2, as a spare and as a replacement
*/
static boolean_t
spa_has_active_shared_spare(spa_t *spa)
{
int i, refcnt;
uint64_t pool;
spa_aux_vdev_t *sav = &spa->spa_spares;
for (i = 0; i < sav->sav_count; i++) {
if (spa_spare_exists(sav->sav_vdevs[i]->vdev_guid, &pool,
&refcnt) && pool != 0ULL && pool == spa_guid(spa) &&
refcnt > 2)
return (B_TRUE);
}
return (B_FALSE);
}
-static sysevent_t *
+sysevent_t *
spa_event_create(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
sysevent_t *ev = NULL;
#ifdef _KERNEL
nvlist_t *resource;
resource = zfs_event_create(spa, vd, FM_SYSEVENT_CLASS, name, hist_nvl);
if (resource) {
ev = kmem_alloc(sizeof (sysevent_t), KM_SLEEP);
ev->resource = resource;
}
#endif
return (ev);
}
-static void
+void
spa_event_post(sysevent_t *ev)
{
#ifdef _KERNEL
if (ev) {
zfs_zevent_post(ev->resource, NULL, zfs_zevent_post_cb);
kmem_free(ev, sizeof (*ev));
}
#endif
}
/*
* Post a zevent corresponding to the given sysevent. The 'name' must be one
* of the event definitions in sys/sysevent/eventdefs.h. The payload will be
* filled in from the spa and (optionally) the vdev. This doesn't do anything
* in the userland libzpool, as we don't want consumers to misinterpret ztest
* or zdb as real changes.
*/
void
spa_event_notify(spa_t *spa, vdev_t *vd, nvlist_t *hist_nvl, const char *name)
{
spa_event_post(spa_event_create(spa, vd, hist_nvl, name));
}
#if defined(_KERNEL) && defined(HAVE_SPL)
/* state manipulation functions */
EXPORT_SYMBOL(spa_open);
EXPORT_SYMBOL(spa_open_rewind);
EXPORT_SYMBOL(spa_get_stats);
EXPORT_SYMBOL(spa_create);
EXPORT_SYMBOL(spa_import);
EXPORT_SYMBOL(spa_tryimport);
EXPORT_SYMBOL(spa_destroy);
EXPORT_SYMBOL(spa_export);
EXPORT_SYMBOL(spa_reset);
EXPORT_SYMBOL(spa_async_request);
EXPORT_SYMBOL(spa_async_suspend);
EXPORT_SYMBOL(spa_async_resume);
EXPORT_SYMBOL(spa_inject_addref);
EXPORT_SYMBOL(spa_inject_delref);
EXPORT_SYMBOL(spa_scan_stat_init);
EXPORT_SYMBOL(spa_scan_get_stats);
/* device manipulation */
EXPORT_SYMBOL(spa_vdev_add);
EXPORT_SYMBOL(spa_vdev_attach);
EXPORT_SYMBOL(spa_vdev_detach);
-EXPORT_SYMBOL(spa_vdev_remove);
EXPORT_SYMBOL(spa_vdev_setpath);
EXPORT_SYMBOL(spa_vdev_setfru);
EXPORT_SYMBOL(spa_vdev_split_mirror);
/* spare state (which is global across all pools) */
EXPORT_SYMBOL(spa_spare_add);
EXPORT_SYMBOL(spa_spare_remove);
EXPORT_SYMBOL(spa_spare_exists);
EXPORT_SYMBOL(spa_spare_activate);
/* L2ARC state (which is global across all pools) */
EXPORT_SYMBOL(spa_l2cache_add);
EXPORT_SYMBOL(spa_l2cache_remove);
EXPORT_SYMBOL(spa_l2cache_exists);
EXPORT_SYMBOL(spa_l2cache_activate);
EXPORT_SYMBOL(spa_l2cache_drop);
/* scanning */
EXPORT_SYMBOL(spa_scan);
EXPORT_SYMBOL(spa_scan_stop);
/* spa syncing */
EXPORT_SYMBOL(spa_sync); /* only for DMU use */
EXPORT_SYMBOL(spa_sync_allpools);
/* properties */
EXPORT_SYMBOL(spa_prop_set);
EXPORT_SYMBOL(spa_prop_get);
EXPORT_SYMBOL(spa_prop_clear_bootfs);
/* asynchronous event notification */
EXPORT_SYMBOL(spa_event_notify);
#endif
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(spa_load_verify_maxinflight, int, 0644);
MODULE_PARM_DESC(spa_load_verify_maxinflight,
"Max concurrent traversal I/Os while verifying pool during import -X");
module_param(spa_load_verify_metadata, int, 0644);
MODULE_PARM_DESC(spa_load_verify_metadata,
"Set to traverse metadata on pool import");
module_param(spa_load_verify_data, int, 0644);
MODULE_PARM_DESC(spa_load_verify_data,
"Set to traverse data on pool import");
/* CSTYLED */
module_param(zio_taskq_batch_pct, uint, 0444);
MODULE_PARM_DESC(zio_taskq_batch_pct,
"Percentage of CPUs to run an IO worker thread");
#endif
diff --git a/module/zfs/spa_config.c b/module/zfs/spa_config.c
index ec9661b86ebc..4e9fd6c575ff 100644
--- a/module/zfs/spa_config.c
+++ b/module/zfs/spa_config.c
@@ -1,614 +1,615 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright 2011 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright 2017 Joyent, Inc.
*/
#include <sys/spa.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa_impl.h>
#include <sys/nvpair.h>
#include <sys/uio.h>
#include <sys/fs/zfs.h>
#include <sys/vdev_impl.h>
#include <sys/zfs_ioctl.h>
#include <sys/systeminfo.h>
#include <sys/sunddi.h>
#include <sys/zfeature.h>
#ifdef _KERNEL
#include <sys/kobj.h>
#include <sys/zone.h>
#endif
/*
* Pool configuration repository.
*
* Pool configuration is stored as a packed nvlist on the filesystem. By
* default, all pools are stored in /etc/zfs/zpool.cache and loaded on boot
* (when the ZFS module is loaded). Pools can also have the 'cachefile'
 * property set that allows them to be stored in an alternate location under
 * the control of external software.
*
* For each cache file, we have a single nvlist which holds all the
* configuration information. When the module loads, we read this information
* from /etc/zfs/zpool.cache and populate the SPA namespace. This namespace is
* maintained independently in spa.c. Whenever the namespace is modified, or
- * the configuration of a pool is changed, we call spa_config_sync(), which
+ * the configuration of a pool is changed, we call spa_write_cachefile(), which
* walks through all the active pools and writes the configuration to disk.
*/
static uint64_t spa_config_generation = 1;
/*
* This can be overridden in userland to preserve an alternate namespace for
* userland pools when doing testing.
*/
char *spa_config_path = ZPOOL_CACHE;
int zfs_autoimport_disable = 1;
/*
* Called when the module is first loaded, this routine loads the configuration
* file into the SPA namespace. It does not actually open or load the pools; it
* only populates the namespace.
*/
void
spa_config_load(void)
{
void *buf = NULL;
nvlist_t *nvlist, *child;
nvpair_t *nvpair;
char *pathname;
struct _buf *file;
uint64_t fsize;
#ifdef _KERNEL
if (zfs_autoimport_disable)
return;
#endif
/*
* Open the configuration file.
*/
pathname = kmem_alloc(MAXPATHLEN, KM_SLEEP);
(void) snprintf(pathname, MAXPATHLEN, "%s%s",
(rootdir != NULL) ? "./" : "", spa_config_path);
file = kobj_open_file(pathname);
kmem_free(pathname, MAXPATHLEN);
if (file == (struct _buf *)-1)
return;
if (kobj_get_filesize(file, &fsize) != 0)
goto out;
buf = kmem_alloc(fsize, KM_SLEEP);
/*
* Read the nvlist from the file.
*/
if (kobj_read_file(file, buf, fsize, 0) < 0)
goto out;
/*
* Unpack the nvlist.
*/
if (nvlist_unpack(buf, fsize, &nvlist, KM_SLEEP) != 0)
goto out;
/*
* Iterate over all elements in the nvlist, creating a new spa_t for
* each one with the specified configuration.
*/
mutex_enter(&spa_namespace_lock);
nvpair = NULL;
while ((nvpair = nvlist_next_nvpair(nvlist, nvpair)) != NULL) {
if (nvpair_type(nvpair) != DATA_TYPE_NVLIST)
continue;
child = fnvpair_value_nvlist(nvpair);
if (spa_lookup(nvpair_name(nvpair)) != NULL)
continue;
(void) spa_add(nvpair_name(nvpair), child, NULL);
}
mutex_exit(&spa_namespace_lock);
nvlist_free(nvlist);
out:
if (buf != NULL)
kmem_free(buf, fsize);
kobj_close_file(file);
}
static int
spa_config_remove(spa_config_dirent_t *dp)
{
#if defined(__linux__) && defined(_KERNEL)
int error, flags = FWRITE | FTRUNC;
uio_seg_t seg = UIO_SYSSPACE;
vnode_t *vp;
error = vn_open(dp->scd_path, seg, flags, 0644, &vp, 0, 0);
if (error == 0) {
(void) VOP_FSYNC(vp, FSYNC, kcred, NULL);
(void) VOP_CLOSE(vp, 0, 1, 0, kcred, NULL);
}
return (error);
#else
return (vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE));
#endif
}
static int
spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
{
size_t buflen;
char *buf;
vnode_t *vp;
int oflags = FWRITE | FTRUNC | FCREAT | FOFFMAX;
char *temp;
int err;
/*
* If the nvlist is empty (NULL), then remove the old cachefile.
*/
if (nvl == NULL) {
err = spa_config_remove(dp);
if (err == ENOENT)
err = 0;
return (err);
}
/*
* Pack the configuration into a buffer.
*/
buf = fnvlist_pack(nvl, &buflen);
temp = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
#if defined(__linux__) && defined(_KERNEL)
/*
* Write the configuration to disk. Due to the complexity involved
* in performing a rename and remove from within the kernel the file
* is instead truncated and overwritten in place. This way we always
* have a consistent view of the data or a zero length file.
*/
err = vn_open(dp->scd_path, UIO_SYSSPACE, oflags, 0644, &vp, 0, 0);
if (err == 0) {
err = vn_rdwr(UIO_WRITE, vp, buf, buflen, 0,
UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, NULL);
if (err == 0)
err = VOP_FSYNC(vp, FSYNC, kcred, NULL);
(void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL);
if (err)
(void) spa_config_remove(dp);
}
#else
/*
* Write the configuration to disk. We need to do the traditional
* 'write to temporary file, sync, move over original' to make sure we
* always have a consistent view of the data.
*/
(void) snprintf(temp, MAXPATHLEN, "%s.tmp", dp->scd_path);
err = vn_open(temp, UIO_SYSSPACE, oflags, 0644, &vp, CRCREAT, 0);
if (err == 0) {
err = vn_rdwr(UIO_WRITE, vp, buf, buflen, 0, UIO_SYSSPACE,
0, RLIM64_INFINITY, kcred, NULL);
if (err == 0)
err = VOP_FSYNC(vp, FSYNC, kcred, NULL);
if (err == 0)
err = vn_rename(temp, dp->scd_path, UIO_SYSSPACE);
(void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL);
}
(void) vn_remove(temp, UIO_SYSSPACE, RMFILE);
#endif
fnvlist_pack_free(buf, buflen);
kmem_free(temp, MAXPATHLEN);
return (err);
}
/*
* Synchronize pool configuration to disk. This must be called with the
* namespace lock held. Synchronizing the pool cache is typically done after
* the configuration has been synced to the MOS. This exposes a window where
* the MOS config will have been updated but the cache file has not. If
* the system were to crash at that instant then the cached config may not
* contain the correct information to open the pool and an explicit import
* would be required.
*/
void
-spa_config_sync(spa_t *target, boolean_t removing, boolean_t postsysevent)
+spa_write_cachefile(spa_t *target, boolean_t removing, boolean_t postsysevent)
{
spa_config_dirent_t *dp, *tdp;
nvlist_t *nvl;
char *pool_name;
boolean_t ccw_failure;
int error = 0;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (rootdir == NULL || !(spa_mode_global & FWRITE))
return;
/*
* Iterate over all cachefiles for the pool, past or present. When the
* cachefile is changed, the new one is pushed onto this list, allowing
* us to update previous cachefiles that no longer contain this pool.
*/
ccw_failure = B_FALSE;
for (dp = list_head(&target->spa_config_list); dp != NULL;
dp = list_next(&target->spa_config_list, dp)) {
spa_t *spa = NULL;
if (dp->scd_path == NULL)
continue;
/*
* Iterate over all pools, adding any matching pools to 'nvl'.
*/
nvl = NULL;
while ((spa = spa_next(spa)) != NULL) {
/*
* Skip over our own pool if we're about to remove
* ourselves from the spa namespace or any pool that
* is readonly. Since we cannot guarantee that a
* readonly pool would successfully import upon reboot,
* we don't allow them to be written to the cache file.
*/
if ((spa == target && removing) ||
!spa_writeable(spa))
continue;
mutex_enter(&spa->spa_props_lock);
tdp = list_head(&spa->spa_config_list);
if (spa->spa_config == NULL ||
tdp == NULL ||
tdp->scd_path == NULL ||
strcmp(tdp->scd_path, dp->scd_path) != 0) {
mutex_exit(&spa->spa_props_lock);
continue;
}
if (nvl == NULL)
nvl = fnvlist_alloc();
if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME)
pool_name = fnvlist_lookup_string(
spa->spa_config, ZPOOL_CONFIG_POOL_NAME);
else
pool_name = spa_name(spa);
fnvlist_add_nvlist(nvl, pool_name, spa->spa_config);
mutex_exit(&spa->spa_props_lock);
}
error = spa_config_write(dp, nvl);
if (error != 0)
ccw_failure = B_TRUE;
nvlist_free(nvl);
}
if (ccw_failure) {
/*
* Keep trying so that configuration data is
* written if/when any temporary filesystem
* resource issues are resolved.
*/
if (target->spa_ccw_fail_time == 0) {
zfs_ereport_post(FM_EREPORT_ZFS_CONFIG_CACHE_WRITE,
target, NULL, NULL, NULL, 0, 0);
}
target->spa_ccw_fail_time = gethrtime();
spa_async_request(target, SPA_ASYNC_CONFIG_UPDATE);
} else {
/*
* Do not rate limit future attempts to update
* the config cache.
*/
target->spa_ccw_fail_time = 0;
}
/*
* Remove any config entries older than the current one.
*/
dp = list_head(&target->spa_config_list);
while ((tdp = list_next(&target->spa_config_list, dp)) != NULL) {
list_remove(&target->spa_config_list, tdp);
if (tdp->scd_path != NULL)
spa_strfree(tdp->scd_path);
kmem_free(tdp, sizeof (spa_config_dirent_t));
}
spa_config_generation++;
if (postsysevent)
spa_event_notify(target, NULL, NULL, ESC_ZFS_CONFIG_SYNC);
}
/*
* Sigh. Inside a local zone, we don't have access to /etc/zfs/zpool.cache,
* and we don't want to allow the local zone to see all the pools anyway.
* So we have to invent the ZFS_IOC_CONFIG ioctl to grab the configuration
 * information for all pools visible within the zone.
*/
nvlist_t *
spa_all_configs(uint64_t *generation)
{
nvlist_t *pools;
spa_t *spa = NULL;
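/*
 * If the caller's generation already matches ours, the cached
 * configs have not changed since it last asked; return NULL.
 */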
if (*generation == spa_config_generation)
return (NULL);
pools = fnvlist_alloc();
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (INGLOBALZONE(curproc) ||
zone_dataset_visible(spa_name(spa), NULL)) {
mutex_enter(&spa->spa_props_lock);
fnvlist_add_nvlist(pools, spa_name(spa),
spa->spa_config);
mutex_exit(&spa->spa_props_lock);
}
}
*generation = spa_config_generation;
mutex_exit(&spa_namespace_lock);
return (pools);
}
void
spa_config_set(spa_t *spa, nvlist_t *config)
{
mutex_enter(&spa->spa_props_lock);
nvlist_free(spa->spa_config);
spa->spa_config = config;
mutex_exit(&spa->spa_props_lock);
}
/*
* Generate the pool's configuration based on the current in-core state.
*
* We infer whether to generate a complete config or just one top-level config
* based on whether vd is the root vdev.
*/
nvlist_t *
spa_config_generate(spa_t *spa, vdev_t *vd, uint64_t txg, int getstats)
{
nvlist_t *config, *nvroot;
vdev_t *rvd = spa->spa_root_vdev;
unsigned long hostid = 0;
boolean_t locked = B_FALSE;
uint64_t split_guid;
char *pool_name;
if (vd == NULL) {
vd = rvd;
locked = B_TRUE;
spa_config_enter(spa, SCL_CONFIG | SCL_STATE, FTAG, RW_READER);
}
ASSERT(spa_config_held(spa, SCL_CONFIG | SCL_STATE, RW_READER) ==
(SCL_CONFIG | SCL_STATE));
/*
* If txg is -1, report the current value of spa->spa_config_txg.
*/
if (txg == -1ULL)
txg = spa->spa_config_txg;
/*
* Originally, users had to handle spa namespace collisions by either
* exporting the already imported pool or by specifying a new name for
* the pool with a conflicting name. In the case of root pools from
* virtual guests, neither approach to collision resolution is
* reasonable. This is addressed by extending the new name syntax with
* an option to specify that the new name is temporary. When specified,
* ZFS_IMPORT_TEMP_NAME will be set in spa->spa_import_flags to tell us
* to use the previous name, which we do below.
*/
if (spa->spa_import_flags & ZFS_IMPORT_TEMP_NAME) {
VERIFY0(nvlist_lookup_string(spa->spa_config,
ZPOOL_CONFIG_POOL_NAME, &pool_name));
} else
pool_name = spa_name(spa);
config = fnvlist_alloc();
fnvlist_add_uint64(config, ZPOOL_CONFIG_VERSION, spa_version(spa));
fnvlist_add_string(config, ZPOOL_CONFIG_POOL_NAME, pool_name);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_STATE, spa_state(spa));
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_TXG, txg);
fnvlist_add_uint64(config, ZPOOL_CONFIG_POOL_GUID, spa_guid(spa));
fnvlist_add_uint64(config, ZPOOL_CONFIG_ERRATA, spa->spa_errata);
if (spa->spa_comment != NULL)
fnvlist_add_string(config, ZPOOL_CONFIG_COMMENT,
spa->spa_comment);
hostid = spa_get_hostid();
if (hostid != 0)
fnvlist_add_uint64(config, ZPOOL_CONFIG_HOSTID, hostid);
fnvlist_add_string(config, ZPOOL_CONFIG_HOSTNAME, utsname()->nodename);
int config_gen_flags = 0;
if (vd != rvd) {
fnvlist_add_uint64(config, ZPOOL_CONFIG_TOP_GUID,
vd->vdev_top->vdev_guid);
fnvlist_add_uint64(config, ZPOOL_CONFIG_GUID,
vd->vdev_guid);
if (vd->vdev_isspare)
fnvlist_add_uint64(config,
ZPOOL_CONFIG_IS_SPARE, 1ULL);
if (vd->vdev_islog)
fnvlist_add_uint64(config,
ZPOOL_CONFIG_IS_LOG, 1ULL);
vd = vd->vdev_top; /* label contains top config */
} else {
/*
* Only add the (potentially large) split information
* in the mos config, and not in the vdev labels
*/
if (spa->spa_config_splitting != NULL)
fnvlist_add_nvlist(config, ZPOOL_CONFIG_SPLIT,
spa->spa_config_splitting);
fnvlist_add_boolean(config, ZPOOL_CONFIG_HAS_PER_VDEV_ZAPS);
config_gen_flags |= VDEV_CONFIG_MOS;
}
/*
* Add the top-level config. We even add this on pools which
* don't support holes in the namespace.
*/
vdev_top_config_generate(spa, config);
/*
* If we're splitting, record the original pool's guid.
*/
if (spa->spa_config_splitting != NULL &&
nvlist_lookup_uint64(spa->spa_config_splitting,
ZPOOL_CONFIG_SPLIT_GUID, &split_guid) == 0) {
fnvlist_add_uint64(config, ZPOOL_CONFIG_SPLIT_GUID, split_guid);
}
nvroot = vdev_config_generate(spa, vd, getstats, config_gen_flags);
fnvlist_add_nvlist(config, ZPOOL_CONFIG_VDEV_TREE, nvroot);
nvlist_free(nvroot);
/*
* Store what's necessary for reading the MOS in the label.
*/
fnvlist_add_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
spa->spa_label_features);
if (getstats && spa_load_state(spa) == SPA_LOAD_NONE) {
ddt_histogram_t *ddh;
ddt_stat_t *dds;
ddt_object_t *ddo;
ddh = kmem_zalloc(sizeof (ddt_histogram_t), KM_SLEEP);
ddt_get_dedup_histogram(spa, ddh);
fnvlist_add_uint64_array(config,
ZPOOL_CONFIG_DDT_HISTOGRAM,
(uint64_t *)ddh, sizeof (*ddh) / sizeof (uint64_t));
kmem_free(ddh, sizeof (ddt_histogram_t));
ddo = kmem_zalloc(sizeof (ddt_object_t), KM_SLEEP);
ddt_get_dedup_object_stats(spa, ddo);
fnvlist_add_uint64_array(config,
ZPOOL_CONFIG_DDT_OBJ_STATS,
(uint64_t *)ddo, sizeof (*ddo) / sizeof (uint64_t));
kmem_free(ddo, sizeof (ddt_object_t));
dds = kmem_zalloc(sizeof (ddt_stat_t), KM_SLEEP);
ddt_get_dedup_stats(spa, dds);
fnvlist_add_uint64_array(config,
ZPOOL_CONFIG_DDT_STATS,
(uint64_t *)dds, sizeof (*dds) / sizeof (uint64_t));
kmem_free(dds, sizeof (ddt_stat_t));
}
if (locked)
spa_config_exit(spa, SCL_CONFIG | SCL_STATE, FTAG);
return (config);
}
/*
* Update all disk labels, generate a fresh config based on the current
* in-core state, and sync the global config cache (do not sync the config
* cache if this is a booting rootpool).
*/
void
spa_config_update(spa_t *spa, int what)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t txg;
int c;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
txg = spa_last_synced_txg(spa) + 1;
if (what == SPA_CONFIG_UPDATE_POOL) {
vdev_config_dirty(rvd);
} else {
/*
* If we have top-level vdevs that were added but have
* not yet been prepared for allocation, do that now.
* (It's safe now because the config cache is up to date,
* so it will be able to translate the new DVAs.)
* See comments in spa_vdev_add() for full details.
*/
for (c = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
if (tvd->vdev_ms_array == 0)
vdev_metaslab_set_size(tvd);
vdev_expand(tvd, txg);
}
}
spa_config_exit(spa, SCL_ALL, FTAG);
/*
* Wait for the mosconfig to be regenerated and synced.
*/
txg_wait_synced(spa->spa_dsl_pool, txg);
/*
* Update the global config cache to reflect the new mosconfig.
*/
- if (!spa->spa_is_root)
- spa_config_sync(spa, B_FALSE, what != SPA_CONFIG_UPDATE_POOL);
+ if (!spa->spa_is_root) {
+ spa_write_cachefile(spa, B_FALSE,
+ what != SPA_CONFIG_UPDATE_POOL);
+ }
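/*
 * A pool-level update is followed by a vdev-level pass so that any
 * newly added top-level vdevs get their metaslabs sized and are
 * expanded (see the SPA_CONFIG_UPDATE_VDEVS branch above).
 */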
if (what == SPA_CONFIG_UPDATE_POOL)
spa_config_update(spa, SPA_CONFIG_UPDATE_VDEVS);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
-EXPORT_SYMBOL(spa_config_sync);
EXPORT_SYMBOL(spa_config_load);
EXPORT_SYMBOL(spa_all_configs);
EXPORT_SYMBOL(spa_config_set);
EXPORT_SYMBOL(spa_config_generate);
EXPORT_SYMBOL(spa_config_update);
module_param(spa_config_path, charp, 0444);
MODULE_PARM_DESC(spa_config_path, "SPA config file (/etc/zfs/zpool.cache)");
module_param(zfs_autoimport_disable, int, 0644);
MODULE_PARM_DESC(zfs_autoimport_disable, "Disable pool import at module load");
#endif
diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c
index 17f8c1638619..5c6e825671b0 100644
--- a/module/zfs/spa_misc.c
+++ b/module/zfs/spa_misc.c
@@ -1,2284 +1,2356 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright 2015 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014 Spectra Logic Corporation, All rights reserved.
* Copyright 2013 Saso Kiselkov. All rights reserved.
* Copyright (c) 2017 Datto Inc.
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/zio_compress.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/zap.h>
#include <sys/zil.h>
#include <sys/vdev_impl.h>
#include <sys/vdev_file.h>
#include <sys/vdev_raidz.h>
#include <sys/metaslab.h>
#include <sys/uberblock_impl.h>
#include <sys/txg.h>
#include <sys/avl.h>
#include <sys/unique.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_prop.h>
#include <sys/fm/util.h>
#include <sys/dsl_scan.h>
#include <sys/fs/zfs.h>
#include <sys/metaslab_impl.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/kstat.h>
#include "zfs_prop.h"
#include <sys/zfeature.h>
#include "qat.h"
/*
* SPA locking
*
* There are four basic locks for managing spa_t structures:
*
* spa_namespace_lock (global mutex)
*
* This lock must be acquired to do any of the following:
*
* - Lookup a spa_t by name
* - Add or remove a spa_t from the namespace
* - Increase spa_refcount from non-zero
* - Check if spa_refcount is zero
* - Rename a spa_t
* - add/remove/attach/detach devices
* - Held for the duration of create/destroy/import/export
*
* It does not need to handle recursion. A create or destroy may
* reference objects (files or zvols) in other pools, but by
* definition they must have an existing reference, and will never need
* to lookup a spa_t by name.
*
* spa_refcount (per-spa refcount_t protected by mutex)
*
 * This reference count keeps track of any active users of the spa_t. The
* spa_t cannot be destroyed or freed while this is non-zero. Internally,
* the refcount is never really 'zero' - opening a pool implicitly keeps
* some references in the DMU. Internally we check against spa_minref, but
* present the image of a zero/non-zero value to consumers.
*
* spa_config_lock[] (per-spa array of rwlocks)
*
* This protects the spa_t from config changes, and must be held in
* the following circumstances:
*
* - RW_READER to perform I/O to the spa
* - RW_WRITER to change the vdev config
*
* The locking order is fairly straightforward:
*
* spa_namespace_lock -> spa_refcount
*
* The namespace lock must be acquired to increase the refcount from 0
* or to check if it is zero.
*
* spa_refcount -> spa_config_lock[]
*
* There must be at least one valid reference on the spa_t to acquire
* the config lock.
*
* spa_namespace_lock -> spa_config_lock[]
*
* The namespace lock must always be taken before the config lock.
*
*
* The spa_namespace_lock can be acquired directly and is globally visible.
*
* The namespace is manipulated using the following functions, all of which
* require the spa_namespace_lock to be held.
*
* spa_lookup() Lookup a spa_t by name.
*
* spa_add() Create a new spa_t in the namespace.
*
* spa_remove() Remove a spa_t from the namespace. This also
* frees up any memory associated with the spa_t.
*
* spa_next() Returns the next spa_t in the system, or the
* first if NULL is passed.
*
* spa_evict_all() Shutdown and remove all spa_t structures in
* the system.
*
* spa_guid_exists() Determine whether a pool/device guid exists.
*
* The spa_refcount is manipulated using the following functions:
*
* spa_open_ref() Adds a reference to the given spa_t. Must be
* called with spa_namespace_lock held if the
* refcount is currently zero.
*
* spa_close() Remove a reference from the spa_t. This will
* not free the spa_t or remove it from the
* namespace. No locking is required.
*
* spa_refcount_zero() Returns true if the refcount is currently
* zero. Must be called with spa_namespace_lock
* held.
*
* The spa_config_lock[] is an array of rwlocks, ordered as follows:
* SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV.
* spa_config_lock[] is manipulated with spa_config_{enter,exit,held}().
*
* To read the configuration, it suffices to hold one of these locks as reader.
* To modify the configuration, you must hold all locks as writer. To modify
* vdev state without altering the vdev tree's topology (e.g. online/offline),
* you must hold SCL_STATE and SCL_ZIO as writer.
*
* We use these distinct config locks to avoid recursive lock entry.
* For example, spa_sync() (which holds SCL_CONFIG as reader) induces
* block allocations (SCL_ALLOC), which may require reading space maps
* from disk (dmu_read() -> zio_read() -> SCL_ZIO).
*
* The spa config locks cannot be normal rwlocks because we need the
* ability to hand off ownership. For example, SCL_ZIO is acquired
* by the issuing thread and later released by an interrupt thread.
* They do, however, obey the usual write-wanted semantics to prevent
* writer (i.e. system administrator) starvation.
*
* The lock acquisition rules are as follows:
*
* SCL_CONFIG
* Protects changes to the vdev tree topology, such as vdev
* add/remove/attach/detach. Protects the dirty config list
* (spa_config_dirty_list) and the set of spares and l2arc devices.
*
* SCL_STATE
* Protects changes to pool state and vdev state, such as vdev
* online/offline/fault/degrade/clear. Protects the dirty state list
* (spa_state_dirty_list) and global pool state (spa_state).
*
* SCL_ALLOC
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_alloc() and metaslab_claim().
*
* SCL_ZIO
* Held by bp-level zios (those which have no io_vd upon entry)
* to prevent changes to the vdev tree. The bp-level zio implicitly
* protects all of its vdev child zios, which do not hold SCL_ZIO.
*
* SCL_FREE
* Protects changes to metaslab groups and classes.
* Held as reader by metaslab_free(). SCL_FREE is distinct from
* SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free
* blocks in zio_done() while another i/o that holds either
* SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete.
*
* SCL_VDEV
* Held as reader to prevent changes to the vdev tree during trivial
* inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the
* other locks, and lower than all of them, to ensure that it's safe
* to acquire regardless of caller context.
*
* In addition, the following rules apply:
*
* (a) spa_props_lock protects pool properties, spa_config and spa_config_list.
* The lock ordering is SCL_CONFIG > spa_props_lock.
*
* (b) I/O operations on leaf vdevs. For any zio operation that takes
* an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(),
 * or zio_write_phys() -- the caller must ensure that the config cannot
 * change in the interim, and that the vdev cannot be reopened.
* SCL_STATE as reader suffices for both.
*
* The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit().
*
* spa_vdev_enter() Acquire the namespace lock and the config lock
* for writing.
*
* spa_vdev_exit() Release the config lock, wait for all I/O
* to complete, sync the updated configs to the
* cache, and release the namespace lock.
*
* vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit().
* Like spa_vdev_enter/exit, these are convenience wrappers -- the actual
* locking is, always, based on spa_namespace_lock and spa_config_lock[].
*
* spa_rename() is also implemented within this file since it requires
* manipulation of the namespace.
*/
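/*
 * For illustration, the common reader pattern described above looks like:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	...read-only inspection of the vdev tree...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);
 *
 * while configuration changes take all locks as writer, e.g.
 * spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER).
 */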
static avl_tree_t spa_namespace_avl;
kmutex_t spa_namespace_lock;
static kcondvar_t spa_namespace_cv;
int spa_max_replication_override = SPA_DVAS_PER_BP;
static kmutex_t spa_spare_lock;
static avl_tree_t spa_spare_avl;
static kmutex_t spa_l2cache_lock;
static avl_tree_t spa_l2cache_avl;
kmem_cache_t *spa_buffer_pool;
int spa_mode_global;
#ifdef ZFS_DEBUG
-int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR | ZFS_DEBUG_SPA);
+/*
+ * Everything except dprintf, set_error, spa, and indirect_remap is on
+ * by default in debug builds.
+ */
+int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SET_ERROR |
+ ZFS_DEBUG_SPA | ZFS_DEBUG_INDIRECT_REMAP);
#else
int zfs_flags = 0;
#endif
/*
* zfs_recover can be set to nonzero to attempt to recover from
* otherwise-fatal errors, typically caused by on-disk corruption. When
* set, calls to zfs_panic_recover() will turn into warning messages.
* This should only be used as a last resort, as it typically results
* in leaked space, or worse.
*/
int zfs_recover = B_FALSE;
/*
* If destroy encounters an EIO while reading metadata (e.g. indirect
* blocks), space referenced by the missing metadata can not be freed.
* Normally this causes the background destroy to become "stalled", as
* it is unable to make forward progress. While in this stalled state,
* all remaining space to free from the error-encountering filesystem is
* "temporarily leaked". Set this flag to cause it to ignore the EIO,
* permanently leak the space from indirect blocks that can not be read,
* and continue to free everything else that it can.
*
* The default, "stalling" behavior is useful if the storage partially
* fails (i.e. some but not all i/os fail), and then later recovers. In
* this case, we will be able to continue pool operations while it is
* partially failed, and when it recovers, we can continue to free the
* space, with no leaks. However, note that this case is actually
* fairly rare.
*
* Typically pools either (a) fail completely (but perhaps temporarily,
* e.g. a top-level vdev going offline), or (b) have localized,
* permanent errors (e.g. disk returns the wrong data due to bit flip or
* firmware bug). In case (a), this setting does not matter because the
* pool will be suspended and the sync thread will not be able to make
* forward progress regardless. In case (b), because the error is
* permanent, the best we can do is leak the minimum amount of space,
* which is what setting this flag will do. Therefore, it is reasonable
* for this flag to normally be set, but we chose the more conservative
* approach of not setting it, so that there is no possibility of
* leaking space in the "partial temporary" failure case.
*/
int zfs_free_leak_on_eio = B_FALSE;
/*
* Expiration time in milliseconds. This value has two meanings. First it is
* used to determine when the spa_deadman() logic should fire. By default the
* spa_deadman() will fire if spa_sync() has not completed in 600 seconds.
* Secondly, the value determines if an I/O is considered "hung". Any I/O that
* has not completed in zfs_deadman_synctime_ms is considered "hung" resulting
* in one of three behaviors controlled by zfs_deadman_failmode.
*/
unsigned long zfs_deadman_synctime_ms = 600000ULL;
/*
* This value controls the maximum amount of time zio_wait() will block for an
* outstanding IO. By default this is 300 seconds at which point the "hung"
* behavior will be applied as described for zfs_deadman_synctime_ms.
*/
unsigned long zfs_deadman_ziotime_ms = 300000ULL;
/*
* Check time in milliseconds. This defines the frequency at which we check
* for hung I/O.
*/
unsigned long zfs_deadman_checktime_ms = 60000ULL;
/*
* By default the deadman is enabled.
*/
int zfs_deadman_enabled = 1;
/*
* Controls the behavior of the deadman when it detects a "hung" I/O.
* Valid values are zfs_deadman_failmode=<wait|continue|panic>.
*
* wait - Wait for the "hung" I/O (default)
* continue - Attempt to recover from a "hung" I/O
* panic - Panic the system
*/
char *zfs_deadman_failmode = "wait";
/*
* The worst case is single-sector max-parity RAID-Z blocks, in which
* case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1)
* times the size; so just assume that. Add to this the fact that
* we can have up to 3 DVAs per bp, and one more factor of 2 because
* the block may be dittoed with up to 3 DVAs by ddt_sync(). All together,
* the worst case is:
* (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24
*/
int spa_asize_inflation = 24;
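/* i.e. with VDEV_RAIDZ_MAXPARITY == 3 and SPA_DVAS_PER_BP == 3: (3 + 1) * 3 * 2 == 24 */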
/*
* Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in
* the pool to be consumed. This ensures that we don't run the pool
* completely out of space, due to unaccounted changes (e.g. to the MOS).
* It also limits the worst-case time to allocate space. If we have
* less than this amount of free space, most ZPL operations (e.g. write,
* create) will return ENOSPC.
*
* Certain operations (e.g. file removal, most administrative actions) can
* use half the slop space. They will only return ENOSPC if less than half
* the slop space is free. Typically, once the pool has less than the slop
* space free, the user will use these operations to free up space in the pool.
* These are the operations that call dsl_pool_adjustedsize() with the netfree
* argument set to TRUE.
*
* A very restricted set of operations are always permitted, regardless of
* the amount of free space. These are the operations that call
* dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy". If these
* operations result in a net increase in the amount of space used,
* it is possible to run the pool completely out of space, causing it to
* be permanently read-only.
*
* Note that on very small pools, the slop space will be larger than
* 3.2%, in an effort to have it be at least spa_min_slop (128MB),
* but we never allow it to be more than half the pool size.
*
* See also the comments in zfs_space_check_t.
*/
int spa_slop_shift = 5;
uint64_t spa_min_slop = 128 * 1024 * 1024;
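/* With the default spa_slop_shift of 5, the reserved slop fraction is 1/32, i.e. 3.125%. */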
/*
* ==========================================================================
* SPA config locking
* ==========================================================================
*/
static void
spa_config_lock_init(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL);
refcount_create_untracked(&scl->scl_count);
scl->scl_writer = NULL;
scl->scl_write_wanted = 0;
}
}
static void
spa_config_lock_destroy(spa_t *spa)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
mutex_destroy(&scl->scl_lock);
cv_destroy(&scl->scl_cv);
refcount_destroy(&scl->scl_count);
ASSERT(scl->scl_writer == NULL);
ASSERT(scl->scl_write_wanted == 0);
}
}
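/*
 * Non-blocking variant of spa_config_enter(): returns 1 if all requested
 * locks were acquired, or drops any locks taken so far and returns 0 on
 * contention.
 */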
int
spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw)
{
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
if (scl->scl_writer || scl->scl_write_wanted) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
} else {
ASSERT(scl->scl_writer != curthread);
if (!refcount_is_zero(&scl->scl_count)) {
mutex_exit(&scl->scl_lock);
spa_config_exit(spa, locks & ((1 << i) - 1),
tag);
return (0);
}
scl->scl_writer = curthread;
}
(void) refcount_add(&scl->scl_count, tag);
mutex_exit(&scl->scl_lock);
}
return (1);
}
void
spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw)
{
int wlocks_held = 0;
ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY);
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (scl->scl_writer == curthread)
wlocks_held |= (1 << i);
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
if (rw == RW_READER) {
while (scl->scl_writer || scl->scl_write_wanted) {
cv_wait(&scl->scl_cv, &scl->scl_lock);
}
} else {
ASSERT(scl->scl_writer != curthread);
while (!refcount_is_zero(&scl->scl_count)) {
scl->scl_write_wanted++;
cv_wait(&scl->scl_cv, &scl->scl_lock);
scl->scl_write_wanted--;
}
scl->scl_writer = curthread;
}
(void) refcount_add(&scl->scl_count, tag);
mutex_exit(&scl->scl_lock);
}
- ASSERT(wlocks_held <= locks);
+ ASSERT3U(wlocks_held, <=, locks);
}
void
spa_config_exit(spa_t *spa, int locks, void *tag)
{
for (int i = SCL_LOCKS - 1; i >= 0; i--) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
mutex_enter(&scl->scl_lock);
ASSERT(!refcount_is_zero(&scl->scl_count));
if (refcount_remove(&scl->scl_count, tag) == 0) {
ASSERT(scl->scl_writer == NULL ||
scl->scl_writer == curthread);
scl->scl_writer = NULL; /* OK in either case */
cv_broadcast(&scl->scl_cv);
}
mutex_exit(&scl->scl_lock);
}
}
int
spa_config_held(spa_t *spa, int locks, krw_t rw)
{
int locks_held = 0;
for (int i = 0; i < SCL_LOCKS; i++) {
spa_config_lock_t *scl = &spa->spa_config_lock[i];
if (!(locks & (1 << i)))
continue;
if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) ||
(rw == RW_WRITER && scl->scl_writer == curthread))
locks_held |= 1 << i;
}
return (locks_held);
}
/*
* ==========================================================================
* SPA namespace functions
* ==========================================================================
*/
/*
* Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held.
* Returns NULL if no matching spa_t is found.
*/
spa_t *
spa_lookup(const char *name)
{
static spa_t search; /* spa_t is large; don't allocate on stack */
spa_t *spa;
avl_index_t where;
char *cp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
(void) strlcpy(search.spa_name, name, sizeof (search.spa_name));
/*
* If it's a full dataset name, figure out the pool name and
* just use that.
*/
cp = strpbrk(search.spa_name, "/@#");
if (cp != NULL)
*cp = '\0';
spa = avl_find(&spa_namespace_avl, &search, &where);
return (spa);
}
/*
* Fires when spa_sync has not completed within zfs_deadman_synctime_ms.
* If the zfs_deadman_enabled flag is set then it inspects all vdev queues
* looking for potentially hung I/Os.
*/
void
spa_deadman(void *arg)
{
spa_t *spa = arg;
/* Disable the deadman if the pool is suspended. */
if (spa_suspended(spa))
return;
zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu",
(gethrtime() - spa->spa_sync_starttime) / NANOSEC,
++spa->spa_deadman_calls);
if (zfs_deadman_enabled)
vdev_deadman(spa->spa_root_vdev, FTAG);
spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq,
spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() +
MSEC_TO_TICK(zfs_deadman_checktime_ms));
}
/*
* Create an uninitialized spa_t with the given name. Requires
* spa_namespace_lock. The caller must ensure that the spa_t doesn't already
* exist by calling spa_lookup() first.
*/
spa_t *
spa_add(const char *name, nvlist_t *config, const char *altroot)
{
spa_t *spa;
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP);
mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL);
cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL);
for (int t = 0; t < TXG_SIZE; t++)
bplist_create(&spa->spa_free_bplist[t]);
(void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name));
spa->spa_state = POOL_STATE_UNINITIALIZED;
spa->spa_freeze_txg = UINT64_MAX;
spa->spa_final_txg = UINT64_MAX;
spa->spa_load_max_txg = UINT64_MAX;
spa->spa_proc = &p0;
spa->spa_proc_state = SPA_PROC_NONE;
spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms);
spa->spa_deadman_ziotime = MSEC2NSEC(zfs_deadman_ziotime_ms);
spa_set_deadman_failmode(spa, zfs_deadman_failmode);
refcount_create(&spa->spa_refcount);
spa_config_lock_init(spa);
spa_stats_init(spa);
avl_add(&spa_namespace_avl, spa);
/*
* Set the alternate root, if there is one.
*/
if (altroot)
spa->spa_root = spa_strdup(altroot);
avl_create(&spa->spa_alloc_tree, zio_bookmark_compare,
sizeof (zio_t), offsetof(zio_t, io_alloc_node));
/*
* Every pool starts with the default cachefile
*/
list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t),
offsetof(spa_config_dirent_t, scd_link));
dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP);
dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path);
list_insert_head(&spa->spa_config_list, dp);
VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
if (config != NULL) {
nvlist_t *features;
if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ,
&features) == 0) {
VERIFY(nvlist_dup(features, &spa->spa_label_features,
0) == 0);
}
VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0);
}
if (spa->spa_label_features == NULL) {
VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME,
KM_SLEEP) == 0);
}
spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0);
spa->spa_min_ashift = INT_MAX;
spa->spa_max_ashift = 0;
/* Reset cached value */
spa->spa_dedup_dspace = ~0ULL;
/*
* As a pool is being created, treat all features as disabled by
* setting SPA_FEATURE_DISABLED for all entries in the feature
* refcount cache.
*/
for (int i = 0; i < SPA_FEATURES; i++) {
spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED;
}
return (spa);
}
/*
* Removes a spa_t from the namespace, freeing up any memory used. Requires
* spa_namespace_lock. This is called only after the spa_t has been closed and
* deactivated.
*/
void
spa_remove(spa_t *spa)
{
spa_config_dirent_t *dp;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED);
ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0);
nvlist_free(spa->spa_config_splitting);
avl_remove(&spa_namespace_avl, spa);
cv_broadcast(&spa_namespace_cv);
if (spa->spa_root)
spa_strfree(spa->spa_root);
while ((dp = list_head(&spa->spa_config_list)) != NULL) {
list_remove(&spa->spa_config_list, dp);
if (dp->scd_path != NULL)
spa_strfree(dp->scd_path);
kmem_free(dp, sizeof (spa_config_dirent_t));
}
avl_destroy(&spa->spa_alloc_tree);
list_destroy(&spa->spa_config_list);
nvlist_free(spa->spa_label_features);
nvlist_free(spa->spa_load_info);
nvlist_free(spa->spa_feat_stats);
spa_config_set(spa, NULL);
refcount_destroy(&spa->spa_refcount);
spa_stats_destroy(spa);
spa_config_lock_destroy(spa);
for (int t = 0; t < TXG_SIZE; t++)
bplist_destroy(&spa->spa_free_bplist[t]);
zio_checksum_templates_free(spa);
cv_destroy(&spa->spa_async_cv);
cv_destroy(&spa->spa_evicting_os_cv);
cv_destroy(&spa->spa_proc_cv);
cv_destroy(&spa->spa_scrub_io_cv);
cv_destroy(&spa->spa_suspend_cv);
mutex_destroy(&spa->spa_alloc_lock);
mutex_destroy(&spa->spa_async_lock);
mutex_destroy(&spa->spa_errlist_lock);
mutex_destroy(&spa->spa_errlog_lock);
mutex_destroy(&spa->spa_evicting_os_lock);
mutex_destroy(&spa->spa_history_lock);
mutex_destroy(&spa->spa_proc_lock);
mutex_destroy(&spa->spa_props_lock);
mutex_destroy(&spa->spa_cksum_tmpls_lock);
mutex_destroy(&spa->spa_scrub_lock);
mutex_destroy(&spa->spa_suspend_lock);
mutex_destroy(&spa->spa_vdev_top_lock);
mutex_destroy(&spa->spa_feat_stats_lock);
kmem_free(spa, sizeof (spa_t));
}
/*
* Given a pool, return the next pool in the namespace, or NULL if there is
* none. If 'prev' is NULL, return the first pool.
*/
spa_t *
spa_next(spa_t *prev)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
if (prev)
return (AVL_NEXT(&spa_namespace_avl, prev));
else
return (avl_first(&spa_namespace_avl));
}
/*
* ==========================================================================
* SPA refcount functions
* ==========================================================================
*/
/*
* Add a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_open_ref(spa_t *spa, void *tag)
{
ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) refcount_add(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t. Must have at least one reference, or
* have the namespace lock held.
*/
void
spa_close(spa_t *spa, void *tag)
{
ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref ||
MUTEX_HELD(&spa_namespace_lock));
(void) refcount_remove(&spa->spa_refcount, tag);
}
/*
* Remove a reference to the given spa_t held by a dsl dir that is
* being asynchronously released. Async releases occur from a taskq
* performing eviction of dsl datasets and dirs. The namespace lock
* isn't held and the hold by the object being evicted may contribute to
* spa_minref (e.g. dataset or directory released during pool export),
* so the asserts in spa_close() do not apply.
*/
void
spa_async_close(spa_t *spa, void *tag)
{
(void) refcount_remove(&spa->spa_refcount, tag);
}
/*
* Check to see if the spa refcount is zero. Must be called with
* spa_namespace_lock held. We really compare against spa_minref, which is the
* number of references acquired when opening a pool.
*/
boolean_t
spa_refcount_zero(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
return (refcount_count(&spa->spa_refcount) == spa->spa_minref);
}
/*
* ==========================================================================
* SPA spare and l2cache tracking
* ==========================================================================
*/
/*
* Hot spares and cache devices are tracked using the same code below,
* for 'auxiliary' devices.
*/
typedef struct spa_aux {
uint64_t aux_guid;
uint64_t aux_pool;
avl_node_t aux_avl;
int aux_count;
} spa_aux_t;
static inline int
spa_aux_compare(const void *a, const void *b)
{
const spa_aux_t *sa = (const spa_aux_t *)a;
const spa_aux_t *sb = (const spa_aux_t *)b;
return (AVL_CMP(sa->aux_guid, sb->aux_guid));
}
void
spa_aux_add(vdev_t *vd, avl_tree_t *avl)
{
avl_index_t where;
spa_aux_t search;
spa_aux_t *aux;
search.aux_guid = vd->vdev_guid;
if ((aux = avl_find(avl, &search, &where)) != NULL) {
aux->aux_count++;
} else {
aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP);
aux->aux_guid = vd->vdev_guid;
aux->aux_count = 1;
avl_insert(avl, aux, where);
}
}
void
spa_aux_remove(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search;
spa_aux_t *aux;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
aux = avl_find(avl, &search, &where);
ASSERT(aux != NULL);
if (--aux->aux_count == 0) {
avl_remove(avl, aux);
kmem_free(aux, sizeof (spa_aux_t));
} else if (aux->aux_pool == spa_guid(vd->vdev_spa)) {
aux->aux_pool = 0ULL;
}
}
boolean_t
spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl)
{
spa_aux_t search, *found;
search.aux_guid = guid;
found = avl_find(avl, &search, NULL);
if (pool) {
if (found)
*pool = found->aux_pool;
else
*pool = 0ULL;
}
if (refcnt) {
if (found)
*refcnt = found->aux_count;
else
*refcnt = 0;
}
return (found != NULL);
}
void
spa_aux_activate(vdev_t *vd, avl_tree_t *avl)
{
spa_aux_t search, *found;
avl_index_t where;
search.aux_guid = vd->vdev_guid;
found = avl_find(avl, &search, &where);
ASSERT(found != NULL);
ASSERT(found->aux_pool == 0ULL);
found->aux_pool = spa_guid(vd->vdev_spa);
}
/*
* Spares are tracked globally due to the following constraints:
*
* - A spare may be part of multiple pools.
* - A spare may be added to a pool even if it's actively in use within
* another pool.
* - A spare in use in any pool can only be the source of a replacement if
* the target is a spare in the same pool.
*
* We keep track of all spares on the system through the use of a reference
* counted AVL tree. When a vdev is added as a spare, or used as a replacement
* spare, then we bump the reference count in the AVL tree. In addition, we set
* the 'vdev_isspare' member to indicate that the device is a spare (active or
* inactive). When a spare is made active (used to replace a device in the
* pool), we also keep track of which pool it's been made a part of.
*
* The 'spa_spare_lock' protects the AVL tree. These functions are normally
* called under the spa_namespace lock as part of vdev reconfiguration. The
* separate spare lock exists for the status query path, which does not need to
* be completely consistent with respect to other vdev configuration changes.
*/
static int
spa_spare_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_spare_add(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(!vd->vdev_isspare);
spa_aux_add(vd, &spa_spare_avl);
vd->vdev_isspare = B_TRUE;
mutex_exit(&spa_spare_lock);
}
void
spa_spare_remove(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_remove(vd, &spa_spare_avl);
vd->vdev_isspare = B_FALSE;
mutex_exit(&spa_spare_lock);
}
boolean_t
spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt)
{
boolean_t found;
mutex_enter(&spa_spare_lock);
found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
return (found);
}
void
spa_spare_activate(vdev_t *vd)
{
mutex_enter(&spa_spare_lock);
ASSERT(vd->vdev_isspare);
spa_aux_activate(vd, &spa_spare_avl);
mutex_exit(&spa_spare_lock);
}
/*
* Level 2 ARC devices are tracked globally for the same reasons as spares.
* Cache devices currently only support one pool per cache device, and so
* for these devices the aux reference count is currently unused beyond 1.
*/
static int
spa_l2cache_compare(const void *a, const void *b)
{
return (spa_aux_compare(a, b));
}
void
spa_l2cache_add(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(!vd->vdev_isl2cache);
spa_aux_add(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_TRUE;
mutex_exit(&spa_l2cache_lock);
}
void
spa_l2cache_remove(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_remove(vd, &spa_l2cache_avl);
vd->vdev_isl2cache = B_FALSE;
mutex_exit(&spa_l2cache_lock);
}
boolean_t
spa_l2cache_exists(uint64_t guid, uint64_t *pool)
{
boolean_t found;
mutex_enter(&spa_l2cache_lock);
found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
return (found);
}
void
spa_l2cache_activate(vdev_t *vd)
{
mutex_enter(&spa_l2cache_lock);
ASSERT(vd->vdev_isl2cache);
spa_aux_activate(vd, &spa_l2cache_avl);
mutex_exit(&spa_l2cache_lock);
}
/*
* ==========================================================================
* SPA vdev locking
* ==========================================================================
*/
/*
* Lock the given spa_t for the purpose of adding or removing a vdev.
* Grabs the global spa_namespace_lock plus the spa config lock for writing.
* It returns the next transaction group for the spa_t.
*/
uint64_t
spa_vdev_enter(spa_t *spa)
{
mutex_enter(&spa->spa_vdev_top_lock);
mutex_enter(&spa_namespace_lock);
return (spa_vdev_config_enter(spa));
}
/*
* Internal implementation for spa_vdev_enter(). Used when a vdev
* operation requires multiple syncs (i.e. removing a device) while
* keeping the spa_namespace_lock held.
*/
uint64_t
spa_vdev_config_enter(spa_t *spa)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
return (spa_last_synced_txg(spa) + 1);
}
/*
* Used in combination with spa_vdev_config_enter() to allow the syncing
* of multiple transactions without releasing the spa_namespace_lock.
*/
void
spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag)
{
ASSERT(MUTEX_HELD(&spa_namespace_lock));
int config_changed = B_FALSE;
ASSERT(txg > spa_last_synced_txg(spa));
spa->spa_pending_vdev = NULL;
/*
* Reassess the DTLs.
*/
vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE);
if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) {
config_changed = B_TRUE;
spa->spa_config_generation++;
}
/*
* Verify the metaslab classes.
*/
ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0);
ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0);
spa_config_exit(spa, SCL_ALL, spa);
/*
* Panic the system if the specified tag requires it. This
* is useful for ensuring that configurations are updated
* transactionally.
*/
if (zio_injection_enabled)
zio_handle_panic_injection(spa, tag, 0);
/*
* Note: this txg_wait_synced() is important because it ensures
* that there won't be more than one config change per txg.
* This allows us to use the txg as the generation number.
*/
if (error == 0)
txg_wait_synced(spa->spa_dsl_pool, txg);
if (vd != NULL) {
ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL);
spa_config_enter(spa, SCL_ALL, spa, RW_WRITER);
vdev_free(vd);
spa_config_exit(spa, SCL_ALL, spa);
}
/*
* If the config changed, update the config cache.
*/
if (config_changed)
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
}
/*
* Unlock the spa_t after adding or removing a vdev. Besides undoing the
* locking of spa_vdev_enter(), we also want to make sure the transactions have
* synced to disk, and then update the global configuration cache with the new
* information.
*/
int
spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error)
{
spa_vdev_config_exit(spa, vd, txg, error, FTAG);
mutex_exit(&spa_namespace_lock);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Lock the given spa_t for the purpose of changing vdev state.
*/
void
spa_vdev_state_enter(spa_t *spa, int oplocks)
{
int locks = SCL_STATE_ALL | oplocks;
/*
* Root pools may need to read from the underlying devfs filesystem
* when opening up a vdev. Unfortunately if we're holding the
* SCL_ZIO lock it will result in a deadlock when we try to issue
* the read from the root filesystem. Instead we "prefetch"
* the associated vnodes that we need prior to opening the
* underlying devices and cache them so that we can prevent
* any I/O when we are doing the actual open.
*/
if (spa_is_root(spa)) {
int low = locks & ~(SCL_ZIO - 1);
int high = locks & ~low;
spa_config_enter(spa, high, spa, RW_WRITER);
vdev_hold(spa->spa_root_vdev);
spa_config_enter(spa, low, spa, RW_WRITER);
} else {
spa_config_enter(spa, locks, spa, RW_WRITER);
}
spa->spa_vdev_locks = locks;
}
int
spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error)
{
boolean_t config_changed = B_FALSE;
vdev_t *vdev_top;
if (vd == NULL || vd == spa->spa_root_vdev) {
vdev_top = spa->spa_root_vdev;
} else {
vdev_top = vd->vdev_top;
}
if (vd != NULL || error == 0)
vdev_dtl_reassess(vdev_top, 0, 0, B_FALSE);
if (vd != NULL) {
if (vd != spa->spa_root_vdev)
vdev_state_dirty(vdev_top);
config_changed = B_TRUE;
spa->spa_config_generation++;
}
if (spa_is_root(spa))
vdev_rele(spa->spa_root_vdev);
ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL);
spa_config_exit(spa, spa->spa_vdev_locks, spa);
/*
* If anything changed, wait for it to sync. This ensures that,
* from the system administrator's perspective, zpool(1M) commands
* are synchronous. This is important for things like zpool offline:
* when the command completes, you expect no further I/O from ZFS.
*/
if (vd != NULL)
txg_wait_synced(spa->spa_dsl_pool, 0);
/*
* If the config changed, update the config cache.
*/
if (config_changed) {
mutex_enter(&spa_namespace_lock);
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
mutex_exit(&spa_namespace_lock);
}
return (error);
}
/*
* ==========================================================================
* Miscellaneous functions
* ==========================================================================
*/
void
spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx)
{
if (!nvlist_exists(spa->spa_label_features, feature)) {
fnvlist_add_boolean(spa->spa_label_features, feature);
/*
* When we are creating the pool (tx_txg==TXG_INITIAL), we can't
* dirty the vdev config because lock SCL_CONFIG is not held.
* Thankfully, in this case we don't need to dirty the config
* because it will be written out anyway when we finish
* creating the pool.
*/
if (tx->tx_txg != TXG_INITIAL)
vdev_config_dirty(spa->spa_root_vdev);
}
}
void
spa_deactivate_mos_feature(spa_t *spa, const char *feature)
{
if (nvlist_remove_all(spa->spa_label_features, feature) == 0)
vdev_config_dirty(spa->spa_root_vdev);
}
/*
* Rename a spa_t.
*/
int
spa_rename(const char *name, const char *newname)
{
spa_t *spa;
int err;
/*
* Lookup the spa_t and grab the config lock for writing. We need to
* actually open the pool so that we can sync out the necessary labels.
* It's OK to call spa_open() with the namespace lock held because we
* allow recursive calls for other reasons.
*/
mutex_enter(&spa_namespace_lock);
if ((err = spa_open(name, &spa, FTAG)) != 0) {
mutex_exit(&spa_namespace_lock);
return (err);
}
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
avl_remove(&spa_namespace_avl, spa);
(void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name));
avl_add(&spa_namespace_avl, spa);
/*
* Sync all labels to disk with the new names by marking the root vdev
* dirty and waiting for it to sync. It will pick up the new pool name
* during the sync.
*/
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_ALL, FTAG);
txg_wait_synced(spa->spa_dsl_pool, 0);
/*
* Sync the updated config cache.
*/
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
spa_close(spa, FTAG);
mutex_exit(&spa_namespace_lock);
return (0);
}
/*
* Return the spa_t associated with given pool_guid, if it exists. If
* device_guid is non-zero, determine whether the pool exists *and* contains
* a device with the specified device_guid.
*/
spa_t *
spa_by_guid(uint64_t pool_guid, uint64_t device_guid)
{
spa_t *spa;
avl_tree_t *t = &spa_namespace_avl;
ASSERT(MUTEX_HELD(&spa_namespace_lock));
for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) {
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
continue;
if (spa->spa_root_vdev == NULL)
continue;
if (spa_guid(spa) == pool_guid) {
if (device_guid == 0)
break;
if (vdev_lookup_by_guid(spa->spa_root_vdev,
device_guid) != NULL)
break;
/*
* Check any devices we may be in the process of adding.
*/
if (spa->spa_pending_vdev) {
if (vdev_lookup_by_guid(spa->spa_pending_vdev,
device_guid) != NULL)
break;
}
}
}
return (spa);
}
/*
* Determine whether a pool with the given pool_guid exists.
*/
boolean_t
spa_guid_exists(uint64_t pool_guid, uint64_t device_guid)
{
return (spa_by_guid(pool_guid, device_guid) != NULL);
}
char *
spa_strdup(const char *s)
{
size_t len;
char *new;
len = strlen(s);
new = kmem_alloc(len + 1, KM_SLEEP);
bcopy(s, new, len);
new[len] = '\0';
return (new);
}
void
spa_strfree(char *s)
{
kmem_free(s, strlen(s) + 1);
}
uint64_t
spa_get_random(uint64_t range)
{
uint64_t r;
ASSERT(range != 0);
if (range == 1)
return (0);
(void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t));
return (r % range);
}
uint64_t
spa_generate_guid(spa_t *spa)
{
uint64_t guid = spa_get_random(-1ULL);
if (spa != NULL) {
while (guid == 0 || spa_guid_exists(spa_guid(spa), guid))
guid = spa_get_random(-1ULL);
} else {
while (guid == 0 || spa_guid_exists(guid, 0))
guid = spa_get_random(-1ULL);
}
return (guid);
}
void
snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp)
{
char type[256];
char *checksum = NULL;
char *compress = NULL;
if (bp != NULL) {
if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) {
dmu_object_byteswap_t bswap =
DMU_OT_BYTESWAP(BP_GET_TYPE(bp));
(void) snprintf(type, sizeof (type), "bswap %s %s",
DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ?
"metadata" : "data",
dmu_ot_byteswap[bswap].ob_name);
} else {
(void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name,
sizeof (type));
}
if (!BP_IS_EMBEDDED(bp)) {
checksum =
zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name;
}
compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name;
}
SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum,
compress);
}
void
spa_freeze(spa_t *spa)
{
uint64_t freeze_txg = 0;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
if (spa->spa_freeze_txg == UINT64_MAX) {
freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE;
spa->spa_freeze_txg = freeze_txg;
}
spa_config_exit(spa, SCL_ALL, FTAG);
if (freeze_txg != 0)
txg_wait_synced(spa_get_dsl(spa), freeze_txg);
}
void
zfs_panic_recover(const char *fmt, ...)
{
va_list adx;
va_start(adx, fmt);
vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx);
va_end(adx);
}
/*
* This is a stripped-down version of strtoull, suitable only for converting
* lowercase hexadecimal numbers that don't overflow.
*/
uint64_t
zfs_strtonum(const char *str, char **nptr)
{
uint64_t val = 0;
char c;
int digit;
while ((c = *str) != '\0') {
if (c >= '0' && c <= '9')
digit = c - '0';
else if (c >= 'a' && c <= 'f')
digit = 10 + c - 'a';
else
break;
val *= 16;
val += digit;
str++;
}
if (nptr)
*nptr = (char *)str;
return (val);
}
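/*
 * Editorial usage sketch: zfs_strtonum() accepts only lowercase hex digits
 * and stops at the first other character, e.g.
 *
 *	char *end;
 *	uint64_t v = zfs_strtonum("1a2bq", &end);
 *
 * leaves v == 0x1a2b with end pointing at the 'q'.
 */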
/*
* ==========================================================================
* Accessor functions
* ==========================================================================
*/
boolean_t
spa_shutting_down(spa_t *spa)
{
return (spa->spa_async_suspended);
}
dsl_pool_t *
spa_get_dsl(spa_t *spa)
{
return (spa->spa_dsl_pool);
}
boolean_t
spa_is_initializing(spa_t *spa)
{
return (spa->spa_is_initializing);
}
+boolean_t
+spa_indirect_vdevs_loaded(spa_t *spa)
+{
+ return (spa->spa_indirect_vdevs_loaded);
+}
+
blkptr_t *
spa_get_rootblkptr(spa_t *spa)
{
return (&spa->spa_ubsync.ub_rootbp);
}
void
spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp)
{
spa->spa_uberblock.ub_rootbp = *bp;
}
void
spa_altroot(spa_t *spa, char *buf, size_t buflen)
{
if (spa->spa_root == NULL)
buf[0] = '\0';
else
(void) strncpy(buf, spa->spa_root, buflen);
}
int
spa_sync_pass(spa_t *spa)
{
return (spa->spa_sync_pass);
}
char *
spa_name(spa_t *spa)
{
return (spa->spa_name);
}
uint64_t
spa_guid(spa_t *spa)
{
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t guid;
/*
* If we fail to parse the config during spa_load(), we can go through
* the error path (which posts an ereport) and end up here with no root
* vdev. We stash the original pool guid in 'spa_config_guid' to handle
* this case.
*/
if (spa->spa_root_vdev == NULL)
return (spa->spa_config_guid);
guid = spa->spa_last_synced_guid != 0 ?
spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid;
/*
* Return the most recently synced out guid unless we're
* in syncing context.
*/
if (dp && dsl_pool_sync_context(dp))
return (spa->spa_root_vdev->vdev_guid);
else
return (guid);
}
uint64_t
spa_load_guid(spa_t *spa)
{
/*
* This is a GUID that exists solely as a reference for the
* purposes of the arc. It is generated at load time, and
* is never written to persistent storage.
*/
return (spa->spa_load_guid);
}
uint64_t
spa_last_synced_txg(spa_t *spa)
{
return (spa->spa_ubsync.ub_txg);
}
uint64_t
spa_first_txg(spa_t *spa)
{
return (spa->spa_first_txg);
}
uint64_t
spa_syncing_txg(spa_t *spa)
{
return (spa->spa_syncing_txg);
}
/*
* Return the last txg where data can be dirtied. The final txgs
* will be used to just clear out any deferred frees that remain.
*/
uint64_t
spa_final_dirty_txg(spa_t *spa)
{
return (spa->spa_final_txg - TXG_DEFER_SIZE);
}
pool_state_t
spa_state(spa_t *spa)
{
return (spa->spa_state);
}
spa_load_state_t
spa_load_state(spa_t *spa)
{
return (spa->spa_load_state);
}
uint64_t
spa_freeze_txg(spa_t *spa)
{
return (spa->spa_freeze_txg);
}
/*
* Return the inflated asize for a logical write in bytes. This is used by the
* DMU to calculate the space a logical write will require on disk.
* If lsize is smaller than the largest physical block size allocatable on this
* pool we use its value instead, since the write will end up using the whole
* block anyway.
*/
uint64_t
spa_get_worst_case_asize(spa_t *spa, uint64_t lsize)
{
if (lsize == 0)
return (0); /* No inflation needed */
return (MAX(lsize, 1 << spa->spa_max_ashift) * spa_asize_inflation);
}
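/*
 * Editorial worked example: with spa_asize_inflation == 24 and
 * spa_max_ashift == 12 (4 KiB allocations), an 8 KiB logical write is charged
 * MAX(8 KiB, 4 KiB) * 24 = 192 KiB, and a 512-byte write is charged
 * MAX(512 B, 4 KiB) * 24 = 96 KiB, since it still occupies a full 4 KiB block.
 */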
/*
* Return the amount of slop space in bytes. It is 1/32 of the pool (3.2%),
* or at least 128MB, unless that would cause it to be more than half the
* pool size.
*
* See the comment above spa_slop_shift for details.
*/
uint64_t
spa_get_slop_space(spa_t *spa)
{
uint64_t space = spa_get_dspace(spa);
return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop)));
}
uint64_t
spa_get_dspace(spa_t *spa)
{
return (spa->spa_dspace);
}
void
spa_update_dspace(spa_t *spa)
{
spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) +
ddt_get_dedup_dspace(spa);
+ if (spa->spa_vdev_removal != NULL) {
+ /*
+ * We can't allocate from the removing device, so
+ * subtract its size. This prevents the DMU/DSL from
+ * filling up the (now smaller) pool while we are in the
+ * middle of removing the device.
+ *
+ * Note that the DMU/DSL doesn't actually know or care
+ * how much space is allocated (it does its own tracking
+ * of how much space has been logically used). So it
+ * doesn't matter that the data we are moving may be
+ * allocated twice (on the old device and the new
+ * device).
+ */
+ vdev_t *vd = spa->spa_vdev_removal->svr_vdev;
+ spa->spa_dspace -= spa_deflate(spa) ?
+ vd->vdev_stat.vs_dspace : vd->vdev_stat.vs_space;
+ }
}
/*
* Return the failure mode that has been set for this pool. The default
* behavior will be to block all I/Os when a complete failure occurs.
*/
uint64_t
spa_get_failmode(spa_t *spa)
{
return (spa->spa_failmode);
}
boolean_t
spa_suspended(spa_t *spa)
{
return (spa->spa_suspended != ZIO_SUSPEND_NONE);
}
uint64_t
spa_version(spa_t *spa)
{
return (spa->spa_ubsync.ub_version);
}
boolean_t
spa_deflate(spa_t *spa)
{
return (spa->spa_deflate);
}
metaslab_class_t *
spa_normal_class(spa_t *spa)
{
return (spa->spa_normal_class);
}
metaslab_class_t *
spa_log_class(spa_t *spa)
{
return (spa->spa_log_class);
}
void
spa_evicting_os_register(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_insert_head(&spa->spa_evicting_os_list, os);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_deregister(spa_t *spa, objset_t *os)
{
mutex_enter(&spa->spa_evicting_os_lock);
list_remove(&spa->spa_evicting_os_list, os);
cv_broadcast(&spa->spa_evicting_os_cv);
mutex_exit(&spa->spa_evicting_os_lock);
}
void
spa_evicting_os_wait(spa_t *spa)
{
mutex_enter(&spa->spa_evicting_os_lock);
while (!list_is_empty(&spa->spa_evicting_os_list))
cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock);
mutex_exit(&spa->spa_evicting_os_lock);
dmu_buf_user_evict_wait();
}
int
spa_max_replication(spa_t *spa)
{
/*
* As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to
* handle BPs with more than one DVA allocated. Set our max
* replication level accordingly.
*/
if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS)
return (1);
return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override));
}
int
spa_prev_software_version(spa_t *spa)
{
return (spa->spa_prev_software_version);
}
uint64_t
spa_deadman_synctime(spa_t *spa)
{
return (spa->spa_deadman_synctime);
}
uint64_t
spa_deadman_ziotime(spa_t *spa)
{
return (spa->spa_deadman_ziotime);
}
uint64_t
spa_get_deadman_failmode(spa_t *spa)
{
return (spa->spa_deadman_failmode);
}
void
spa_set_deadman_failmode(spa_t *spa, const char *failmode)
{
if (strcmp(failmode, "wait") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
else if (strcmp(failmode, "continue") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_CONTINUE;
else if (strcmp(failmode, "panic") == 0)
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_PANIC;
else
spa->spa_deadman_failmode = ZIO_FAILURE_MODE_WAIT;
}
uint64_t
dva_get_dsize_sync(spa_t *spa, const dva_t *dva)
{
uint64_t asize = DVA_GET_ASIZE(dva);
uint64_t dsize = asize;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (asize != 0 && spa->spa_deflate) {
vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
if (vd != NULL)
dsize = (asize >> SPA_MINBLOCKSHIFT) *
vd->vdev_deflate_ratio;
}
return (dsize);
}
uint64_t
bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
return (dsize);
}
uint64_t
bp_get_dsize(spa_t *spa, const blkptr_t *bp)
{
uint64_t dsize = 0;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
for (int d = 0; d < BP_GET_NDVAS(bp); d++)
dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (dsize);
}
/*
* ==========================================================================
* Initialization and Termination
* ==========================================================================
*/
static int
spa_name_compare(const void *a1, const void *a2)
{
const spa_t *s1 = a1;
const spa_t *s2 = a2;
int s;
s = strcmp(s1->spa_name, s2->spa_name);
return (AVL_ISIGN(s));
}
void
spa_boot_init(void)
{
spa_config_load();
}
void
spa_init(int mode)
{
mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL);
avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t),
offsetof(spa_t, spa_avl));
avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t),
offsetof(spa_aux_t, aux_avl));
spa_mode_global = mode;
#ifndef _KERNEL
if (spa_mode_global != FREAD && dprintf_find_string("watch")) {
struct sigaction sa;
sa.sa_flags = SA_SIGINFO;
sigemptyset(&sa.sa_mask);
sa.sa_sigaction = arc_buf_sigsegv;
if (sigaction(SIGSEGV, &sa, NULL) == -1) {
perror("could not enable watchpoints: "
"sigaction(SIGSEGV, ...) = ");
} else {
arc_watch = B_TRUE;
}
}
#endif
fm_init();
refcount_init();
unique_init();
range_tree_init();
metaslab_alloc_trace_init();
ddt_init();
zio_init();
dmu_init();
zil_init();
vdev_cache_stat_init();
vdev_mirror_stat_init();
vdev_raidz_math_init();
vdev_file_init();
zfs_prop_init();
zpool_prop_init();
zpool_feature_init();
spa_config_load();
l2arc_start();
scan_init();
qat_init();
}
void
spa_fini(void)
{
l2arc_stop();
spa_evict_all();
vdev_file_fini();
vdev_cache_stat_fini();
vdev_mirror_stat_fini();
vdev_raidz_math_fini();
zil_fini();
dmu_fini();
zio_fini();
ddt_fini();
metaslab_alloc_trace_fini();
range_tree_fini();
unique_fini();
refcount_fini();
fm_fini();
scan_fini();
qat_fini();
avl_destroy(&spa_namespace_avl);
avl_destroy(&spa_spare_avl);
avl_destroy(&spa_l2cache_avl);
cv_destroy(&spa_namespace_cv);
mutex_destroy(&spa_namespace_lock);
mutex_destroy(&spa_spare_lock);
mutex_destroy(&spa_l2cache_lock);
}
/*
* Return whether this pool has slogs. No locking needed.
* It's not a problem if the wrong answer is returned as it's only for
* performance and not correctness.
*/
boolean_t
spa_has_slogs(spa_t *spa)
{
return (spa->spa_log_class->mc_rotor != NULL);
}
spa_log_state_t
spa_get_log_state(spa_t *spa)
{
return (spa->spa_log_state);
}
void
spa_set_log_state(spa_t *spa, spa_log_state_t state)
{
spa->spa_log_state = state;
}
boolean_t
spa_is_root(spa_t *spa)
{
return (spa->spa_is_root);
}
boolean_t
spa_writeable(spa_t *spa)
{
return (!!(spa->spa_mode & FWRITE));
}
/*
* Returns true if there is a pending sync task in any of the current
* syncing txg, the current quiescing txg, or the current open txg.
*/
boolean_t
spa_has_pending_synctask(spa_t *spa)
{
return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks));
}
int
spa_mode(spa_t *spa)
{
return (spa->spa_mode);
}
uint64_t
spa_bootfs(spa_t *spa)
{
return (spa->spa_bootfs);
}
uint64_t
spa_delegation(spa_t *spa)
{
return (spa->spa_delegation);
}
objset_t *
spa_meta_objset(spa_t *spa)
{
return (spa->spa_meta_objset);
}
enum zio_checksum
spa_dedup_checksum(spa_t *spa)
{
return (spa->spa_dedup_checksum);
}
/*
* Reset pool scan stat per scan pass (or reboot).
*/
void
spa_scan_stat_init(spa_t *spa)
{
/* data not stored on disk */
spa->spa_scan_pass_start = gethrestime_sec();
if (dsl_scan_is_paused_scrub(spa->spa_dsl_pool->dp_scan))
spa->spa_scan_pass_scrub_pause = spa->spa_scan_pass_start;
else
spa->spa_scan_pass_scrub_pause = 0;
spa->spa_scan_pass_scrub_spent_paused = 0;
spa->spa_scan_pass_exam = 0;
spa->spa_scan_pass_issued = 0;
vdev_scan_stat_init(spa->spa_root_vdev);
}
/*
* Get scan stats for zpool status reports
*/
int
spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps)
{
dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL;
if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE)
return (SET_ERROR(ENOENT));
bzero(ps, sizeof (pool_scan_stat_t));
/* data stored on disk */
ps->pss_func = scn->scn_phys.scn_func;
ps->pss_state = scn->scn_phys.scn_state;
ps->pss_start_time = scn->scn_phys.scn_start_time;
ps->pss_end_time = scn->scn_phys.scn_end_time;
ps->pss_to_examine = scn->scn_phys.scn_to_examine;
ps->pss_examined = scn->scn_phys.scn_examined;
ps->pss_to_process = scn->scn_phys.scn_to_process;
ps->pss_processed = scn->scn_phys.scn_processed;
ps->pss_errors = scn->scn_phys.scn_errors;
/* data not stored on disk */
ps->pss_pass_exam = spa->spa_scan_pass_exam;
ps->pss_pass_start = spa->spa_scan_pass_start;
ps->pss_pass_scrub_pause = spa->spa_scan_pass_scrub_pause;
ps->pss_pass_scrub_spent_paused = spa->spa_scan_pass_scrub_spent_paused;
ps->pss_pass_issued = spa->spa_scan_pass_issued;
ps->pss_issued =
scn->scn_issued_before_pass + spa->spa_scan_pass_issued;
return (0);
}
boolean_t
spa_debug_enabled(spa_t *spa)
{
return (spa->spa_debug);
}
int
spa_maxblocksize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS))
return (SPA_MAXBLOCKSIZE);
else
return (SPA_OLD_MAXBLOCKSIZE);
}
+
+/*
+ * Returns the txg that the last device removal completed. No indirect mappings
+ * have been added since this txg.
+ */
+uint64_t
+spa_get_last_removal_txg(spa_t *spa)
+{
+ uint64_t vdevid;
+ uint64_t ret = -1ULL;
+
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ /*
+ * sr_prev_indirect_vdev is only modified while holding all the
+ * config locks, so it is sufficient to hold SCL_VDEV as reader when
+ * examining it.
+ */
+ vdevid = spa->spa_removing_phys.sr_prev_indirect_vdev;
+
+ while (vdevid != -1ULL) {
+ vdev_t *vd = vdev_lookup_top(spa, vdevid);
+ vdev_indirect_births_t *vib = vd->vdev_indirect_births;
+
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+
+ /*
+ * If the removal did not remap any data, we don't care.
+ */
+ if (vdev_indirect_births_count(vib) != 0) {
+ ret = vdev_indirect_births_last_entry_txg(vib);
+ break;
+ }
+
+ vdevid = vd->vdev_indirect_config.vic_prev_indirect_vdev;
+ }
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ IMPLY(ret != -1ULL,
+ spa_feature_is_active(spa, SPA_FEATURE_DEVICE_REMOVAL));
+
+ return (ret);
+}
+
int
spa_maxdnodesize(spa_t *spa)
{
if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE))
return (DNODE_MAX_SIZE);
else
return (DNODE_MIN_SIZE);
}
boolean_t
spa_multihost(spa_t *spa)
{
return (spa->spa_multihost ? B_TRUE : B_FALSE);
}
unsigned long
spa_get_hostid(void)
{
unsigned long myhostid;
#ifdef _KERNEL
myhostid = zone_get_hostid(NULL);
#else /* _KERNEL */
/*
* We're emulating the system's hostid in userland, so
* we can't use zone_get_hostid().
*/
(void) ddi_strtoul(hw_serial, NULL, 10, &myhostid);
#endif /* _KERNEL */
return (myhostid);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
#include <linux/mod_compat.h>
static int
param_set_deadman_failmode(const char *val, zfs_kernel_param_t *kp)
{
spa_t *spa = NULL;
char *p;
if (val == NULL)
return (SET_ERROR(-EINVAL));
if ((p = strchr(val, '\n')) != NULL)
*p = '\0';
if (strcmp(val, "wait") != 0 && strcmp(val, "continue") != 0 &&
strcmp(val, "panic"))
return (SET_ERROR(-EINVAL));
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL)
spa_set_deadman_failmode(spa, val);
mutex_exit(&spa_namespace_lock);
return (param_set_charp(val, kp));
}
/* Namespace manipulation */
EXPORT_SYMBOL(spa_lookup);
EXPORT_SYMBOL(spa_add);
EXPORT_SYMBOL(spa_remove);
EXPORT_SYMBOL(spa_next);
/* Refcount functions */
EXPORT_SYMBOL(spa_open_ref);
EXPORT_SYMBOL(spa_close);
EXPORT_SYMBOL(spa_refcount_zero);
/* Pool configuration lock */
EXPORT_SYMBOL(spa_config_tryenter);
EXPORT_SYMBOL(spa_config_enter);
EXPORT_SYMBOL(spa_config_exit);
EXPORT_SYMBOL(spa_config_held);
/* Pool vdev add/remove lock */
EXPORT_SYMBOL(spa_vdev_enter);
EXPORT_SYMBOL(spa_vdev_exit);
/* Pool vdev state change lock */
EXPORT_SYMBOL(spa_vdev_state_enter);
EXPORT_SYMBOL(spa_vdev_state_exit);
/* Accessor functions */
EXPORT_SYMBOL(spa_shutting_down);
EXPORT_SYMBOL(spa_get_dsl);
EXPORT_SYMBOL(spa_get_rootblkptr);
EXPORT_SYMBOL(spa_set_rootblkptr);
EXPORT_SYMBOL(spa_altroot);
EXPORT_SYMBOL(spa_sync_pass);
EXPORT_SYMBOL(spa_name);
EXPORT_SYMBOL(spa_guid);
EXPORT_SYMBOL(spa_last_synced_txg);
EXPORT_SYMBOL(spa_first_txg);
EXPORT_SYMBOL(spa_syncing_txg);
EXPORT_SYMBOL(spa_version);
EXPORT_SYMBOL(spa_state);
EXPORT_SYMBOL(spa_load_state);
EXPORT_SYMBOL(spa_freeze_txg);
EXPORT_SYMBOL(spa_get_dspace);
EXPORT_SYMBOL(spa_update_dspace);
EXPORT_SYMBOL(spa_deflate);
EXPORT_SYMBOL(spa_normal_class);
EXPORT_SYMBOL(spa_log_class);
EXPORT_SYMBOL(spa_max_replication);
EXPORT_SYMBOL(spa_prev_software_version);
EXPORT_SYMBOL(spa_get_failmode);
EXPORT_SYMBOL(spa_suspended);
EXPORT_SYMBOL(spa_bootfs);
EXPORT_SYMBOL(spa_delegation);
EXPORT_SYMBOL(spa_meta_objset);
EXPORT_SYMBOL(spa_maxblocksize);
EXPORT_SYMBOL(spa_maxdnodesize);
/* Miscellaneous support routines */
EXPORT_SYMBOL(spa_rename);
EXPORT_SYMBOL(spa_guid_exists);
EXPORT_SYMBOL(spa_strdup);
EXPORT_SYMBOL(spa_strfree);
EXPORT_SYMBOL(spa_get_random);
EXPORT_SYMBOL(spa_generate_guid);
EXPORT_SYMBOL(snprintf_blkptr);
EXPORT_SYMBOL(spa_freeze);
EXPORT_SYMBOL(spa_upgrade);
EXPORT_SYMBOL(spa_evict_all);
EXPORT_SYMBOL(spa_lookup_by_guid);
EXPORT_SYMBOL(spa_has_spare);
EXPORT_SYMBOL(dva_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize_sync);
EXPORT_SYMBOL(bp_get_dsize);
EXPORT_SYMBOL(spa_has_slogs);
EXPORT_SYMBOL(spa_is_root);
EXPORT_SYMBOL(spa_writeable);
EXPORT_SYMBOL(spa_mode);
EXPORT_SYMBOL(spa_namespace_lock);
/* BEGIN CSTYLED */
module_param(zfs_flags, uint, 0644);
MODULE_PARM_DESC(zfs_flags, "Set additional debugging flags");
module_param(zfs_recover, int, 0644);
MODULE_PARM_DESC(zfs_recover, "Set to attempt to recover from fatal errors");
module_param(zfs_free_leak_on_eio, int, 0644);
MODULE_PARM_DESC(zfs_free_leak_on_eio,
"Set to ignore IO errors during free and permanently leak the space");
module_param(zfs_deadman_synctime_ms, ulong, 0644);
MODULE_PARM_DESC(zfs_deadman_synctime_ms,
"Pool sync expiration time in milliseconds");
module_param(zfs_deadman_ziotime_ms, ulong, 0644);
MODULE_PARM_DESC(zfs_deadman_ziotime_ms,
"IO expiration time in milliseconds");
module_param(zfs_deadman_checktime_ms, ulong, 0644);
MODULE_PARM_DESC(zfs_deadman_checktime_ms,
"Dead I/O check interval in milliseconds");
module_param(zfs_deadman_enabled, int, 0644);
MODULE_PARM_DESC(zfs_deadman_enabled, "Enable deadman timer");
module_param_call(zfs_deadman_failmode, param_set_deadman_failmode,
param_get_charp, &zfs_deadman_failmode, 0644);
MODULE_PARM_DESC(zfs_deadman_failmode, "Failmode for deadman timer");
module_param(spa_asize_inflation, int, 0644);
MODULE_PARM_DESC(spa_asize_inflation,
"SPA size estimate multiplication factor");
module_param(spa_slop_shift, int, 0644);
MODULE_PARM_DESC(spa_slop_shift, "Reserved free space in pool");
/* END CSTYLED */
#endif
diff --git a/module/zfs/space_map.c b/module/zfs/space_map.c
index baab0610de8b..d84dd7583592 100644
--- a/module/zfs/space_map.c
+++ b/module/zfs/space_map.c
@@ -1,549 +1,567 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2016 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/dsl_pool.h>
#include <sys/zio.h>
#include <sys/space_map.h>
#include <sys/refcount.h>
#include <sys/zfeature.h>
/*
* The data for a given space map can be kept on blocks of any size.
* Larger blocks entail fewer i/o operations, but they also cause the
* DMU to keep more data in-core, and also to waste more i/o bandwidth
* when only a few blocks have changed since the last transaction group.
*/
int space_map_blksz = (1 << 12);
/*
- * Load the space map disk into the specified range tree. Segments of maptype
- * are added to the range tree, other segment types are removed.
- *
- * Note: space_map_load() will drop sm_lock across dmu_read() calls.
- * The caller must be OK with this.
+ * Iterate through the space map, invoking the callback on each (non-debug)
+ * space map entry.
*/
int
-space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
+space_map_iterate(space_map_t *sm, sm_cb_t callback, void *arg)
{
uint64_t *entry, *entry_map, *entry_map_end;
- uint64_t bufsize, size, offset, end, space;
+ uint64_t bufsize, size, offset, end;
int error = 0;
- ASSERT(MUTEX_HELD(sm->sm_lock));
-
end = space_map_length(sm);
- space = space_map_allocated(sm);
-
- VERIFY0(range_tree_space(rt));
-
- if (maptype == SM_FREE) {
- range_tree_add(rt, sm->sm_start, sm->sm_size);
- space = sm->sm_size - space;
- }
bufsize = MAX(sm->sm_blksz, SPA_MINBLOCKSIZE);
entry_map = vmem_alloc(bufsize, KM_SLEEP);
- mutex_exit(sm->sm_lock);
if (end > bufsize) {
dmu_prefetch(sm->sm_os, space_map_object(sm), 0, bufsize,
end - bufsize, ZIO_PRIORITY_SYNC_READ);
}
- mutex_enter(sm->sm_lock);
- for (offset = 0; offset < end; offset += bufsize) {
+ for (offset = 0; offset < end && error == 0; offset += bufsize) {
size = MIN(end - offset, bufsize);
VERIFY(P2PHASE(size, sizeof (uint64_t)) == 0);
VERIFY(size != 0);
ASSERT3U(sm->sm_blksz, !=, 0);
dprintf("object=%llu offset=%llx size=%llx\n",
space_map_object(sm), offset, size);
- mutex_exit(sm->sm_lock);
error = dmu_read(sm->sm_os, space_map_object(sm), offset, size,
entry_map, DMU_READ_PREFETCH);
- mutex_enter(sm->sm_lock);
if (error != 0)
break;
entry_map_end = entry_map + (size / sizeof (uint64_t));
- for (entry = entry_map; entry < entry_map_end; entry++) {
+ for (entry = entry_map; entry < entry_map_end && error == 0;
+ entry++) {
uint64_t e = *entry;
uint64_t offset, size;
- if (SM_DEBUG_DECODE(e)) /* Skip debug entries */
+ if (SM_DEBUG_DECODE(e)) /* Skip debug entries */
continue;
offset = (SM_OFFSET_DECODE(e) << sm->sm_shift) +
sm->sm_start;
size = SM_RUN_DECODE(e) << sm->sm_shift;
VERIFY0(P2PHASE(offset, 1ULL << sm->sm_shift));
VERIFY0(P2PHASE(size, 1ULL << sm->sm_shift));
VERIFY3U(offset, >=, sm->sm_start);
VERIFY3U(offset + size, <=, sm->sm_start + sm->sm_size);
- if (SM_TYPE_DECODE(e) == maptype) {
- VERIFY3U(range_tree_space(rt) + size, <=,
- sm->sm_size);
- range_tree_add(rt, offset, size);
- } else {
- range_tree_remove(rt, offset, size);
- }
+ error = callback(SM_TYPE_DECODE(e), offset, size, arg);
}
}
- if (error == 0)
+ vmem_free(entry_map, bufsize);
+ return (error);
+}
+
+typedef struct space_map_load_arg {
+ space_map_t *smla_sm;
+ range_tree_t *smla_rt;
+ maptype_t smla_type;
+} space_map_load_arg_t;
+
+static int
+space_map_load_callback(maptype_t type, uint64_t offset, uint64_t size,
+ void *arg)
+{
+ space_map_load_arg_t *smla = arg;
+ if (type == smla->smla_type) {
+ VERIFY3U(range_tree_space(smla->smla_rt) + size, <=,
+ smla->smla_sm->sm_size);
+ range_tree_add(smla->smla_rt, offset, size);
+ } else {
+ range_tree_remove(smla->smla_rt, offset, size);
+ }
+
+ return (0);
+}
+
+/*
+ * Load the space map disk into the specified range tree. Segments of maptype
+ * are added to the range tree, other segment types are removed.
+ */
+int
+space_map_load(space_map_t *sm, range_tree_t *rt, maptype_t maptype)
+{
+ uint64_t space;
+ int err;
+ space_map_load_arg_t smla;
+
+ VERIFY0(range_tree_space(rt));
+ space = space_map_allocated(sm);
+
+ if (maptype == SM_FREE) {
+ range_tree_add(rt, sm->sm_start, sm->sm_size);
+ space = sm->sm_size - space;
+ }
+
+ smla.smla_rt = rt;
+ smla.smla_sm = sm;
+ smla.smla_type = maptype;
+ err = space_map_iterate(sm, space_map_load_callback, &smla);
+
+ if (err == 0) {
VERIFY3U(range_tree_space(rt), ==, space);
- else
+ } else {
range_tree_vacate(rt, NULL, NULL);
+ }
- vmem_free(entry_map, bufsize);
- return (error);
+ return (err);
}
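/*
 * Editorial usage sketch (hypothetical callback name): callers can now walk a
 * space map with space_map_iterate() without building a range tree, e.g. to
 * total the allocated bytes:
 *
 *	static int
 *	sm_sum_alloc_cb(maptype_t type, uint64_t offset, uint64_t size,
 *	    void *arg)
 *	{
 *		uint64_t *sum = arg;
 *
 *		if (type == SM_ALLOC)
 *			*sum += size;
 *		return (0);
 *	}
 *
 *	uint64_t sum = 0;
 *	VERIFY0(space_map_iterate(sm, sm_sum_alloc_cb, &sum));
 */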
void
space_map_histogram_clear(space_map_t *sm)
{
if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
return;
bzero(sm->sm_phys->smp_histogram, sizeof (sm->sm_phys->smp_histogram));
}
boolean_t
space_map_histogram_verify(space_map_t *sm, range_tree_t *rt)
{
/*
* Verify that the in-core range tree does not have any
* ranges smaller than our sm_shift size.
*/
for (int i = 0; i < sm->sm_shift; i++) {
if (rt->rt_histogram[i] != 0)
return (B_FALSE);
}
return (B_TRUE);
}
void
space_map_histogram_add(space_map_t *sm, range_tree_t *rt, dmu_tx_t *tx)
{
int idx = 0;
- ASSERT(MUTEX_HELD(rt->rt_lock));
ASSERT(dmu_tx_is_syncing(tx));
VERIFY3U(space_map_object(sm), !=, 0);
if (sm->sm_dbuf->db_size != sizeof (space_map_phys_t))
return;
dmu_buf_will_dirty(sm->sm_dbuf, tx);
ASSERT(space_map_histogram_verify(sm, rt));
/*
* Transfer the content of the range tree histogram to the space
* map histogram. The space map histogram contains 32 buckets ranging
* from 2^sm_shift to 2^(32+sm_shift-1). The range tree,
* however, can represent ranges from 2^0 to 2^63. Since the space
* map only cares about allocatable blocks (minimum size of 2^sm_shift), we
* can safely ignore all ranges in the range tree smaller than 2^sm_shift.
*/
for (int i = sm->sm_shift; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
/*
* Since the largest histogram bucket in the space map is
* 2^(32+sm_shift-1), we need to normalize the values in
* the range tree for any bucket larger than that size. For
* example given an sm_shift of 9, ranges larger than 2^40
* would get normalized as if they were 1TB ranges. Assume
* the range tree had a count of 5 in the 2^44 (16TB) bucket,
* the calculation below would normalize this to 5 * 2^4 (16).
*/
ASSERT3U(i, >=, idx + sm->sm_shift);
sm->sm_phys->smp_histogram[idx] +=
rt->rt_histogram[i] << (i - idx - sm->sm_shift);
/*
* Increment the space map's index as long as we haven't
* reached the maximum bucket size. Accumulate all ranges
* larger than the max bucket size into the last bucket.
*/
if (idx < SPACE_MAP_HISTOGRAM_SIZE - 1) {
ASSERT3U(idx + sm->sm_shift, ==, i);
idx++;
ASSERT3U(idx, <, SPACE_MAP_HISTOGRAM_SIZE);
}
}
}
uint64_t
space_map_entries(space_map_t *sm, range_tree_t *rt)
{
avl_tree_t *t = &rt->rt_root;
range_seg_t *rs;
uint64_t size, entries;
/*
* All space_maps always have a debug entry so account for it here.
*/
entries = 1;
/*
* Traverse the range tree and calculate the number of space map
* entries that would be required to write out the range tree.
*/
for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
entries += howmany(size, SM_RUN_MAX);
}
return (entries);
}
-/*
- * Note: space_map_write() will drop sm_lock across dmu_write() calls.
- */
void
space_map_write(space_map_t *sm, range_tree_t *rt, maptype_t maptype,
dmu_tx_t *tx)
{
objset_t *os = sm->sm_os;
spa_t *spa = dmu_objset_spa(os);
avl_tree_t *t = &rt->rt_root;
range_seg_t *rs;
uint64_t size, total, rt_space, nodes;
uint64_t *entry, *entry_map, *entry_map_end;
uint64_t expected_entries, actual_entries = 1;
- ASSERT(MUTEX_HELD(rt->rt_lock));
ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
VERIFY3U(space_map_object(sm), !=, 0);
dmu_buf_will_dirty(sm->sm_dbuf, tx);
/*
* This field is no longer necessary since the in-core space map
* now contains the object number but is maintained for backwards
* compatibility.
*/
sm->sm_phys->smp_object = sm->sm_object;
if (range_tree_space(rt) == 0) {
VERIFY3U(sm->sm_object, ==, sm->sm_phys->smp_object);
return;
}
if (maptype == SM_ALLOC)
sm->sm_phys->smp_alloc += range_tree_space(rt);
else
sm->sm_phys->smp_alloc -= range_tree_space(rt);
expected_entries = space_map_entries(sm, rt);
entry_map = vmem_alloc(sm->sm_blksz, KM_SLEEP);
entry_map_end = entry_map + (sm->sm_blksz / sizeof (uint64_t));
entry = entry_map;
*entry++ = SM_DEBUG_ENCODE(1) |
SM_DEBUG_ACTION_ENCODE(maptype) |
SM_DEBUG_SYNCPASS_ENCODE(spa_sync_pass(spa)) |
SM_DEBUG_TXG_ENCODE(dmu_tx_get_txg(tx));
total = 0;
nodes = avl_numnodes(&rt->rt_root);
rt_space = range_tree_space(rt);
for (rs = avl_first(t); rs != NULL; rs = AVL_NEXT(t, rs)) {
uint64_t start;
size = (rs->rs_end - rs->rs_start) >> sm->sm_shift;
start = (rs->rs_start - sm->sm_start) >> sm->sm_shift;
total += size << sm->sm_shift;
while (size != 0) {
uint64_t run_len;
run_len = MIN(size, SM_RUN_MAX);
if (entry == entry_map_end) {
- mutex_exit(rt->rt_lock);
dmu_write(os, space_map_object(sm),
sm->sm_phys->smp_objsize, sm->sm_blksz,
entry_map, tx);
- mutex_enter(rt->rt_lock);
sm->sm_phys->smp_objsize += sm->sm_blksz;
entry = entry_map;
}
*entry++ = SM_OFFSET_ENCODE(start) |
SM_TYPE_ENCODE(maptype) |
SM_RUN_ENCODE(run_len);
start += run_len;
size -= run_len;
actual_entries++;
}
}
if (entry != entry_map) {
size = (entry - entry_map) * sizeof (uint64_t);
- mutex_exit(rt->rt_lock);
dmu_write(os, space_map_object(sm), sm->sm_phys->smp_objsize,
size, entry_map, tx);
- mutex_enter(rt->rt_lock);
sm->sm_phys->smp_objsize += size;
}
ASSERT3U(expected_entries, ==, actual_entries);
/*
* Ensure that the space_map's accounting wasn't changed
* while we were in the middle of writing it out.
*/
VERIFY3U(nodes, ==, avl_numnodes(&rt->rt_root));
VERIFY3U(range_tree_space(rt), ==, rt_space);
VERIFY3U(range_tree_space(rt), ==, total);
vmem_free(entry_map, sm->sm_blksz);
}
static int
space_map_open_impl(space_map_t *sm)
{
int error;
u_longlong_t blocks;
error = dmu_bonus_hold(sm->sm_os, sm->sm_object, sm, &sm->sm_dbuf);
if (error)
return (error);
dmu_object_size_from_db(sm->sm_dbuf, &sm->sm_blksz, &blocks);
sm->sm_phys = sm->sm_dbuf->db_data;
return (0);
}
int
space_map_open(space_map_t **smp, objset_t *os, uint64_t object,
- uint64_t start, uint64_t size, uint8_t shift, kmutex_t *lp)
+ uint64_t start, uint64_t size, uint8_t shift)
{
space_map_t *sm;
int error;
ASSERT(*smp == NULL);
ASSERT(os != NULL);
ASSERT(object != 0);
sm = kmem_alloc(sizeof (space_map_t), KM_SLEEP);
sm->sm_start = start;
sm->sm_size = size;
sm->sm_shift = shift;
- sm->sm_lock = lp;
sm->sm_os = os;
sm->sm_object = object;
sm->sm_length = 0;
sm->sm_alloc = 0;
sm->sm_blksz = 0;
sm->sm_dbuf = NULL;
sm->sm_phys = NULL;
error = space_map_open_impl(sm);
if (error != 0) {
space_map_close(sm);
return (error);
}
*smp = sm;
return (0);
}
void
space_map_close(space_map_t *sm)
{
if (sm == NULL)
return;
if (sm->sm_dbuf != NULL)
dmu_buf_rele(sm->sm_dbuf, sm);
sm->sm_dbuf = NULL;
sm->sm_phys = NULL;
kmem_free(sm, sizeof (*sm));
}
void
space_map_truncate(space_map_t *sm, dmu_tx_t *tx)
{
objset_t *os = sm->sm_os;
spa_t *spa = dmu_objset_spa(os);
dmu_object_info_t doi;
ASSERT(dsl_pool_sync_context(dmu_objset_pool(os)));
ASSERT(dmu_tx_is_syncing(tx));
VERIFY3U(dmu_tx_get_txg(tx), <=, spa_final_dirty_txg(spa));
dmu_object_info_from_db(sm->sm_dbuf, &doi);
/*
* If the space map has the wrong bonus size (because
* SPA_FEATURE_SPACEMAP_HISTOGRAM has recently been enabled), or
* the wrong block size (because space_map_blksz has changed),
* free and re-allocate its object with the updated sizes.
*
* Otherwise, just truncate the current object.
*/
if ((spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM) &&
doi.doi_bonus_size != sizeof (space_map_phys_t)) ||
doi.doi_data_block_size != space_map_blksz) {
zfs_dbgmsg("txg %llu, spa %s, sm %p, reallocating "
"object[%llu]: old bonus %u, old blocksz %u",
dmu_tx_get_txg(tx), spa_name(spa), sm, sm->sm_object,
doi.doi_bonus_size, doi.doi_data_block_size);
space_map_free(sm, tx);
dmu_buf_rele(sm->sm_dbuf, sm);
sm->sm_object = space_map_alloc(sm->sm_os, tx);
VERIFY0(space_map_open_impl(sm));
} else {
VERIFY0(dmu_free_range(os, space_map_object(sm), 0, -1ULL, tx));
/*
* If the spacemap is reallocated, its histogram
* will be reset. Do the same in the common case so that
* bugs related to the uncommon case do not go unnoticed.
*/
bzero(sm->sm_phys->smp_histogram,
sizeof (sm->sm_phys->smp_histogram));
}
dmu_buf_will_dirty(sm->sm_dbuf, tx);
sm->sm_phys->smp_objsize = 0;
sm->sm_phys->smp_alloc = 0;
}
/*
* Update the in-core space_map allocation and length values.
*/
void
space_map_update(space_map_t *sm)
{
if (sm == NULL)
return;
- ASSERT(MUTEX_HELD(sm->sm_lock));
-
sm->sm_alloc = sm->sm_phys->smp_alloc;
sm->sm_length = sm->sm_phys->smp_objsize;
}
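/*
 * Illustrative usage sketch (editor's addition, not part of the original
 * source): with the rt_lock parameter removed by this change, a consumer
 * now opens a space map without passing a mutex, refreshes the in-core
 * sm_alloc/sm_length values with space_map_update(), reads them through the
 * accessors, and closes the map. The object number and the start/size/shift
 * values below are placeholders.
 *
 *	space_map_t *sm = NULL;
 *	int err = space_map_open(&sm, os, smobj, ms_start, ms_size, ms_shift);
 *	if (err == 0) {
 *		space_map_update(sm);
 *		uint64_t used = space_map_allocated(sm);
 *		space_map_close(sm);
 *	}
 */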
uint64_t
space_map_alloc(objset_t *os, dmu_tx_t *tx)
{
spa_t *spa = dmu_objset_spa(os);
uint64_t object;
int bonuslen;
if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
spa_feature_incr(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
bonuslen = sizeof (space_map_phys_t);
ASSERT3U(bonuslen, <=, dmu_bonus_max());
} else {
bonuslen = SPACE_MAP_SIZE_V0;
}
object = dmu_object_alloc(os,
DMU_OT_SPACE_MAP, space_map_blksz,
DMU_OT_SPACE_MAP_HEADER, bonuslen, tx);
return (object);
}
void
-space_map_free(space_map_t *sm, dmu_tx_t *tx)
+space_map_free_obj(objset_t *os, uint64_t smobj, dmu_tx_t *tx)
{
- spa_t *spa;
-
- if (sm == NULL)
- return;
-
- spa = dmu_objset_spa(sm->sm_os);
+ spa_t *spa = dmu_objset_spa(os);
if (spa_feature_is_enabled(spa, SPA_FEATURE_SPACEMAP_HISTOGRAM)) {
dmu_object_info_t doi;
- dmu_object_info_from_db(sm->sm_dbuf, &doi);
+ VERIFY0(dmu_object_info(os, smobj, &doi));
if (doi.doi_bonus_size != SPACE_MAP_SIZE_V0) {
- VERIFY(spa_feature_is_active(spa,
- SPA_FEATURE_SPACEMAP_HISTOGRAM));
spa_feature_decr(spa,
SPA_FEATURE_SPACEMAP_HISTOGRAM, tx);
}
}
- VERIFY3U(dmu_object_free(sm->sm_os, space_map_object(sm), tx), ==, 0);
+ VERIFY0(dmu_object_free(os, smobj, tx));
+}
+
+void
+space_map_free(space_map_t *sm, dmu_tx_t *tx)
+{
+ if (sm == NULL)
+ return;
+
+ space_map_free_obj(sm->sm_os, space_map_object(sm), tx);
sm->sm_object = 0;
}
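/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the new space_map_free_obj() frees a space map by object number without
 * requiring an open space_map_t. Like the other routines here it must run
 * in syncing context; os, smobj, and tx are placeholders.
 *
 *	uint64_t smobj = space_map_alloc(os, tx);
 *	...
 *	space_map_free_obj(os, smobj, tx);
 */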
uint64_t
space_map_object(space_map_t *sm)
{
return (sm != NULL ? sm->sm_object : 0);
}
/*
* Returns the already synced, on-disk allocated space.
*/
uint64_t
space_map_allocated(space_map_t *sm)
{
return (sm != NULL ? sm->sm_alloc : 0);
}
/*
* Returns the already synced, on-disk length.
*/
uint64_t
space_map_length(space_map_t *sm)
{
return (sm != NULL ? sm->sm_length : 0);
}
/*
* Returns the allocated space that is currently syncing.
*/
int64_t
space_map_alloc_delta(space_map_t *sm)
{
if (sm == NULL)
return (0);
ASSERT(sm->sm_dbuf != NULL);
return (sm->sm_phys->smp_alloc - space_map_allocated(sm));
}
diff --git a/module/zfs/space_reftree.c b/module/zfs/space_reftree.c
index 038572b08873..aa289ba1061d 100644
--- a/module/zfs/space_reftree.c
+++ b/module/zfs/space_reftree.c
@@ -1,153 +1,149 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2013, 2015 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/range_tree.h>
#include <sys/space_reftree.h>
/*
* Space reference trees.
*
* A range tree is a collection of integers. Every integer is either
* in the tree, or it's not. A space reference tree generalizes
* the idea: it allows its members to have arbitrary reference counts,
* as opposed to the implicit reference count of 0 or 1 in a range tree.
* This representation comes in handy when computing the union or
* intersection of multiple space maps. For example, the union of
* N range trees is the subset of the reference tree with refcnt >= 1.
* The intersection of N range trees is the subset with refcnt >= N.
*
* [It's very much like a Fourier transform. Unions and intersections
* are hard to perform in the 'range tree domain', so we convert the trees
* into the 'reference count domain', where it's trivial, then invert.]
*
* vdev_dtl_reassess() uses computations of this form to determine
* DTL_MISSING and DTL_OUTAGE for interior vdevs -- e.g. a RAID-Z vdev
* has an outage wherever refcnt >= vdev_nparity + 1, and a mirror vdev
* has an outage wherever refcnt >= vdev_children.
*/
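/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * computing the union of two range trees with the reference tree routines
 * below. rt_a, rt_b, and rt_union are placeholder range trees supplied by
 * the caller; space_reftree_generate_map() vacates rt_union before filling
 * it with every segment whose refcnt >= 1. Passing minref = 2 instead
 * would yield the intersection.
 *
 *	avl_tree_t reftree;
 *	space_reftree_create(&reftree);
 *	space_reftree_add_map(&reftree, rt_a, 1);
 *	space_reftree_add_map(&reftree, rt_b, 1);
 *	space_reftree_generate_map(&reftree, rt_union, 1);
 *	space_reftree_destroy(&reftree);
 */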
static int
space_reftree_compare(const void *x1, const void *x2)
{
const space_ref_t *sr1 = (const space_ref_t *)x1;
const space_ref_t *sr2 = (const space_ref_t *)x2;
int cmp = AVL_CMP(sr1->sr_offset, sr2->sr_offset);
if (likely(cmp))
return (cmp);
return (AVL_PCMP(sr1, sr2));
}
void
space_reftree_create(avl_tree_t *t)
{
avl_create(t, space_reftree_compare,
sizeof (space_ref_t), offsetof(space_ref_t, sr_node));
}
void
space_reftree_destroy(avl_tree_t *t)
{
space_ref_t *sr;
void *cookie = NULL;
while ((sr = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(sr, sizeof (*sr));
avl_destroy(t);
}
static void
space_reftree_add_node(avl_tree_t *t, uint64_t offset, int64_t refcnt)
{
space_ref_t *sr;
sr = kmem_alloc(sizeof (*sr), KM_SLEEP);
sr->sr_offset = offset;
sr->sr_refcnt = refcnt;
avl_add(t, sr);
}
void
space_reftree_add_seg(avl_tree_t *t, uint64_t start, uint64_t end,
int64_t refcnt)
{
space_reftree_add_node(t, start, refcnt);
space_reftree_add_node(t, end, -refcnt);
}
/*
* Convert (or add) a range tree into a reference tree.
*/
void
space_reftree_add_map(avl_tree_t *t, range_tree_t *rt, int64_t refcnt)
{
range_seg_t *rs;
- ASSERT(MUTEX_HELD(rt->rt_lock));
-
for (rs = avl_first(&rt->rt_root); rs; rs = AVL_NEXT(&rt->rt_root, rs))
space_reftree_add_seg(t, rs->rs_start, rs->rs_end, refcnt);
}
/*
* Convert a reference tree into a range tree. The range tree will contain
* all members of the reference tree for which refcnt >= minref.
*/
void
space_reftree_generate_map(avl_tree_t *t, range_tree_t *rt, int64_t minref)
{
uint64_t start = -1ULL;
int64_t refcnt = 0;
space_ref_t *sr;
- ASSERT(MUTEX_HELD(rt->rt_lock));
-
range_tree_vacate(rt, NULL, NULL);
for (sr = avl_first(t); sr != NULL; sr = AVL_NEXT(t, sr)) {
refcnt += sr->sr_refcnt;
if (refcnt >= minref) {
if (start == -1ULL) {
start = sr->sr_offset;
}
} else {
if (start != -1ULL) {
uint64_t end = sr->sr_offset;
ASSERT(start <= end);
if (end > start)
range_tree_add(rt, start, end - start);
start = -1ULL;
}
}
}
ASSERT(refcnt == 0);
ASSERT(start == -1ULL);
}
diff --git a/module/zfs/trace.c b/module/zfs/trace.c
index e4ebf31b3fbe..eb6efe841cbd 100644
--- a/module/zfs/trace.c
+++ b/module/zfs/trace.c
@@ -1,51 +1,52 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Each Linux tracepoints subsystem must define CREATE_TRACE_POINTS in one
* (and only one) C file, so this dummy file exists for that purpose.
*/
#include <sys/multilist.h>
#include <sys/arc_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/dbuf.h>
#include <sys/dmu_objset.h>
#include <sys/dsl_dataset.h>
#include <sys/dmu_tx.h>
#include <sys/dnode.h>
#include <sys/multilist.h>
#include <sys/zfs_znode.h>
#include <sys/zil_impl.h>
#include <sys/zrlock.h>
#define CREATE_TRACE_POINTS
#include <sys/trace.h>
#include <sys/trace_acl.h>
#include <sys/trace_arc.h>
#include <sys/trace_dbuf.h>
#include <sys/trace_dmu.h>
#include <sys/trace_dnode.h>
#include <sys/trace_multilist.h>
#include <sys/trace_txg.h>
+#include <sys/trace_vdev.h>
#include <sys/trace_zil.h>
#include <sys/trace_zio.h>
#include <sys/trace_zrlock.h>
diff --git a/module/zfs/txg.c b/module/zfs/txg.c
index bf8544507b60..2c7f5303b1fc 100644
--- a/module/zfs/txg.c
+++ b/module/zfs/txg.c
@@ -1,953 +1,955 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Portions Copyright 2011 Martin Matuska
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/txg_impl.h>
#include <sys/dmu_impl.h>
#include <sys/spa_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/dsl_scan.h>
#include <sys/zil.h>
#include <sys/callb.h>
#include <sys/trace_txg.h>
/*
* ZFS Transaction Groups
* ----------------------
*
* ZFS transaction groups are, as the name implies, groups of transactions
* that act on persistent state. ZFS asserts consistency at the granularity of
* these transaction groups. Each successive transaction group (txg) is
* assigned a 64-bit consecutive identifier. There are three active
* transaction group states: open, quiescing, or syncing. At any given time,
* there may be an active txg associated with each state; each active txg may
* either be processing, or blocked waiting to enter the next state. There may
* be up to three active txgs, and there is always a txg in the open state
* (though it may be blocked waiting to enter the quiescing state). In broad
* strokes, transactions -- operations that change in-memory structures -- are
* accepted into the txg in the open state, and are completed while the txg is
* in the open or quiescing states. The accumulated changes are written to
* disk in the syncing state.
*
* Open
*
* When a new txg becomes active, it first enters the open state. New
* transactions -- updates to in-memory structures -- are assigned to the
* currently open txg. There is always a txg in the open state so that ZFS can
* accept new changes (though the txg may refuse new changes if it has hit
* some limit). ZFS advances the open txg to the next state for a variety of
* reasons such as it hitting a time or size threshold, or the execution of an
* administrative action that must be completed in the syncing state.
*
* Quiescing
*
* After a txg exits the open state, it enters the quiescing state. The
* quiescing state is intended to provide a buffer between accepting new
* transactions in the open state and writing them out to stable storage in
* the syncing state. While quiescing, transactions can continue their
* operation without delaying either of the other states. Typically, a txg is
* in the quiescing state very briefly since the operations are bounded by
* software latencies rather than, say, slower I/O latencies. After all
* transactions complete, the txg is ready to enter the next state.
*
* Syncing
*
* In the syncing state, the in-memory state built up during the open and (to
* a lesser degree) the quiescing states is written to stable storage. The
* process of writing out modified data can, in turn, modify more data. For
* example, when we write new blocks, we need to allocate space for them; those
* allocations modify metadata (space maps)... which themselves must be
* written to stable storage. During the sync state, ZFS iterates, writing out
* data until it converges and all in-memory changes have been written out.
* The first such pass is the largest as it encompasses all the modified user
* data (as opposed to filesystem metadata). Subsequent passes typically have
* far less data to write as they consist exclusively of filesystem metadata.
*
* To ensure convergence, after a certain number of passes ZFS begins
* overwriting locations on stable storage that had been allocated earlier in
* the syncing state (and subsequently freed). ZFS usually allocates new
* blocks to optimize for large, continuous writes. For the syncing state to
* converge however it must complete a pass where no new blocks are allocated
* since each allocation requires a modification of persistent metadata.
* Further, to hasten convergence, after a prescribed number of passes, ZFS
* also defers frees, and stops compressing.
*
* In addition to writing out user data, we must also execute synctasks during
* the syncing context. A synctask is the mechanism by which some
* administrative activities work such as creating and destroying snapshots or
* datasets. Note that when a synctask is initiated it enters the open txg,
* and ZFS then pushes that txg as quickly as possible to completion of the
* syncing state in order to reduce the latency of the administrative
* activity. To complete the syncing state, ZFS writes out a new uberblock,
* the root of the tree of blocks that comprise all state stored on the ZFS
* pool. Finally, if there is a quiesced txg waiting, we signal that it can
* now transition to the syncing state.
*/
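/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * the life of a single transaction as seen by this module. The DMU normally
 * drives these calls on behalf of a dmu_tx_t; dp is a placeholder pool.
 *
 *	txg_handle_t th;
 *	uint64_t txg = txg_hold_open(dp, &th);
 *	txg_rele_to_quiesce(&th);
 *	... apply the in-memory changes belonging to this txg ...
 *	txg_rele_to_sync(&th);
 *	txg_wait_synced(dp, txg);
 *
 * txg_hold_open() joins the currently open txg, txg_rele_to_quiesce() drops
 * the per-CPU open lock so the txg can advance, txg_rele_to_sync() tells the
 * quiesce thread that this transaction's changes are complete, and
 * txg_wait_synced() blocks until the txg has been written to stable storage.
 */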
static void txg_sync_thread(void *arg);
static void txg_quiesce_thread(void *arg);
int zfs_txg_timeout = 5; /* max seconds worth of delta per txg */
/*
* Prepare the txg subsystem.
*/
void
txg_init(dsl_pool_t *dp, uint64_t txg)
{
tx_state_t *tx = &dp->dp_tx;
int c;
bzero(tx, sizeof (tx_state_t));
tx->tx_cpu = vmem_zalloc(max_ncpus * sizeof (tx_cpu_t), KM_SLEEP);
for (c = 0; c < max_ncpus; c++) {
int i;
mutex_init(&tx->tx_cpu[c].tc_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&tx->tx_cpu[c].tc_open_lock, NULL, MUTEX_NOLOCKDEP,
NULL);
for (i = 0; i < TXG_SIZE; i++) {
cv_init(&tx->tx_cpu[c].tc_cv[i], NULL, CV_DEFAULT,
NULL);
list_create(&tx->tx_cpu[c].tc_callbacks[i],
sizeof (dmu_tx_callback_t),
offsetof(dmu_tx_callback_t, dcb_node));
}
}
mutex_init(&tx->tx_sync_lock, NULL, MUTEX_DEFAULT, NULL);
cv_init(&tx->tx_sync_more_cv, NULL, CV_DEFAULT, NULL);
cv_init(&tx->tx_sync_done_cv, NULL, CV_DEFAULT, NULL);
cv_init(&tx->tx_quiesce_more_cv, NULL, CV_DEFAULT, NULL);
cv_init(&tx->tx_quiesce_done_cv, NULL, CV_DEFAULT, NULL);
cv_init(&tx->tx_exit_cv, NULL, CV_DEFAULT, NULL);
tx->tx_open_txg = txg;
}
/*
* Close down the txg subsystem.
*/
void
txg_fini(dsl_pool_t *dp)
{
tx_state_t *tx = &dp->dp_tx;
int c;
ASSERT0(tx->tx_threads);
mutex_destroy(&tx->tx_sync_lock);
cv_destroy(&tx->tx_sync_more_cv);
cv_destroy(&tx->tx_sync_done_cv);
cv_destroy(&tx->tx_quiesce_more_cv);
cv_destroy(&tx->tx_quiesce_done_cv);
cv_destroy(&tx->tx_exit_cv);
for (c = 0; c < max_ncpus; c++) {
int i;
mutex_destroy(&tx->tx_cpu[c].tc_open_lock);
mutex_destroy(&tx->tx_cpu[c].tc_lock);
for (i = 0; i < TXG_SIZE; i++) {
cv_destroy(&tx->tx_cpu[c].tc_cv[i]);
list_destroy(&tx->tx_cpu[c].tc_callbacks[i]);
}
}
if (tx->tx_commit_cb_taskq != NULL)
taskq_destroy(tx->tx_commit_cb_taskq);
vmem_free(tx->tx_cpu, max_ncpus * sizeof (tx_cpu_t));
bzero(tx, sizeof (tx_state_t));
}
/*
* Start syncing transaction groups.
*/
void
txg_sync_start(dsl_pool_t *dp)
{
tx_state_t *tx = &dp->dp_tx;
mutex_enter(&tx->tx_sync_lock);
dprintf("pool %p\n", dp);
ASSERT0(tx->tx_threads);
tx->tx_threads = 2;
tx->tx_quiesce_thread = thread_create(NULL, 0, txg_quiesce_thread,
dp, 0, &p0, TS_RUN, defclsyspri);
/*
* The sync thread can need a larger-than-default stack size on
* 32-bit x86. This is due in part to nested pools and
* scrub_visitbp() recursion.
*/
tx->tx_sync_thread = thread_create(NULL, 0, txg_sync_thread,
dp, 0, &p0, TS_RUN, defclsyspri);
mutex_exit(&tx->tx_sync_lock);
}
static void
txg_thread_enter(tx_state_t *tx, callb_cpr_t *cpr)
{
CALLB_CPR_INIT(cpr, &tx->tx_sync_lock, callb_generic_cpr, FTAG);
mutex_enter(&tx->tx_sync_lock);
}
static void
txg_thread_exit(tx_state_t *tx, callb_cpr_t *cpr, kthread_t **tpp)
{
ASSERT(*tpp != NULL);
*tpp = NULL;
tx->tx_threads--;
cv_broadcast(&tx->tx_exit_cv);
CALLB_CPR_EXIT(cpr); /* drops &tx->tx_sync_lock */
thread_exit();
}
static void
txg_thread_wait(tx_state_t *tx, callb_cpr_t *cpr, kcondvar_t *cv, clock_t time)
{
CALLB_CPR_SAFE_BEGIN(cpr);
if (time)
(void) cv_timedwait_sig(cv, &tx->tx_sync_lock,
ddi_get_lbolt() + time);
else
cv_wait_sig(cv, &tx->tx_sync_lock);
CALLB_CPR_SAFE_END(cpr, &tx->tx_sync_lock);
}
/*
* Stop syncing transaction groups.
*/
void
txg_sync_stop(dsl_pool_t *dp)
{
tx_state_t *tx = &dp->dp_tx;
dprintf("pool %p\n", dp);
/*
* Finish off any work in progress.
*/
ASSERT3U(tx->tx_threads, ==, 2);
/*
* We need to ensure that we've vacated the deferred space_maps.
*/
txg_wait_synced(dp, tx->tx_open_txg + TXG_DEFER_SIZE);
/*
* Wake all sync threads and wait for them to die.
*/
mutex_enter(&tx->tx_sync_lock);
ASSERT3U(tx->tx_threads, ==, 2);
tx->tx_exiting = 1;
cv_broadcast(&tx->tx_quiesce_more_cv);
cv_broadcast(&tx->tx_quiesce_done_cv);
cv_broadcast(&tx->tx_sync_more_cv);
while (tx->tx_threads != 0)
cv_wait(&tx->tx_exit_cv, &tx->tx_sync_lock);
tx->tx_exiting = 0;
mutex_exit(&tx->tx_sync_lock);
}
uint64_t
txg_hold_open(dsl_pool_t *dp, txg_handle_t *th)
{
tx_state_t *tx = &dp->dp_tx;
tx_cpu_t *tc;
uint64_t txg;
/*
* The processor id is used only as a "random" number to index into
* the tx_cpu array; there is no other significance to the chosen
* tx_cpu. Using the current cpu simply spreads the contention
* across the per-cpu structures.
*/
kpreempt_disable();
tc = &tx->tx_cpu[CPU_SEQID];
kpreempt_enable();
mutex_enter(&tc->tc_open_lock);
txg = tx->tx_open_txg;
mutex_enter(&tc->tc_lock);
tc->tc_count[txg & TXG_MASK]++;
mutex_exit(&tc->tc_lock);
th->th_cpu = tc;
th->th_txg = txg;
return (txg);
}
void
txg_rele_to_quiesce(txg_handle_t *th)
{
tx_cpu_t *tc = th->th_cpu;
ASSERT(!MUTEX_HELD(&tc->tc_lock));
mutex_exit(&tc->tc_open_lock);
}
void
txg_register_callbacks(txg_handle_t *th, list_t *tx_callbacks)
{
tx_cpu_t *tc = th->th_cpu;
int g = th->th_txg & TXG_MASK;
mutex_enter(&tc->tc_lock);
list_move_tail(&tc->tc_callbacks[g], tx_callbacks);
mutex_exit(&tc->tc_lock);
}
void
txg_rele_to_sync(txg_handle_t *th)
{
tx_cpu_t *tc = th->th_cpu;
int g = th->th_txg & TXG_MASK;
mutex_enter(&tc->tc_lock);
ASSERT(tc->tc_count[g] != 0);
if (--tc->tc_count[g] == 0)
cv_broadcast(&tc->tc_cv[g]);
mutex_exit(&tc->tc_lock);
th->th_cpu = NULL; /* defensive */
}
/*
* Blocks until all transactions in the group are committed.
*
* On return, the transaction group has reached a stable state in which it can
* then be passed off to the syncing context.
*/
static void
txg_quiesce(dsl_pool_t *dp, uint64_t txg)
{
tx_state_t *tx = &dp->dp_tx;
uint64_t tx_open_time;
int g = txg & TXG_MASK;
int c;
/*
* Grab all tc_open_locks so nobody else can get into this txg.
*/
for (c = 0; c < max_ncpus; c++)
mutex_enter(&tx->tx_cpu[c].tc_open_lock);
ASSERT(txg == tx->tx_open_txg);
tx->tx_open_txg++;
tx->tx_open_time = tx_open_time = gethrtime();
DTRACE_PROBE2(txg__quiescing, dsl_pool_t *, dp, uint64_t, txg);
DTRACE_PROBE2(txg__opened, dsl_pool_t *, dp, uint64_t, tx->tx_open_txg);
/*
* Now that we've incremented tx_open_txg, we can let threads
* enter the next transaction group.
*/
for (c = 0; c < max_ncpus; c++)
mutex_exit(&tx->tx_cpu[c].tc_open_lock);
spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_OPEN, tx_open_time);
spa_txg_history_add(dp->dp_spa, txg + 1, tx_open_time);
/*
* Quiesce the transaction group by waiting for everyone to txg_exit().
*/
for (c = 0; c < max_ncpus; c++) {
tx_cpu_t *tc = &tx->tx_cpu[c];
mutex_enter(&tc->tc_lock);
while (tc->tc_count[g] != 0)
cv_wait(&tc->tc_cv[g], &tc->tc_lock);
mutex_exit(&tc->tc_lock);
}
spa_txg_history_set(dp->dp_spa, txg, TXG_STATE_QUIESCED, gethrtime());
}
static void
txg_do_callbacks(list_t *cb_list)
{
dmu_tx_do_callbacks(cb_list, 0);
list_destroy(cb_list);
kmem_free(cb_list, sizeof (list_t));
}
/*
* Dispatch the commit callbacks registered on this txg to worker threads.
*
* If no callbacks are registered for a given TXG, nothing happens.
* This function creates a taskq for the associated pool, if needed.
*/
static void
txg_dispatch_callbacks(dsl_pool_t *dp, uint64_t txg)
{
int c;
tx_state_t *tx = &dp->dp_tx;
list_t *cb_list;
for (c = 0; c < max_ncpus; c++) {
tx_cpu_t *tc = &tx->tx_cpu[c];
/*
* No need to lock tx_cpu_t at this point, since this can
* only be called once a txg has been synced.
*/
int g = txg & TXG_MASK;
if (list_is_empty(&tc->tc_callbacks[g]))
continue;
if (tx->tx_commit_cb_taskq == NULL) {
/*
* Commit callback taskq hasn't been created yet.
*/
tx->tx_commit_cb_taskq = taskq_create("tx_commit_cb",
max_ncpus, defclsyspri, max_ncpus, max_ncpus * 2,
TASKQ_PREPOPULATE | TASKQ_DYNAMIC);
}
cb_list = kmem_alloc(sizeof (list_t), KM_SLEEP);
list_create(cb_list, sizeof (dmu_tx_callback_t),
offsetof(dmu_tx_callback_t, dcb_node));
list_move_tail(cb_list, &tc->tc_callbacks[g]);
(void) taskq_dispatch(tx->tx_commit_cb_taskq, (task_func_t *)
txg_do_callbacks, cb_list, TQ_SLEEP);
}
}
/*
* Wait for pending commit callbacks of already-synced transactions to finish
* processing.
* Calling this function from within a commit callback will deadlock.
*/
void
txg_wait_callbacks(dsl_pool_t *dp)
{
tx_state_t *tx = &dp->dp_tx;
if (tx->tx_commit_cb_taskq != NULL)
taskq_wait_outstanding(tx->tx_commit_cb_taskq, 0);
}
static void
txg_sync_thread(void *arg)
{
dsl_pool_t *dp = arg;
spa_t *spa = dp->dp_spa;
tx_state_t *tx = &dp->dp_tx;
callb_cpr_t cpr;
clock_t start, delta;
(void) spl_fstrans_mark();
txg_thread_enter(tx, &cpr);
start = delta = 0;
for (;;) {
clock_t timeout = zfs_txg_timeout * hz;
clock_t timer;
uint64_t txg;
txg_stat_t *ts;
/*
* We sync when we're scanning, when there's someone waiting
* on us, when the quiesce thread has handed off a txg to
* us, or when we have reached our timeout.
*/
timer = (delta >= timeout ? 0 : timeout - delta);
while (!dsl_scan_active(dp->dp_scan) &&
!tx->tx_exiting && timer > 0 &&
tx->tx_synced_txg >= tx->tx_sync_txg_waiting &&
tx->tx_quiesced_txg == 0 &&
dp->dp_dirty_total < zfs_dirty_data_sync) {
dprintf("waiting; tx_synced=%llu waiting=%llu dp=%p\n",
tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
txg_thread_wait(tx, &cpr, &tx->tx_sync_more_cv, timer);
delta = ddi_get_lbolt() - start;
timer = (delta > timeout ? 0 : timeout - delta);
}
/*
* Wait until the quiesce thread hands off a txg to us,
* prompting it to do so if necessary.
*/
while (!tx->tx_exiting && tx->tx_quiesced_txg == 0) {
if (tx->tx_quiesce_txg_waiting < tx->tx_open_txg+1)
tx->tx_quiesce_txg_waiting = tx->tx_open_txg+1;
cv_broadcast(&tx->tx_quiesce_more_cv);
txg_thread_wait(tx, &cpr, &tx->tx_quiesce_done_cv, 0);
}
if (tx->tx_exiting)
txg_thread_exit(tx, &cpr, &tx->tx_sync_thread);
/*
* Consume the quiesced txg which has been handed off to
* us. This may cause the quiescing thread to now be
* able to quiesce another txg, so we must signal it.
*/
txg = tx->tx_quiesced_txg;
tx->tx_quiesced_txg = 0;
tx->tx_syncing_txg = txg;
DTRACE_PROBE2(txg__syncing, dsl_pool_t *, dp, uint64_t, txg);
ts = spa_txg_history_init_io(spa, txg, dp);
cv_broadcast(&tx->tx_quiesce_more_cv);
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
mutex_exit(&tx->tx_sync_lock);
start = ddi_get_lbolt();
spa_sync(spa, txg);
delta = ddi_get_lbolt() - start;
mutex_enter(&tx->tx_sync_lock);
tx->tx_synced_txg = txg;
tx->tx_syncing_txg = 0;
DTRACE_PROBE2(txg__synced, dsl_pool_t *, dp, uint64_t, txg);
spa_txg_history_fini_io(spa, ts);
cv_broadcast(&tx->tx_sync_done_cv);
/*
* Dispatch commit callbacks to worker threads.
*/
txg_dispatch_callbacks(dp, txg);
}
}
static void
txg_quiesce_thread(void *arg)
{
dsl_pool_t *dp = arg;
tx_state_t *tx = &dp->dp_tx;
callb_cpr_t cpr;
txg_thread_enter(tx, &cpr);
for (;;) {
uint64_t txg;
/*
* We quiesce when there's someone waiting on us.
* However, we can only have one txg in "quiescing" or
* "quiesced, waiting to sync" state. So we wait until
* the "quiesced, waiting to sync" txg has been consumed
* by the sync thread.
*/
while (!tx->tx_exiting &&
(tx->tx_open_txg >= tx->tx_quiesce_txg_waiting ||
tx->tx_quiesced_txg != 0))
txg_thread_wait(tx, &cpr, &tx->tx_quiesce_more_cv, 0);
if (tx->tx_exiting)
txg_thread_exit(tx, &cpr, &tx->tx_quiesce_thread);
txg = tx->tx_open_txg;
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
txg, tx->tx_quiesce_txg_waiting,
tx->tx_sync_txg_waiting);
mutex_exit(&tx->tx_sync_lock);
txg_quiesce(dp, txg);
mutex_enter(&tx->tx_sync_lock);
/*
* Hand this txg off to the sync thread.
*/
dprintf("quiesce done, handing off txg %llu\n", txg);
tx->tx_quiesced_txg = txg;
DTRACE_PROBE2(txg__quiesced, dsl_pool_t *, dp, uint64_t, txg);
cv_broadcast(&tx->tx_sync_more_cv);
cv_broadcast(&tx->tx_quiesce_done_cv);
}
}
/*
* Delay this thread by 'delay' nanoseconds if we are still in the open
* transaction group and there is already a waiting txg quiescing or quiesced.
* Abort the delay if this txg stalls or enters the quiescing state.
*/
void
txg_delay(dsl_pool_t *dp, uint64_t txg, hrtime_t delay, hrtime_t resolution)
{
tx_state_t *tx = &dp->dp_tx;
hrtime_t start = gethrtime();
/* don't delay if this txg could transition to quiescing immediately */
if (tx->tx_open_txg > txg ||
tx->tx_syncing_txg == txg-1 || tx->tx_synced_txg == txg-1)
return;
mutex_enter(&tx->tx_sync_lock);
if (tx->tx_open_txg > txg || tx->tx_synced_txg == txg-1) {
mutex_exit(&tx->tx_sync_lock);
return;
}
while (gethrtime() - start < delay &&
tx->tx_syncing_txg < txg-1 && !txg_stalled(dp)) {
(void) cv_timedwait_hires(&tx->tx_quiesce_more_cv,
&tx->tx_sync_lock, delay, resolution, 0);
}
DMU_TX_STAT_BUMP(dmu_tx_delay);
mutex_exit(&tx->tx_sync_lock);
}
void
txg_wait_synced(dsl_pool_t *dp, uint64_t txg)
{
tx_state_t *tx = &dp->dp_tx;
ASSERT(!dsl_pool_config_held(dp));
mutex_enter(&tx->tx_sync_lock);
ASSERT3U(tx->tx_threads, ==, 2);
if (txg == 0)
txg = tx->tx_open_txg + TXG_DEFER_SIZE;
if (tx->tx_sync_txg_waiting < txg)
tx->tx_sync_txg_waiting = txg;
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
while (tx->tx_synced_txg < txg) {
dprintf("broadcasting sync more "
"tx_synced=%llu waiting=%llu dp=%p\n",
tx->tx_synced_txg, tx->tx_sync_txg_waiting, dp);
cv_broadcast(&tx->tx_sync_more_cv);
cv_wait(&tx->tx_sync_done_cv, &tx->tx_sync_lock);
}
mutex_exit(&tx->tx_sync_lock);
}
void
txg_wait_open(dsl_pool_t *dp, uint64_t txg)
{
tx_state_t *tx = &dp->dp_tx;
ASSERT(!dsl_pool_config_held(dp));
mutex_enter(&tx->tx_sync_lock);
ASSERT3U(tx->tx_threads, ==, 2);
if (txg == 0)
txg = tx->tx_open_txg + 1;
if (tx->tx_quiesce_txg_waiting < txg)
tx->tx_quiesce_txg_waiting = txg;
dprintf("txg=%llu quiesce_txg=%llu sync_txg=%llu\n",
txg, tx->tx_quiesce_txg_waiting, tx->tx_sync_txg_waiting);
while (tx->tx_open_txg < txg) {
cv_broadcast(&tx->tx_quiesce_more_cv);
cv_wait(&tx->tx_quiesce_done_cv, &tx->tx_sync_lock);
}
mutex_exit(&tx->tx_sync_lock);
}
/*
* If there isn't a txg syncing or in the pipeline, push another txg through
* the pipeline by quiescing the open txg.
*/
void
txg_kick(dsl_pool_t *dp)
{
tx_state_t *tx = &dp->dp_tx;
ASSERT(!dsl_pool_config_held(dp));
mutex_enter(&tx->tx_sync_lock);
if (tx->tx_syncing_txg == 0 &&
tx->tx_quiesce_txg_waiting <= tx->tx_open_txg &&
tx->tx_sync_txg_waiting <= tx->tx_synced_txg &&
tx->tx_quiesced_txg <= tx->tx_synced_txg) {
tx->tx_quiesce_txg_waiting = tx->tx_open_txg + 1;
cv_broadcast(&tx->tx_quiesce_more_cv);
}
mutex_exit(&tx->tx_sync_lock);
}
boolean_t
txg_stalled(dsl_pool_t *dp)
{
tx_state_t *tx = &dp->dp_tx;
return (tx->tx_quiesce_txg_waiting > tx->tx_open_txg);
}
boolean_t
txg_sync_waiting(dsl_pool_t *dp)
{
tx_state_t *tx = &dp->dp_tx;
return (tx->tx_syncing_txg <= tx->tx_sync_txg_waiting ||
tx->tx_quiesced_txg != 0);
}
/*
* Verify that this txg is active (open, quiescing, syncing). Non-active
* txgs should not be manipulated.
*/
void
txg_verify(spa_t *spa, uint64_t txg)
{
ASSERTV(dsl_pool_t *dp = spa_get_dsl(spa));
if (txg <= TXG_INITIAL || txg == ZILTEST_TXG)
return;
ASSERT3U(txg, <=, dp->dp_tx.tx_open_txg);
ASSERT3U(txg, >=, dp->dp_tx.tx_synced_txg);
ASSERT3U(txg, >=, dp->dp_tx.tx_open_txg - TXG_CONCURRENT_STATES);
}
/*
* Per-txg object lists.
*/
void
txg_list_create(txg_list_t *tl, spa_t *spa, size_t offset)
{
int t;
mutex_init(&tl->tl_lock, NULL, MUTEX_DEFAULT, NULL);
tl->tl_offset = offset;
tl->tl_spa = spa;
for (t = 0; t < TXG_SIZE; t++)
tl->tl_head[t] = NULL;
}
void
txg_list_destroy(txg_list_t *tl)
{
int t;
for (t = 0; t < TXG_SIZE; t++)
ASSERT(txg_list_empty(tl, t));
mutex_destroy(&tl->tl_lock);
}
boolean_t
txg_list_empty(txg_list_t *tl, uint64_t txg)
{
txg_verify(tl->tl_spa, txg);
return (tl->tl_head[txg & TXG_MASK] == NULL);
}
/*
* Returns true if all txg lists are empty.
*
* Warning: this is inherently racy (an item could be added immediately
* after this function returns). We don't bother with the lock because
* it wouldn't change the semantics.
*/
boolean_t
txg_all_lists_empty(txg_list_t *tl)
{
for (int i = 0; i < TXG_SIZE; i++) {
if (!txg_list_empty(tl, i)) {
return (B_FALSE);
}
}
return (B_TRUE);
}
/*
* Add an entry to the list (unless it's already on the list).
* Returns B_TRUE if it was actually added.
*/
boolean_t
txg_list_add(txg_list_t *tl, void *p, uint64_t txg)
{
int t = txg & TXG_MASK;
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
boolean_t add;
txg_verify(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
add = (tn->tn_member[t] == 0);
if (add) {
tn->tn_member[t] = 1;
tn->tn_next[t] = tl->tl_head[t];
tl->tl_head[t] = tn;
}
mutex_exit(&tl->tl_lock);
return (add);
}
/*
* Add an entry to the end of the list, unless it's already on the list.
* (walks list to find end)
* Returns B_TRUE if it was actually added.
*/
boolean_t
txg_list_add_tail(txg_list_t *tl, void *p, uint64_t txg)
{
int t = txg & TXG_MASK;
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
boolean_t add;
txg_verify(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
add = (tn->tn_member[t] == 0);
if (add) {
txg_node_t **tp;
for (tp = &tl->tl_head[t]; *tp != NULL; tp = &(*tp)->tn_next[t])
continue;
tn->tn_member[t] = 1;
tn->tn_next[t] = NULL;
*tp = tn;
}
mutex_exit(&tl->tl_lock);
return (add);
}
/*
* Remove the head of the list and return it.
*/
void *
txg_list_remove(txg_list_t *tl, uint64_t txg)
{
int t = txg & TXG_MASK;
txg_node_t *tn;
void *p = NULL;
txg_verify(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
if ((tn = tl->tl_head[t]) != NULL) {
+ ASSERT(tn->tn_member[t]);
+ ASSERT(tn->tn_next[t] == NULL || tn->tn_next[t]->tn_member[t]);
p = (char *)tn - tl->tl_offset;
tl->tl_head[t] = tn->tn_next[t];
tn->tn_next[t] = NULL;
tn->tn_member[t] = 0;
}
mutex_exit(&tl->tl_lock);
return (p);
}
/*
* Remove a specific item from the list and return it.
*/
void *
txg_list_remove_this(txg_list_t *tl, void *p, uint64_t txg)
{
int t = txg & TXG_MASK;
txg_node_t *tn, **tp;
txg_verify(tl->tl_spa, txg);
mutex_enter(&tl->tl_lock);
for (tp = &tl->tl_head[t]; (tn = *tp) != NULL; tp = &tn->tn_next[t]) {
if ((char *)tn - tl->tl_offset == p) {
*tp = tn->tn_next[t];
tn->tn_next[t] = NULL;
tn->tn_member[t] = 0;
mutex_exit(&tl->tl_lock);
return (p);
}
}
mutex_exit(&tl->tl_lock);
return (NULL);
}
boolean_t
txg_list_member(txg_list_t *tl, void *p, uint64_t txg)
{
int t = txg & TXG_MASK;
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
txg_verify(tl->tl_spa, txg);
return (tn->tn_member[t] != 0);
}
/*
* Walk a txg list -- only safe if you know it's not changing.
*/
void *
txg_list_head(txg_list_t *tl, uint64_t txg)
{
int t = txg & TXG_MASK;
txg_node_t *tn = tl->tl_head[t];
txg_verify(tl->tl_spa, txg);
return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
void *
txg_list_next(txg_list_t *tl, void *p, uint64_t txg)
{
int t = txg & TXG_MASK;
txg_node_t *tn = (txg_node_t *)((char *)p + tl->tl_offset);
txg_verify(tl->tl_spa, txg);
tn = tn->tn_next[t];
return (tn == NULL ? NULL : (char *)tn - tl->tl_offset);
}
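/*
 * Illustrative sketch (editor's addition, not part of the original source):
 * typical use of a per-txg list, keyed by the txg in which an object was
 * dirtied. my_obj_t, its embedded txg_node_t field, and process() are
 * placeholders.
 *
 *	txg_list_t tl;
 *	txg_list_create(&tl, spa, offsetof(my_obj_t, mo_txg_node));
 *	(void) txg_list_add(&tl, obj, txg);
 *	...
 *	my_obj_t *o;
 *	while ((o = txg_list_remove(&tl, synced_txg)) != NULL)
 *		process(o);
 *	txg_list_destroy(&tl);
 */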
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(txg_init);
EXPORT_SYMBOL(txg_fini);
EXPORT_SYMBOL(txg_sync_start);
EXPORT_SYMBOL(txg_sync_stop);
EXPORT_SYMBOL(txg_hold_open);
EXPORT_SYMBOL(txg_rele_to_quiesce);
EXPORT_SYMBOL(txg_rele_to_sync);
EXPORT_SYMBOL(txg_register_callbacks);
EXPORT_SYMBOL(txg_delay);
EXPORT_SYMBOL(txg_wait_synced);
EXPORT_SYMBOL(txg_wait_open);
EXPORT_SYMBOL(txg_wait_callbacks);
EXPORT_SYMBOL(txg_stalled);
EXPORT_SYMBOL(txg_sync_waiting);
module_param(zfs_txg_timeout, int, 0644);
MODULE_PARM_DESC(zfs_txg_timeout, "Max seconds worth of delta per txg");
#endif
diff --git a/module/zfs/vdev.c b/module/zfs/vdev.c
index 8ab9964345ee..f3e3f90fa6b1 100644
--- a/module/zfs/vdev.c
+++ b/module/zfs/vdev.c
@@ -1,3797 +1,3983 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright 2017 Joyent, Inc.
*/
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
+#include <sys/bpobj.h>
#include <sys/dmu.h>
#include <sys/dmu_tx.h>
+#include <sys/dsl_dir.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
#include <sys/metaslab_impl.h>
#include <sys/space_map.h>
#include <sys/space_reftree.h>
#include <sys/zio.h>
#include <sys/zap.h>
#include <sys/fs/zfs.h>
#include <sys/arc.h>
#include <sys/zil.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/zvol.h>
#include <sys/zfs_ratelimit.h>
/*
* When a vdev is added, it will be divided into approximately (but no
* more than) this number of metaslabs.
*/
int metaslabs_per_vdev = 200;
/*
* Rate limit delay events to this many IO delays per second.
*/
unsigned int zfs_delays_per_second = 20;
/*
* Rate limit checksum events after this many checksum errors per second.
*/
unsigned int zfs_checksums_per_second = 20;
/*
* Ignore errors during scrub/resilver. This allows a resilver triggered
* on import to make progress even when there are pool errors.
*/
int zfs_scan_ignore_errors = 0;
/*
* Virtual device management.
*/
static vdev_ops_t *vdev_ops_table[] = {
&vdev_root_ops,
&vdev_raidz_ops,
&vdev_mirror_ops,
&vdev_replacing_ops,
&vdev_spare_ops,
&vdev_disk_ops,
&vdev_file_ops,
&vdev_missing_ops,
&vdev_hole_ops,
+ &vdev_indirect_ops,
NULL
};
/*
* Given a vdev type, return the appropriate ops vector.
*/
static vdev_ops_t *
vdev_getops(const char *type)
{
vdev_ops_t *ops, **opspp;
for (opspp = vdev_ops_table; (ops = *opspp) != NULL; opspp++)
if (strcmp(ops->vdev_op_type, type) == 0)
break;
return (ops);
}
/*
* Default asize function: return the MAX of psize with the asize of
* all children. This is what's used by anything other than RAID-Z.
*/
uint64_t
vdev_default_asize(vdev_t *vd, uint64_t psize)
{
uint64_t asize = P2ROUNDUP(psize, 1ULL << vd->vdev_top->vdev_ashift);
uint64_t csize;
for (int c = 0; c < vd->vdev_children; c++) {
csize = vdev_psize_to_asize(vd->vdev_child[c], psize);
asize = MAX(asize, csize);
}
return (asize);
}
/*
* Get the minimum allocatable size. We define the allocatable size as
* the vdev's asize rounded down to a metaslab boundary. This allows us to
* replace or attach devices which don't have the same physical size but
* can still satisfy the same number of allocations.
*/
uint64_t
vdev_get_min_asize(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
/*
* If our parent is NULL (inactive spare or cache) or is the root,
* just return our own asize.
*/
if (pvd == NULL)
return (vd->vdev_asize);
/*
* The top-level vdev just returns the allocatable size rounded
* down to a metaslab boundary.
*/
if (vd == vd->vdev_top)
return (P2ALIGN(vd->vdev_asize, 1ULL << vd->vdev_ms_shift));
/*
* The allocatable space for a raidz vdev is N * sizeof(smallest child),
* so each child must provide at least 1/Nth of the parent's minimum asize.
*/
if (pvd->vdev_ops == &vdev_raidz_ops)
return ((pvd->vdev_min_asize + pvd->vdev_children - 1) /
pvd->vdev_children);
return (pvd->vdev_min_asize);
}
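/*
 * Worked example (editor's addition, not part of the original source): for
 * a top-level vdev with an asize of 1000 GiB and vdev_ms_shift of 34
 * (16 GiB metaslabs), P2ALIGN rounds the asize down to 992 GiB, i.e. 62
 * whole metaslabs. For a child of a 3-wide raidz vdev whose min_asize is
 * 992 GiB, the rounded-up division above requires each child to provide at
 * least one third of that, roughly 330.7 GiB.
 */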
void
vdev_set_min_asize(vdev_t *vd)
{
vd->vdev_min_asize = vdev_get_min_asize(vd);
for (int c = 0; c < vd->vdev_children; c++)
vdev_set_min_asize(vd->vdev_child[c]);
}
vdev_t *
vdev_lookup_top(spa_t *spa, uint64_t vdev)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
if (vdev < rvd->vdev_children) {
ASSERT(rvd->vdev_child[vdev] != NULL);
return (rvd->vdev_child[vdev]);
}
return (NULL);
}
vdev_t *
vdev_lookup_by_guid(vdev_t *vd, uint64_t guid)
{
vdev_t *mvd;
if (vd->vdev_guid == guid)
return (vd);
for (int c = 0; c < vd->vdev_children; c++)
if ((mvd = vdev_lookup_by_guid(vd->vdev_child[c], guid)) !=
NULL)
return (mvd);
return (NULL);
}
static int
vdev_count_leaves_impl(vdev_t *vd)
{
int n = 0;
if (vd->vdev_ops->vdev_op_leaf)
return (1);
for (int c = 0; c < vd->vdev_children; c++)
n += vdev_count_leaves_impl(vd->vdev_child[c]);
return (n);
}
int
vdev_count_leaves(spa_t *spa)
{
int rc;
spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
rc = vdev_count_leaves_impl(spa->spa_root_vdev);
spa_config_exit(spa, SCL_VDEV, FTAG);
return (rc);
}
void
vdev_add_child(vdev_t *pvd, vdev_t *cvd)
{
size_t oldsize, newsize;
uint64_t id = cvd->vdev_id;
vdev_t **newchild;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(cvd->vdev_parent == NULL);
cvd->vdev_parent = pvd;
if (pvd == NULL)
return;
ASSERT(id >= pvd->vdev_children || pvd->vdev_child[id] == NULL);
oldsize = pvd->vdev_children * sizeof (vdev_t *);
pvd->vdev_children = MAX(pvd->vdev_children, id + 1);
newsize = pvd->vdev_children * sizeof (vdev_t *);
newchild = kmem_alloc(newsize, KM_SLEEP);
if (pvd->vdev_child != NULL) {
bcopy(pvd->vdev_child, newchild, oldsize);
kmem_free(pvd->vdev_child, oldsize);
}
pvd->vdev_child = newchild;
pvd->vdev_child[id] = cvd;
cvd->vdev_top = (pvd->vdev_top ? pvd->vdev_top: cvd);
ASSERT(cvd->vdev_top->vdev_parent->vdev_parent == NULL);
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += cvd->vdev_guid_sum;
}
void
vdev_remove_child(vdev_t *pvd, vdev_t *cvd)
{
int c;
uint_t id = cvd->vdev_id;
ASSERT(cvd->vdev_parent == pvd);
if (pvd == NULL)
return;
ASSERT(id < pvd->vdev_children);
ASSERT(pvd->vdev_child[id] == cvd);
pvd->vdev_child[id] = NULL;
cvd->vdev_parent = NULL;
for (c = 0; c < pvd->vdev_children; c++)
if (pvd->vdev_child[c])
break;
if (c == pvd->vdev_children) {
kmem_free(pvd->vdev_child, c * sizeof (vdev_t *));
pvd->vdev_child = NULL;
pvd->vdev_children = 0;
}
/*
* Walk up all ancestors to update guid sum.
*/
for (; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum -= cvd->vdev_guid_sum;
}
/*
* Remove any holes in the child array.
*/
void
vdev_compact_children(vdev_t *pvd)
{
vdev_t **newchild, *cvd;
int oldc = pvd->vdev_children;
int newc;
ASSERT(spa_config_held(pvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
+ if (oldc == 0)
+ return;
+
for (int c = newc = 0; c < oldc; c++)
if (pvd->vdev_child[c])
newc++;
- newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
+ if (newc > 0) {
+ newchild = kmem_zalloc(newc * sizeof (vdev_t *), KM_SLEEP);
- for (int c = newc = 0; c < oldc; c++) {
- if ((cvd = pvd->vdev_child[c]) != NULL) {
- newchild[newc] = cvd;
- cvd->vdev_id = newc++;
+ for (int c = newc = 0; c < oldc; c++) {
+ if ((cvd = pvd->vdev_child[c]) != NULL) {
+ newchild[newc] = cvd;
+ cvd->vdev_id = newc++;
+ }
}
+ } else {
+ newchild = NULL;
}
kmem_free(pvd->vdev_child, oldc * sizeof (vdev_t *));
pvd->vdev_child = newchild;
pvd->vdev_children = newc;
}
/*
* Allocate and minimally initialize a vdev_t.
*/
vdev_t *
vdev_alloc_common(spa_t *spa, uint_t id, uint64_t guid, vdev_ops_t *ops)
{
vdev_t *vd;
+ vdev_indirect_config_t *vic;
vd = kmem_zalloc(sizeof (vdev_t), KM_SLEEP);
+ vic = &vd->vdev_indirect_config;
if (spa->spa_root_vdev == NULL) {
ASSERT(ops == &vdev_root_ops);
spa->spa_root_vdev = vd;
spa->spa_load_guid = spa_generate_guid(NULL);
}
if (guid == 0 && ops != &vdev_hole_ops) {
if (spa->spa_root_vdev == vd) {
/*
* The root vdev's guid will also be the pool guid,
* which must be unique among all pools.
*/
guid = spa_generate_guid(NULL);
} else {
/*
* Any other vdev's guid must be unique within the pool.
*/
guid = spa_generate_guid(spa);
}
ASSERT(!spa_guid_exists(spa_guid(spa), guid));
}
vd->vdev_spa = spa;
vd->vdev_id = id;
vd->vdev_guid = guid;
vd->vdev_guid_sum = guid;
vd->vdev_ops = ops;
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_ishole = (ops == &vdev_hole_ops);
+ vic->vic_prev_indirect_vdev = UINT64_MAX;
+
+ rw_init(&vd->vdev_indirect_rwlock, NULL, RW_DEFAULT, NULL);
+ mutex_init(&vd->vdev_obsolete_lock, NULL, MUTEX_DEFAULT, NULL);
+ vd->vdev_obsolete_segments = range_tree_create(NULL, NULL);
/*
* Initialize rate limit structs for events. We rate limit ZIO delay
* and checksum events so that we don't overwhelm ZED with thousands
* of events when a disk is acting up.
*/
zfs_ratelimit_init(&vd->vdev_delay_rl, &zfs_delays_per_second, 1);
zfs_ratelimit_init(&vd->vdev_checksum_rl, &zfs_checksums_per_second, 1);
list_link_init(&vd->vdev_config_dirty_node);
list_link_init(&vd->vdev_state_dirty_node);
mutex_init(&vd->vdev_dtl_lock, NULL, MUTEX_NOLOCKDEP, NULL);
mutex_init(&vd->vdev_stat_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_probe_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_queue_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&vd->vdev_scan_io_queue_lock, NULL, MUTEX_DEFAULT, NULL);
for (int t = 0; t < DTL_TYPES; t++) {
- vd->vdev_dtl[t] = range_tree_create(NULL, NULL,
- &vd->vdev_dtl_lock);
+ vd->vdev_dtl[t] = range_tree_create(NULL, NULL);
}
txg_list_create(&vd->vdev_ms_list, spa,
offsetof(struct metaslab, ms_txg_node));
txg_list_create(&vd->vdev_dtl_list, spa,
offsetof(struct vdev, vdev_dtl_node));
vd->vdev_stat.vs_timestamp = gethrtime();
vdev_queue_init(vd);
vdev_cache_init(vd);
return (vd);
}
/*
* Allocate a new vdev. The 'alloctype' is used to control whether we are
* creating a new vdev or loading an existing one - the behavior is slightly
* different for each case.
*/
int
vdev_alloc(spa_t *spa, vdev_t **vdp, nvlist_t *nv, vdev_t *parent, uint_t id,
int alloctype)
{
vdev_ops_t *ops;
char *type;
uint64_t guid = 0, islog, nparity;
vdev_t *vd;
+ vdev_indirect_config_t *vic;
char *tmp = NULL;
int rc;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_TYPE, &type) != 0)
return (SET_ERROR(EINVAL));
if ((ops = vdev_getops(type)) == NULL)
return (SET_ERROR(EINVAL));
/*
* If this is a load, get the vdev guid from the nvlist.
* Otherwise, vdev_alloc_common() will generate one for us.
*/
if (alloctype == VDEV_ALLOC_LOAD) {
uint64_t label_id;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ID, &label_id) ||
label_id != id)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_SPARE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_L2CACHE) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
} else if (alloctype == VDEV_ALLOC_ROOTPOOL) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_GUID, &guid) != 0)
return (SET_ERROR(EINVAL));
}
/*
* The first allocated vdev must be of type 'root'.
*/
if (ops != &vdev_root_ops && spa->spa_root_vdev == NULL)
return (SET_ERROR(EINVAL));
/*
* Determine whether we're a log vdev.
*/
islog = 0;
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_LOG, &islog);
if (islog && spa_version(spa) < SPA_VERSION_SLOGS)
return (SET_ERROR(ENOTSUP));
if (ops == &vdev_hole_ops && spa_version(spa) < SPA_VERSION_HOLES)
return (SET_ERROR(ENOTSUP));
/*
* Set the nparity property for RAID-Z vdevs.
*/
nparity = -1ULL;
if (ops == &vdev_raidz_ops) {
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NPARITY,
&nparity) == 0) {
if (nparity == 0 || nparity > VDEV_RAIDZ_MAXPARITY)
return (SET_ERROR(EINVAL));
/*
* Previous versions could only support 1 or 2 parity
* devices.
*/
if (nparity > 1 &&
spa_version(spa) < SPA_VERSION_RAIDZ2)
return (SET_ERROR(ENOTSUP));
if (nparity > 2 &&
spa_version(spa) < SPA_VERSION_RAIDZ3)
return (SET_ERROR(ENOTSUP));
} else {
/*
* We require the parity to be specified for SPAs that
* support multiple parity levels.
*/
if (spa_version(spa) >= SPA_VERSION_RAIDZ2)
return (SET_ERROR(EINVAL));
/*
* Otherwise, we default to 1 parity device for RAID-Z.
*/
nparity = 1;
}
} else {
nparity = 0;
}
ASSERT(nparity != -1ULL);
vd = vdev_alloc_common(spa, id, guid, ops);
+ vic = &vd->vdev_indirect_config;
vd->vdev_islog = islog;
vd->vdev_nparity = nparity;
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PATH, &vd->vdev_path) == 0)
vd->vdev_path = spa_strdup(vd->vdev_path);
/*
* ZPOOL_CONFIG_AUX_STATE = "external" means we previously forced a
* fault on a vdev and want it to persist across imports (like with
* zpool offline -f).
*/
rc = nvlist_lookup_string(nv, ZPOOL_CONFIG_AUX_STATE, &tmp);
if (rc == 0 && tmp != NULL && strcmp(tmp, "external") == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_faulted = 1;
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
}
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_DEVID, &vd->vdev_devid) == 0)
vd->vdev_devid = spa_strdup(vd->vdev_devid);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_PHYS_PATH,
&vd->vdev_physpath) == 0)
vd->vdev_physpath = spa_strdup(vd->vdev_physpath);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
&vd->vdev_enc_sysfs_path) == 0)
vd->vdev_enc_sysfs_path = spa_strdup(vd->vdev_enc_sysfs_path);
if (nvlist_lookup_string(nv, ZPOOL_CONFIG_FRU, &vd->vdev_fru) == 0)
vd->vdev_fru = spa_strdup(vd->vdev_fru);
/*
* Set the whole_disk property. If it's not specified, leave the value
* as -1.
*/
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
&vd->vdev_wholedisk) != 0)
vd->vdev_wholedisk = -1ULL;
+ ASSERT0(vic->vic_mapping_object);
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
+ &vic->vic_mapping_object);
+ ASSERT0(vic->vic_births_object);
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
+ &vic->vic_births_object);
+ ASSERT3U(vic->vic_prev_indirect_vdev, ==, UINT64_MAX);
+ (void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
+ &vic->vic_prev_indirect_vdev);
+
/*
* Look for the 'not present' flag. This will only be set if the device
* was not present at the time of import.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT,
&vd->vdev_not_present);
/*
* Get the alignment requirement.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASHIFT, &vd->vdev_ashift);
/*
* Retrieve the vdev creation time.
*/
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_CREATE_TXG,
&vd->vdev_crtxg);
/*
* If we're a top-level vdev, try to load the allocation parameters.
*/
if (parent && !parent->vdev_parent &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
&vd->vdev_ms_array);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
&vd->vdev_ms_shift);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_ASIZE,
&vd->vdev_asize);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVING,
&vd->vdev_removing);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
&vd->vdev_top_zap);
} else {
ASSERT0(vd->vdev_top_zap);
}
if (parent && !parent->vdev_parent && alloctype != VDEV_ALLOC_ATTACH) {
ASSERT(alloctype == VDEV_ALLOC_LOAD ||
alloctype == VDEV_ALLOC_ADD ||
alloctype == VDEV_ALLOC_SPLIT ||
alloctype == VDEV_ALLOC_ROOTPOOL);
vd->vdev_mg = metaslab_group_create(islog ?
spa_log_class(spa) : spa_normal_class(spa), vd);
}
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_SPLIT)) {
(void) nvlist_lookup_uint64(nv,
ZPOOL_CONFIG_VDEV_LEAF_ZAP, &vd->vdev_leaf_zap);
} else {
ASSERT0(vd->vdev_leaf_zap);
}
/*
* If we're a leaf vdev, try to load the DTL object and other state.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(alloctype == VDEV_ALLOC_LOAD || alloctype == VDEV_ALLOC_L2CACHE ||
alloctype == VDEV_ALLOC_ROOTPOOL)) {
if (alloctype == VDEV_ALLOC_LOAD) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DTL,
&vd->vdev_dtl_object);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_UNSPARE,
&vd->vdev_unspare);
}
if (alloctype == VDEV_ALLOC_ROOTPOOL) {
uint64_t spare = 0;
if (nvlist_lookup_uint64(nv, ZPOOL_CONFIG_IS_SPARE,
&spare) == 0 && spare)
spa_spare_add(vd);
}
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_OFFLINE,
&vd->vdev_offline);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
&vd->vdev_resilver_txg);
/*
* In general, when importing a pool we want to ignore the
* persistent fault state, as the diagnosis made on another
* system may not be valid in the current context. The only
* exception is if we forced a vdev to a persistently faulted
* state with 'zpool offline -f'. The persistent fault will
* remain across imports until cleared.
*
* Local vdevs will remain in the faulted state.
*/
if (spa_load_state(spa) == SPA_LOAD_OPEN ||
spa_load_state(spa) == SPA_LOAD_IMPORT) {
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_FAULTED,
&vd->vdev_faulted);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_DEGRADED,
&vd->vdev_degraded);
(void) nvlist_lookup_uint64(nv, ZPOOL_CONFIG_REMOVED,
&vd->vdev_removed);
if (vd->vdev_faulted || vd->vdev_degraded) {
char *aux;
vd->vdev_label_aux =
VDEV_AUX_ERR_EXCEEDED;
if (nvlist_lookup_string(nv,
ZPOOL_CONFIG_AUX_STATE, &aux) == 0 &&
strcmp(aux, "external") == 0)
vd->vdev_label_aux = VDEV_AUX_EXTERNAL;
}
}
}
/*
* Add ourselves to the parent's list of children.
*/
vdev_add_child(parent, vd);
*vdp = vd;
return (0);
}
void
vdev_free(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
/*
* Scan queues are normally destroyed at the end of a scan. If the
* queue exists here, that implies the vdev is being removed while
* the scan is still running.
*/
if (vd->vdev_scan_io_queue != NULL) {
mutex_enter(&vd->vdev_scan_io_queue_lock);
dsl_scan_io_queue_destroy(vd->vdev_scan_io_queue);
vd->vdev_scan_io_queue = NULL;
mutex_exit(&vd->vdev_scan_io_queue_lock);
}
/*
* vdev_free() implies closing the vdev first. This is simpler than
* trying to ensure complicated semantics for all callers.
*/
vdev_close(vd);
ASSERT(!list_link_active(&vd->vdev_config_dirty_node));
ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
/*
* Free all children.
*/
for (int c = 0; c < vd->vdev_children; c++)
vdev_free(vd->vdev_child[c]);
ASSERT(vd->vdev_child == NULL);
ASSERT(vd->vdev_guid_sum == vd->vdev_guid);
/*
* Discard allocation state.
*/
if (vd->vdev_mg != NULL) {
vdev_metaslab_fini(vd);
metaslab_group_destroy(vd->vdev_mg);
}
ASSERT0(vd->vdev_stat.vs_space);
ASSERT0(vd->vdev_stat.vs_dspace);
ASSERT0(vd->vdev_stat.vs_alloc);
/*
* Remove this vdev from its parent's child list.
*/
vdev_remove_child(vd->vdev_parent, vd);
ASSERT(vd->vdev_parent == NULL);
/*
* Clean up vdev structure.
*/
vdev_queue_fini(vd);
vdev_cache_fini(vd);
if (vd->vdev_path)
spa_strfree(vd->vdev_path);
if (vd->vdev_devid)
spa_strfree(vd->vdev_devid);
if (vd->vdev_physpath)
spa_strfree(vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path)
spa_strfree(vd->vdev_enc_sysfs_path);
if (vd->vdev_fru)
spa_strfree(vd->vdev_fru);
if (vd->vdev_isspare)
spa_spare_remove(vd);
if (vd->vdev_isl2cache)
spa_l2cache_remove(vd);
txg_list_destroy(&vd->vdev_ms_list);
txg_list_destroy(&vd->vdev_dtl_list);
mutex_enter(&vd->vdev_dtl_lock);
space_map_close(vd->vdev_dtl_sm);
for (int t = 0; t < DTL_TYPES; t++) {
range_tree_vacate(vd->vdev_dtl[t], NULL, NULL);
range_tree_destroy(vd->vdev_dtl[t]);
}
mutex_exit(&vd->vdev_dtl_lock);
+ EQUIV(vd->vdev_indirect_births != NULL,
+ vd->vdev_indirect_mapping != NULL);
+ if (vd->vdev_indirect_births != NULL) {
+ vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
+ vdev_indirect_births_close(vd->vdev_indirect_births);
+ }
+
+ if (vd->vdev_obsolete_sm != NULL) {
+ ASSERT(vd->vdev_removing ||
+ vd->vdev_ops == &vdev_indirect_ops);
+ space_map_close(vd->vdev_obsolete_sm);
+ vd->vdev_obsolete_sm = NULL;
+ }
+ range_tree_destroy(vd->vdev_obsolete_segments);
+ rw_destroy(&vd->vdev_indirect_rwlock);
+ mutex_destroy(&vd->vdev_obsolete_lock);
+
mutex_destroy(&vd->vdev_queue_lock);
mutex_destroy(&vd->vdev_dtl_lock);
mutex_destroy(&vd->vdev_stat_lock);
mutex_destroy(&vd->vdev_probe_lock);
mutex_destroy(&vd->vdev_scan_io_queue_lock);
zfs_ratelimit_fini(&vd->vdev_delay_rl);
zfs_ratelimit_fini(&vd->vdev_checksum_rl);
if (vd == spa->spa_root_vdev)
spa->spa_root_vdev = NULL;
kmem_free(vd, sizeof (vdev_t));
}
/*
* Transfer top-level vdev state from svd to tvd.
*/
static void
vdev_top_transfer(vdev_t *svd, vdev_t *tvd)
{
spa_t *spa = svd->vdev_spa;
metaslab_t *msp;
vdev_t *vd;
int t;
ASSERT(tvd == tvd->vdev_top);
tvd->vdev_pending_fastwrite = svd->vdev_pending_fastwrite;
tvd->vdev_ms_array = svd->vdev_ms_array;
tvd->vdev_ms_shift = svd->vdev_ms_shift;
tvd->vdev_ms_count = svd->vdev_ms_count;
tvd->vdev_top_zap = svd->vdev_top_zap;
svd->vdev_ms_array = 0;
svd->vdev_ms_shift = 0;
svd->vdev_ms_count = 0;
svd->vdev_top_zap = 0;
if (tvd->vdev_mg)
ASSERT3P(tvd->vdev_mg, ==, svd->vdev_mg);
tvd->vdev_mg = svd->vdev_mg;
tvd->vdev_ms = svd->vdev_ms;
svd->vdev_mg = NULL;
svd->vdev_ms = NULL;
if (tvd->vdev_mg != NULL)
tvd->vdev_mg->mg_vd = tvd;
tvd->vdev_stat.vs_alloc = svd->vdev_stat.vs_alloc;
tvd->vdev_stat.vs_space = svd->vdev_stat.vs_space;
tvd->vdev_stat.vs_dspace = svd->vdev_stat.vs_dspace;
svd->vdev_stat.vs_alloc = 0;
svd->vdev_stat.vs_space = 0;
svd->vdev_stat.vs_dspace = 0;
for (t = 0; t < TXG_SIZE; t++) {
while ((msp = txg_list_remove(&svd->vdev_ms_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_ms_list, msp, t);
while ((vd = txg_list_remove(&svd->vdev_dtl_list, t)) != NULL)
(void) txg_list_add(&tvd->vdev_dtl_list, vd, t);
if (txg_list_remove_this(&spa->spa_vdev_txg_list, svd, t))
(void) txg_list_add(&spa->spa_vdev_txg_list, tvd, t);
}
if (list_link_active(&svd->vdev_config_dirty_node)) {
vdev_config_clean(svd);
vdev_config_dirty(tvd);
}
if (list_link_active(&svd->vdev_state_dirty_node)) {
vdev_state_clean(svd);
vdev_state_dirty(tvd);
}
tvd->vdev_deflate_ratio = svd->vdev_deflate_ratio;
svd->vdev_deflate_ratio = 0;
tvd->vdev_islog = svd->vdev_islog;
svd->vdev_islog = 0;
dsl_scan_io_queue_vdev_xfer(svd, tvd);
}
static void
vdev_top_update(vdev_t *tvd, vdev_t *vd)
{
if (vd == NULL)
return;
vd->vdev_top = tvd;
for (int c = 0; c < vd->vdev_children; c++)
vdev_top_update(tvd, vd->vdev_child[c]);
}
/*
* Add a mirror/replacing vdev above an existing vdev.
*/
vdev_t *
vdev_add_parent(vdev_t *cvd, vdev_ops_t *ops)
{
spa_t *spa = cvd->vdev_spa;
vdev_t *pvd = cvd->vdev_parent;
vdev_t *mvd;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
mvd = vdev_alloc_common(spa, cvd->vdev_id, 0, ops);
mvd->vdev_asize = cvd->vdev_asize;
mvd->vdev_min_asize = cvd->vdev_min_asize;
mvd->vdev_max_asize = cvd->vdev_max_asize;
+ mvd->vdev_psize = cvd->vdev_psize;
mvd->vdev_ashift = cvd->vdev_ashift;
mvd->vdev_state = cvd->vdev_state;
mvd->vdev_crtxg = cvd->vdev_crtxg;
vdev_remove_child(pvd, cvd);
vdev_add_child(pvd, mvd);
cvd->vdev_id = mvd->vdev_children;
vdev_add_child(mvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (mvd == mvd->vdev_top)
vdev_top_transfer(cvd, mvd);
return (mvd);
}
/*
* Remove a 1-way mirror/replacing vdev from the tree.
*/
void
vdev_remove_parent(vdev_t *cvd)
{
vdev_t *mvd = cvd->vdev_parent;
vdev_t *pvd = mvd->vdev_parent;
ASSERT(spa_config_held(cvd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
ASSERT(mvd->vdev_children == 1);
ASSERT(mvd->vdev_ops == &vdev_mirror_ops ||
mvd->vdev_ops == &vdev_replacing_ops ||
mvd->vdev_ops == &vdev_spare_ops);
cvd->vdev_ashift = mvd->vdev_ashift;
vdev_remove_child(mvd, cvd);
vdev_remove_child(pvd, mvd);
/*
* If cvd will replace mvd as a top-level vdev, preserve mvd's guid.
* Otherwise, we could have detached an offline device, and when we
* go to import the pool we'll think we have two top-level vdevs,
* instead of a different version of the same top-level vdev.
*/
if (mvd->vdev_top == mvd) {
uint64_t guid_delta = mvd->vdev_guid - cvd->vdev_guid;
cvd->vdev_orig_guid = cvd->vdev_guid;
cvd->vdev_guid += guid_delta;
cvd->vdev_guid_sum += guid_delta;
/*
* If pool not set for autoexpand, we need to also preserve
* mvd's asize to prevent automatic expansion of cvd.
* Otherwise if we are adjusting the mirror by attaching and
* detaching children of non-uniform sizes, the mirror could
* autoexpand, unexpectedly requiring larger devices to
* re-establish the mirror.
*/
if (!cvd->vdev_spa->spa_autoexpand)
cvd->vdev_asize = mvd->vdev_asize;
}
cvd->vdev_id = mvd->vdev_id;
vdev_add_child(pvd, cvd);
vdev_top_update(cvd->vdev_top, cvd->vdev_top);
if (cvd == cvd->vdev_top)
vdev_top_transfer(mvd, cvd);
ASSERT(mvd->vdev_children == 0);
vdev_free(mvd);
}
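
The guid_delta bookkeeping above is easy to misread, so here is a minimal standalone sketch of the arithmetic. The guid values are invented for illustration; only the delta formula mirrors the code above.

#include <stdio.h>
#include <stdint.h>

/*
 * Sketch of the guid_delta step in vdev_remove_parent(): the surviving
 * child adopts the old top-level guid, and its guid_sum shifts by the
 * same delta so the label guid sums stay self-consistent.
 */
int
main(void)
{
	uint64_t mvd_guid = 9000;		/* hypothetical mirror guid */
	uint64_t cvd_guid = 2500;		/* hypothetical child guid */
	uint64_t cvd_guid_sum = 2500;		/* leaf: sum is its own guid */
	uint64_t guid_delta = mvd_guid - cvd_guid;	/* 6500 */

	cvd_guid += guid_delta;		/* 9000: child now reports mvd's guid */
	cvd_guid_sum += guid_delta;	/* 9000: sum moves by the same amount */

	printf("guid %llu guid_sum %llu\n",
	    (unsigned long long)cvd_guid, (unsigned long long)cvd_guid_sum);
	return (0);
}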
int
vdev_metaslab_init(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
uint64_t m;
uint64_t oldc = vd->vdev_ms_count;
uint64_t newc = vd->vdev_asize >> vd->vdev_ms_shift;
metaslab_t **mspp;
int error;
ASSERT(txg == 0 || spa_config_held(spa, SCL_ALLOC, RW_WRITER));
/*
* This vdev is not being allocated from yet or is a hole.
*/
if (vd->vdev_ms_shift == 0)
return (0);
ASSERT(!vd->vdev_ishole);
- /*
- * Compute the raidz-deflation ratio. Note, we hard-code
- * in 128k (1 << 17) because it is the "typical" blocksize.
- * Even though SPA_MAXBLOCKSIZE changed, this algorithm can not change,
- * otherwise it would inconsistently account for existing bp's.
- */
- vd->vdev_deflate_ratio = (1 << 17) /
- (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
-
ASSERT(oldc <= newc);
mspp = vmem_zalloc(newc * sizeof (*mspp), KM_SLEEP);
if (oldc != 0) {
bcopy(vd->vdev_ms, mspp, oldc * sizeof (*mspp));
vmem_free(vd->vdev_ms, oldc * sizeof (*mspp));
}
vd->vdev_ms = mspp;
vd->vdev_ms_count = newc;
for (m = oldc; m < newc; m++) {
uint64_t object = 0;
- if (txg == 0) {
+ /*
+ * vdev_ms_array may be 0 if we are creating the "fake"
+ * metaslabs for an indirect vdev for zdb's leak detection.
+ * See zdb_leak_init().
+ */
+ if (txg == 0 && vd->vdev_ms_array != 0) {
error = dmu_read(mos, vd->vdev_ms_array,
m * sizeof (uint64_t), sizeof (uint64_t), &object,
DMU_READ_PREFETCH);
if (error)
return (error);
}
error = metaslab_init(vd->vdev_mg, m, object, txg,
&(vd->vdev_ms[m]));
if (error)
return (error);
}
if (txg == 0)
spa_config_enter(spa, SCL_ALLOC, FTAG, RW_WRITER);
/*
* If the vdev is being removed we don't activate
* the metaslabs since we want to ensure that no new
* allocations are performed on this device.
*/
if (oldc == 0 && !vd->vdev_removing)
metaslab_group_activate(vd->vdev_mg);
if (txg == 0)
spa_config_exit(spa, SCL_ALLOC, FTAG);
return (0);
}
void
vdev_metaslab_fini(vdev_t *vd)
{
- uint64_t m;
- uint64_t count = vd->vdev_ms_count;
-
if (vd->vdev_ms != NULL) {
+ uint64_t count = vd->vdev_ms_count;
+
metaslab_group_passivate(vd->vdev_mg);
- for (m = 0; m < count; m++) {
+ for (uint64_t m = 0; m < count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp != NULL)
metaslab_fini(msp);
}
vmem_free(vd->vdev_ms, count * sizeof (metaslab_t *));
vd->vdev_ms = NULL;
- }
+ vd->vdev_ms_count = 0;
+ }
+ ASSERT0(vd->vdev_ms_count);
ASSERT3U(vd->vdev_pending_fastwrite, ==, 0);
}
typedef struct vdev_probe_stats {
boolean_t vps_readable;
boolean_t vps_writeable;
int vps_flags;
} vdev_probe_stats_t;
static void
vdev_probe_done(zio_t *zio)
{
spa_t *spa = zio->io_spa;
vdev_t *vd = zio->io_vd;
vdev_probe_stats_t *vps = zio->io_private;
ASSERT(vd->vdev_probe_zio != NULL);
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_error == 0)
vps->vps_readable = 1;
if (zio->io_error == 0 && spa_writeable(spa)) {
zio_nowait(zio_write_phys(vd->vdev_probe_zio, vd,
zio->io_offset, zio->io_size, zio->io_abd,
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_WRITE, vps->vps_flags, B_TRUE));
} else {
abd_free(zio->io_abd);
}
} else if (zio->io_type == ZIO_TYPE_WRITE) {
if (zio->io_error == 0)
vps->vps_writeable = 1;
abd_free(zio->io_abd);
} else if (zio->io_type == ZIO_TYPE_NULL) {
zio_t *pio;
zio_link_t *zl;
vd->vdev_cant_read |= !vps->vps_readable;
vd->vdev_cant_write |= !vps->vps_writeable;
if (vdev_readable(vd) &&
(vdev_writeable(vd) || !spa_writeable(spa))) {
zio->io_error = 0;
} else {
ASSERT(zio->io_error != 0);
+ zfs_dbgmsg("failed probe on vdev %llu",
+ (longlong_t)vd->vdev_id);
zfs_ereport_post(FM_EREPORT_ZFS_PROBE_FAILURE,
spa, vd, NULL, NULL, 0, 0);
zio->io_error = SET_ERROR(ENXIO);
}
mutex_enter(&vd->vdev_probe_lock);
ASSERT(vd->vdev_probe_zio == zio);
vd->vdev_probe_zio = NULL;
mutex_exit(&vd->vdev_probe_lock);
zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
if (!vdev_accessible(vd, pio))
pio->io_error = SET_ERROR(ENXIO);
kmem_free(vps, sizeof (*vps));
}
}
/*
* Determine whether this device is accessible.
*
* Read and write to several known locations: the pad regions of each
* vdev label but the first, which we leave alone in case it contains
* a VTOC.
*/
zio_t *
vdev_probe(vdev_t *vd, zio_t *zio)
{
spa_t *spa = vd->vdev_spa;
vdev_probe_stats_t *vps = NULL;
zio_t *pio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
/*
* Don't probe the probe.
*/
if (zio && (zio->io_flags & ZIO_FLAG_PROBE))
return (NULL);
/*
* To prevent 'probe storms' when a device fails, we create
* just one probe i/o at a time. All zios that want to probe
* this vdev will become parents of the probe io.
*/
mutex_enter(&vd->vdev_probe_lock);
if ((pio = vd->vdev_probe_zio) == NULL) {
vps = kmem_zalloc(sizeof (*vps), KM_SLEEP);
vps->vps_flags = ZIO_FLAG_CANFAIL | ZIO_FLAG_PROBE |
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE |
ZIO_FLAG_TRYHARD;
if (spa_config_held(spa, SCL_ZIO, RW_WRITER)) {
/*
* vdev_cant_read and vdev_cant_write can only
* transition from TRUE to FALSE when we have the
* SCL_ZIO lock as writer; otherwise they can only
* transition from FALSE to TRUE. This ensures that
* any zio looking at these values can assume that
* failures persist for the life of the I/O. That's
* important because when a device has intermittent
* connectivity problems, we want to ensure that
* they're ascribed to the device (ENXIO) and not
* the zio (EIO).
*
* Since we hold SCL_ZIO as writer here, clear both
* values so the probe can reevaluate from first
* principles.
*/
vps->vps_flags |= ZIO_FLAG_CONFIG_WRITER;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
}
vd->vdev_probe_zio = pio = zio_null(NULL, spa, vd,
vdev_probe_done, vps,
vps->vps_flags | ZIO_FLAG_DONT_PROPAGATE);
/*
* We can't change the vdev state in this context, so we
* kick off an async task to do it on our behalf.
*/
if (zio != NULL) {
vd->vdev_probe_wanted = B_TRUE;
spa_async_request(spa, SPA_ASYNC_PROBE);
}
}
if (zio != NULL)
zio_add_child(zio, pio);
mutex_exit(&vd->vdev_probe_lock);
if (vps == NULL) {
ASSERT(zio != NULL);
return (NULL);
}
for (int l = 1; l < VDEV_LABELS; l++) {
zio_nowait(zio_read_phys(pio, vd,
vdev_label_offset(vd->vdev_psize, l,
offsetof(vdev_label_t, vl_pad2)), VDEV_PAD_SIZE,
abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE),
ZIO_CHECKSUM_OFF, vdev_probe_done, vps,
ZIO_PRIORITY_SYNC_READ, vps->vps_flags, B_TRUE));
}
if (zio == NULL)
return (pio);
zio_nowait(pio);
return (NULL);
}
static void
vdev_open_child(void *arg)
{
vdev_t *vd = arg;
vd->vdev_open_thread = curthread;
vd->vdev_open_error = vdev_open(vd);
vd->vdev_open_thread = NULL;
}
static boolean_t
vdev_uses_zvols(vdev_t *vd)
{
#ifdef _KERNEL
if (zvol_is_zvol(vd->vdev_path))
return (B_TRUE);
#endif
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_uses_zvols(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
void
vdev_open_children(vdev_t *vd)
{
taskq_t *tq;
int children = vd->vdev_children;
/*
* in order to handle pools on top of zvols, do the opens
* in a single thread so that the same thread holds the
* spa_namespace_lock
*/
if (vdev_uses_zvols(vd)) {
retry_sync:
for (int c = 0; c < children; c++)
vd->vdev_child[c]->vdev_open_error =
vdev_open(vd->vdev_child[c]);
} else {
tq = taskq_create("vdev_open", children, minclsyspri,
children, children, TASKQ_PREPOPULATE);
if (tq == NULL)
goto retry_sync;
for (int c = 0; c < children; c++)
VERIFY(taskq_dispatch(tq, vdev_open_child,
vd->vdev_child[c], TQ_SLEEP) != TASKQID_INVALID);
taskq_destroy(tq);
}
vd->vdev_nonrot = B_TRUE;
for (int c = 0; c < children; c++)
vd->vdev_nonrot &= vd->vdev_child[c]->vdev_nonrot;
}
+/*
+ * Compute the raidz-deflation ratio. Note, we hard-code
+ * in 128k (1 << 17) because it is the "typical" blocksize.
+ * Even though SPA_MAXBLOCKSIZE changed, this algorithm cannot change,
+ * otherwise it would inconsistently account for existing bp's.
+ */
+static void
+vdev_set_deflate_ratio(vdev_t *vd)
+{
+ if (vd == vd->vdev_top && !vd->vdev_ishole && vd->vdev_ashift != 0) {
+ vd->vdev_deflate_ratio = (1 << 17) /
+ (vdev_psize_to_asize(vd, 1 << 17) >> SPA_MINBLOCKSHIFT);
+ }
+}
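
A rough worked example of the ratio computed above, as a standalone sketch. The 160K figure is purely hypothetical, standing in for whatever vdev_psize_to_asize() would return for a parity vdev; only the (1 << 17) / (asize >> SPA_MINBLOCKSHIFT) arithmetic mirrors the code.

#include <stdio.h>
#include <stdint.h>

#define MINBLOCKSHIFT	9	/* 512-byte sectors, like SPA_MINBLOCKSHIFT */

int
main(void)
{
	/* Hypothetical asize answers for a 128K block. */
	uint64_t asize_parity = 160 * 1024;	/* some raidz-style overhead */
	uint64_t asize_plain = 128 * 1024;	/* no parity overhead */

	uint64_t ratio_parity = (1 << 17) / (asize_parity >> MINBLOCKSHIFT);
	uint64_t ratio_plain = (1 << 17) / (asize_plain >> MINBLOCKSHIFT);

	/*
	 * Prints 409 and 512: deflated ("dspace") accounting scales raw
	 * allocations on the parity vdev by roughly 409/512, i.e. ~80%.
	 */
	printf("%llu %llu\n", (unsigned long long)ratio_parity,
	    (unsigned long long)ratio_plain);
	return (0);
}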
+
/*
* Prepare a virtual device for access.
*/
int
vdev_open(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
int error;
uint64_t osize = 0;
uint64_t max_osize = 0;
uint64_t asize, max_asize, psize;
uint64_t ashift = 0;
ASSERT(vd->vdev_open_thread == curthread ||
spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT(vd->vdev_state == VDEV_STATE_CLOSED ||
vd->vdev_state == VDEV_STATE_CANT_OPEN ||
vd->vdev_state == VDEV_STATE_OFFLINE);
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_min_asize = vdev_get_min_asize(vd);
/*
* If this vdev is not removed, check its fault status. If it's
* faulted, bail out of the open.
*/
if (!vd->vdev_removed && vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
} else if (vd->vdev_offline) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_OFFLINE, VDEV_AUX_NONE);
return (SET_ERROR(ENXIO));
}
error = vd->vdev_ops->vdev_op_open(vd, &osize, &max_osize, &ashift);
/*
* Reset the vdev_reopening flag so that we actually close
* the vdev on error.
*/
vd->vdev_reopening = B_FALSE;
if (zio_injection_enabled && error == 0)
error = zio_handle_device_injection(vd, NULL, ENXIO);
if (error) {
if (vd->vdev_removed &&
vd->vdev_stat.vs_aux != VDEV_AUX_OPEN_FAILED)
vd->vdev_removed = B_FALSE;
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
vd->vdev_stat.vs_aux);
return (error);
}
vd->vdev_removed = B_FALSE;
/*
* Recheck the faulted flag now that we have confirmed that
* the vdev is accessible. If we're faulted, bail.
*/
if (vd->vdev_faulted) {
ASSERT(vd->vdev_children == 0);
ASSERT(vd->vdev_label_aux == VDEV_AUX_ERR_EXCEEDED ||
vd->vdev_label_aux == VDEV_AUX_EXTERNAL);
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
vd->vdev_label_aux);
return (SET_ERROR(ENXIO));
}
if (vd->vdev_degraded) {
ASSERT(vd->vdev_children == 0);
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_ERR_EXCEEDED);
} else {
vdev_set_state(vd, B_TRUE, VDEV_STATE_HEALTHY, 0);
}
/*
* For hole or missing vdevs we just return success.
*/
if (vd->vdev_ishole || vd->vdev_ops == &vdev_missing_ops)
return (0);
for (int c = 0; c < vd->vdev_children; c++) {
if (vd->vdev_child[c]->vdev_state != VDEV_STATE_HEALTHY) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_DEGRADED,
VDEV_AUX_NONE);
break;
}
}
osize = P2ALIGN(osize, (uint64_t)sizeof (vdev_label_t));
max_osize = P2ALIGN(max_osize, (uint64_t)sizeof (vdev_label_t));
if (vd->vdev_children == 0) {
if (osize < SPA_MINDEVSIZE) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = osize;
asize = osize - (VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE);
max_asize = max_osize - (VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE);
} else {
if (vd->vdev_parent != NULL && osize < SPA_MINDEVSIZE -
(VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_TOO_SMALL);
return (SET_ERROR(EOVERFLOW));
}
psize = 0;
asize = osize;
max_asize = max_osize;
}
/*
* If the vdev was expanded, record this so that we can re-create the
* uberblock rings in labels {2,3}, during the next sync.
*/
if ((psize > vd->vdev_psize) && (vd->vdev_psize != 0))
vd->vdev_copy_uberblocks = B_TRUE;
vd->vdev_psize = psize;
/*
* Make sure the allocatable size hasn't shrunk too much.
*/
if (asize < vd->vdev_min_asize) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (SET_ERROR(EINVAL));
}
if (vd->vdev_asize == 0) {
/*
* This is the first-ever open, so use the computed values.
* For compatibility, a different ashift can be requested.
*/
vd->vdev_asize = asize;
vd->vdev_max_asize = max_asize;
if (vd->vdev_ashift == 0) {
vd->vdev_ashift = ashift; /* use detected value */
}
if (vd->vdev_ashift != 0 && (vd->vdev_ashift < ASHIFT_MIN ||
vd->vdev_ashift > ASHIFT_MAX)) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_ASHIFT);
return (SET_ERROR(EDOM));
}
} else {
/*
* Detect if the alignment requirement has increased.
* We don't want to make the pool unavailable, just
* post an event instead.
*/
if (ashift > vd->vdev_top->vdev_ashift &&
vd->vdev_ops->vdev_op_leaf) {
zfs_ereport_post(FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT,
spa, vd, NULL, NULL, 0, 0);
}
vd->vdev_max_asize = max_asize;
}
/*
* If all children are healthy we update asize if either:
* The asize has increased, due to a device expansion caused by dynamic
* LUN growth or vdev replacement, and automatic expansion is enabled;
* making the additional space available.
*
* The asize has decreased, due to a device shrink usually caused by a
* vdev replace with a smaller device. This ensures that calculations
* based on max_asize and asize, e.g. esize, are always valid. It's safe
* to do this as we've already validated that asize is greater than
* vdev_min_asize.
*/
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
((asize > vd->vdev_asize &&
(vd->vdev_expanding || spa->spa_autoexpand)) ||
(asize < vd->vdev_asize)))
vd->vdev_asize = asize;
vdev_set_min_asize(vd);
/*
* Ensure we can issue some IO before declaring the
* vdev open for business.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(error = zio_wait(vdev_probe(vd, NULL))) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_FAULTED,
VDEV_AUX_ERR_EXCEEDED);
return (error);
}
+ if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
+ !vd->vdev_isl2cache && !vd->vdev_islog) {
+ if (vd->vdev_ashift > spa->spa_max_ashift)
+ spa->spa_max_ashift = vd->vdev_ashift;
+ if (vd->vdev_ashift < spa->spa_min_ashift)
+ spa->spa_min_ashift = vd->vdev_ashift;
+ }
+
/*
* Track the min and max ashift values for normal data devices.
*/
if (vd->vdev_top == vd && vd->vdev_ashift != 0 &&
!vd->vdev_islog && vd->vdev_aux == NULL) {
if (vd->vdev_ashift > spa->spa_max_ashift)
spa->spa_max_ashift = vd->vdev_ashift;
if (vd->vdev_ashift < spa->spa_min_ashift)
spa->spa_min_ashift = vd->vdev_ashift;
}
/*
* If a leaf vdev has a DTL, and seems healthy, then kick off a
* resilver. But don't do this if we are doing a reopen for a scrub,
* since this would just restart the scrub we are already doing.
*/
if (vd->vdev_ops->vdev_op_leaf && !spa->spa_scrub_reopen &&
vdev_resilver_needed(vd, NULL, NULL))
spa_async_request(spa, SPA_ASYNC_RESILVER);
return (0);
}
/*
* Called once the vdevs are all opened, this routine validates the label
* contents. This needs to be done before vdev_load() so that we don't
* inadvertently do repair I/Os to the wrong device.
*
* If 'strict' is false ignore the spa guid check. This is necessary because
* if the machine crashed during a re-guid the new guid might have been written
* to all of the vdev labels, but not the cached config. The strict check
* will be performed when the pool is opened again using the mos config.
*
* This function will only return failure if one of the vdevs indicates that it
* has since been destroyed or exported. This is only possible if
* /etc/zfs/zpool.cache was readonly at the time. Otherwise, the vdev state
* will be updated but the function will return 0.
*/
int
vdev_validate(vdev_t *vd, boolean_t strict)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *label;
uint64_t guid = 0, top_guid;
uint64_t state;
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_validate(vd->vdev_child[c], strict) != 0)
return (SET_ERROR(EBADF));
/*
* If the device has already failed, or was marked offline, don't do
* any further validation. Otherwise, label I/O will fail and we will
* overwrite the previous state.
*/
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
uint64_t aux_guid = 0;
nvlist_t *nvl;
uint64_t txg = spa_last_synced_txg(spa) != 0 ?
spa_last_synced_txg(spa) : -1ULL;
if ((label = vdev_label_read_config(vd, txg)) == NULL) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_BAD_LABEL);
return (0);
}
/*
* Determine if this vdev has been split off into another
* pool. If so, then refuse to open it.
*/
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_SPLIT_GUID,
&aux_guid) == 0 && aux_guid == spa_guid(spa)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_SPLIT_POOL);
nvlist_free(label);
return (0);
}
if (strict && (nvlist_lookup_uint64(label,
ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
guid != spa_guid(spa))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (0);
}
if (nvlist_lookup_nvlist(label, ZPOOL_CONFIG_VDEV_TREE, &nvl)
!= 0 || nvlist_lookup_uint64(nvl, ZPOOL_CONFIG_ORIG_GUID,
&aux_guid) != 0)
aux_guid = 0;
/*
* If this vdev just became a top-level vdev because its
* sibling was detached, it will have adopted the parent's
* vdev guid -- but the label may or may not be on disk yet.
* Fortunately, either version of the label will have the
* same top guid, so if we're a top-level vdev, we can
* safely compare to that instead.
*
* If we split this vdev off instead, then we also check the
* original pool's guid. We don't want to consider the vdev
* corrupt if it is partway through a split operation.
*/
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
&guid) != 0 ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_TOP_GUID,
&top_guid) != 0 ||
((vd->vdev_guid != guid && vd->vdev_guid != aux_guid) &&
(vd->vdev_guid != top_guid || vd != vd->vdev_top))) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (0);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (0);
}
nvlist_free(label);
/*
* If this is a verbatim import, no need to check the
* state of the pool.
*/
if (!(spa->spa_import_flags & ZFS_IMPORT_VERBATIM) &&
spa_load_state(spa) == SPA_LOAD_OPEN &&
state != POOL_STATE_ACTIVE)
return (SET_ERROR(EBADF));
/*
* If we were able to open and validate a vdev that was
* previously marked permanently unavailable, clear that state
* now.
*/
if (vd->vdev_not_present)
vd->vdev_not_present = 0;
}
return (0);
}
/*
* Close a virtual device.
*/
void
vdev_close(vdev_t *vd)
{
vdev_t *pvd = vd->vdev_parent;
ASSERTV(spa_t *spa = vd->vdev_spa);
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/*
* If our parent is reopening, then we are as well, unless we are
* going offline.
*/
if (pvd != NULL && pvd->vdev_reopening)
vd->vdev_reopening = (pvd->vdev_reopening && !vd->vdev_offline);
vd->vdev_ops->vdev_op_close(vd);
vdev_cache_purge(vd);
/*
* We record the previous state before we close it, so that if we are
* doing a reopen(), we don't generate FMA ereports if we notice that
* it's still faulted.
*/
vd->vdev_prevstate = vd->vdev_state;
if (vd->vdev_offline)
vd->vdev_state = VDEV_STATE_OFFLINE;
else
vd->vdev_state = VDEV_STATE_CLOSED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
}
void
vdev_hold(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_is_root(spa));
if (spa->spa_state == POOL_STATE_UNINITIALIZED)
return;
for (int c = 0; c < vd->vdev_children; c++)
vdev_hold(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_hold(vd);
}
void
vdev_rele(vdev_t *vd)
{
ASSERT(spa_is_root(vd->vdev_spa));
for (int c = 0; c < vd->vdev_children; c++)
vdev_rele(vd->vdev_child[c]);
if (vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_rele(vd);
}
/*
* Reopen all interior vdevs and any unopened leaves. We don't actually
* reopen leaf vdevs which had previously been opened as they might deadlock
* on the spa_config_lock. Instead we only obtain the leaf's physical size.
* If the leaf has never been opened then open it, as usual.
*/
void
vdev_reopen(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
/* set the reopening flag unless we're taking the vdev offline */
vd->vdev_reopening = !vd->vdev_offline;
vdev_close(vd);
(void) vdev_open(vd);
/*
* Call vdev_validate() here to make sure we have the same device.
* Otherwise, a device with an invalid label could be successfully
* opened in response to vdev_reopen().
*/
if (vd->vdev_aux) {
(void) vdev_validate_aux(vd);
if (vdev_readable(vd) && vdev_writeable(vd) &&
vd->vdev_aux == &spa->spa_l2cache &&
!l2arc_vdev_present(vd))
l2arc_add_vdev(spa, vd);
} else {
(void) vdev_validate(vd, B_TRUE);
}
/*
* Reassess parent vdev's health.
*/
vdev_propagate_state(vd);
}
int
vdev_create(vdev_t *vd, uint64_t txg, boolean_t isreplacing)
{
int error;
/*
* Normally, partial opens (e.g. of a mirror) are allowed.
* For a create, however, we want to fail the request if
* there are any components we can't open.
*/
error = vdev_open(vd);
if (error || vd->vdev_state != VDEV_STATE_HEALTHY) {
vdev_close(vd);
return (error ? error : ENXIO);
}
/*
* Recursively load DTLs and initialize all labels.
*/
if ((error = vdev_dtl_load(vd)) != 0 ||
(error = vdev_label_init(vd, txg, isreplacing ?
VDEV_LABEL_REPLACE : VDEV_LABEL_CREATE)) != 0) {
vdev_close(vd);
return (error);
}
return (0);
}
void
vdev_metaslab_set_size(vdev_t *vd)
{
/*
* Aim for roughly metaslabs_per_vdev (default 200) metaslabs per vdev.
*/
vd->vdev_ms_shift = highbit64(vd->vdev_asize / metaslabs_per_vdev);
vd->vdev_ms_shift = MAX(vd->vdev_ms_shift, SPA_MAXBLOCKSHIFT);
}
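
For a sense of scale, here is a quick standalone sketch of the sizing rule above for a hypothetical 2 TiB vdev with metaslabs_per_vdev at its default of 200. The local highbit64() is only a stand-in assumed to match the kernel helper (1-based index of the highest set bit), and the MAX() floor from the original code is omitted because it does not bind at this size.

#include <stdio.h>
#include <stdint.h>

/* Assumed semantics: 1-based index of the highest set bit, 0 for 0. */
static int
highbit64(uint64_t x)
{
	int h = 0;

	while (x != 0) {
		x >>= 1;
		h++;
	}
	return (h);
}

int
main(void)
{
	uint64_t asize = 2ULL << 40;		/* 2 TiB */
	int ms_shift = highbit64(asize / 200);	/* 34, i.e. 16 GiB metaslabs */

	printf("ms_shift %d, %llu metaslabs\n", ms_shift,
	    (unsigned long long)(asize >> ms_shift));	/* 128 metaslabs */
	return (0);
}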
void
vdev_dirty(vdev_t *vd, int flags, void *arg, uint64_t txg)
{
ASSERT(vd == vd->vdev_top);
- ASSERT(!vd->vdev_ishole);
+ /* indirect vdevs don't have metaslabs or dtls */
+ ASSERT(vdev_is_concrete(vd) || flags == 0);
ASSERT(ISP2(flags));
ASSERT(spa_writeable(vd->vdev_spa));
if (flags & VDD_METASLAB)
(void) txg_list_add(&vd->vdev_ms_list, arg, txg);
if (flags & VDD_DTL)
(void) txg_list_add(&vd->vdev_dtl_list, arg, txg);
(void) txg_list_add(&vd->vdev_spa->spa_vdev_txg_list, vd, txg);
}
void
vdev_dirty_leaves(vdev_t *vd, int flags, uint64_t txg)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_dirty_leaves(vd->vdev_child[c], flags, txg);
if (vd->vdev_ops->vdev_op_leaf)
vdev_dirty(vd->vdev_top, flags, vd, txg);
}
/*
* DTLs.
*
* A vdev's DTL (dirty time log) is the set of transaction groups for which
* the vdev has less than perfect replication. There are four kinds of DTL:
*
* DTL_MISSING: txgs for which the vdev has no valid copies of the data
*
* DTL_PARTIAL: txgs for which data is available, but not fully replicated
*
* DTL_SCRUB: the txgs that could not be repaired by the last scrub; upon
* scrub completion, DTL_SCRUB replaces DTL_MISSING in the range of
* txgs that was scrubbed.
*
* DTL_OUTAGE: txgs which cannot currently be read, whether due to
* persistent errors or just some device being offline.
* Unlike the other three, the DTL_OUTAGE map is not generally
* maintained; it's only computed when needed, typically to
* determine whether a device can be detached.
*
* For leaf vdevs, DTL_MISSING and DTL_PARTIAL are identical: the device
* either has the data or it doesn't.
*
* For interior vdevs such as mirror and RAID-Z the picture is more complex.
* A vdev's DTL_PARTIAL is the union of its children's DTL_PARTIALs, because
* if any child is less than fully replicated, then so is its parent.
* A vdev's DTL_MISSING is a modified union of its children's DTL_MISSINGs,
* comprising only those txgs which appear in more than 'maxfaults' children;
* those are the txgs we don't have enough replication to read. For example,
* double-parity RAID-Z can tolerate up to two missing devices (maxfaults == 2);
* thus, its DTL_MISSING consists of the set of txgs that appear in more than
* two child DTL_MISSING maps.
*
* It should be clear from the above that to compute the DTLs and outage maps
* for all vdevs, it suffices to know just the leaf vdevs' DTL_MISSING maps.
* Therefore, that is all we keep on disk. When loading the pool, or after
* a configuration change, we generate all other DTLs from first principles.
*/
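
To make the interior-vdev rule concrete, here is a small hand-worked example in the same comment style; the txg ranges are invented, and the minref values are the ones assigned in vdev_dtl_reassess() below.

/*
 * Example: a 2-way mirror (minref == vdev_children == 2 for DTL_MISSING)
 * whose children report:
 *
 *	child A   DTL_MISSING = [100, 110)
 *	child B   DTL_MISSING = [105, 120)
 *
 * The mirror's DTL_PARTIAL is the union, [100, 120): some child is missing
 * each of those txgs.  Its DTL_MISSING is only the overlap, [105, 110):
 * the txgs no child can supply.  For raidz2 (minref == nparity + 1 == 3),
 * a txg would have to appear in three children's DTL_MISSING maps before
 * it showed up in the parent's.
 */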
void
vdev_dtl_dirty(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
ASSERT(spa_writeable(vd->vdev_spa));
- mutex_enter(rt->rt_lock);
+ mutex_enter(&vd->vdev_dtl_lock);
if (!range_tree_contains(rt, txg, size))
range_tree_add(rt, txg, size);
- mutex_exit(rt->rt_lock);
+ mutex_exit(&vd->vdev_dtl_lock);
}
boolean_t
vdev_dtl_contains(vdev_t *vd, vdev_dtl_type_t t, uint64_t txg, uint64_t size)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t dirty = B_FALSE;
ASSERT(t < DTL_TYPES);
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
- mutex_enter(rt->rt_lock);
+ /*
+ * While we are loading the pool, the DTLs have not been loaded yet.
+ * Ignore the DTLs and try all devices. This avoids a recursive
+ * mutex enter on the vdev_dtl_lock, and also makes us try hard
+ * when loading the pool (relying on the checksum to ensure that
+ * we get the right data -- note that while loading, we are
+ * only reading the MOS, which is always checksummed).
+ */
+ if (vd->vdev_spa->spa_load_state != SPA_LOAD_NONE)
+ return (B_FALSE);
+
+ mutex_enter(&vd->vdev_dtl_lock);
if (range_tree_space(rt) != 0)
dirty = range_tree_contains(rt, txg, size);
- mutex_exit(rt->rt_lock);
+ mutex_exit(&vd->vdev_dtl_lock);
return (dirty);
}
boolean_t
vdev_dtl_empty(vdev_t *vd, vdev_dtl_type_t t)
{
range_tree_t *rt = vd->vdev_dtl[t];
boolean_t empty;
- mutex_enter(rt->rt_lock);
+ mutex_enter(&vd->vdev_dtl_lock);
empty = (range_tree_space(rt) == 0);
- mutex_exit(rt->rt_lock);
+ mutex_exit(&vd->vdev_dtl_lock);
return (empty);
}
/*
* Returns B_TRUE if vdev determines offset needs to be resilvered.
*/
boolean_t
vdev_dtl_need_resilver(vdev_t *vd, uint64_t offset, size_t psize)
{
ASSERT(vd != vd->vdev_spa->spa_root_vdev);
if (vd->vdev_ops->vdev_op_need_resilver == NULL ||
vd->vdev_ops->vdev_op_leaf)
return (B_TRUE);
return (vd->vdev_ops->vdev_op_need_resilver(vd, offset, psize));
}
/*
* Returns the lowest txg in the DTL range.
*/
static uint64_t
vdev_dtl_min(vdev_t *vd)
{
range_seg_t *rs;
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
rs = avl_first(&vd->vdev_dtl[DTL_MISSING]->rt_root);
return (rs->rs_start - 1);
}
/*
* Returns the highest txg in the DTL.
*/
static uint64_t
vdev_dtl_max(vdev_t *vd)
{
range_seg_t *rs;
ASSERT(MUTEX_HELD(&vd->vdev_dtl_lock));
ASSERT3U(range_tree_space(vd->vdev_dtl[DTL_MISSING]), !=, 0);
ASSERT0(vd->vdev_children);
rs = avl_last(&vd->vdev_dtl[DTL_MISSING]->rt_root);
return (rs->rs_end);
}
/*
* Determine if a resilvering vdev should remove any DTL entries from
* its range. If the vdev was resilvering for the entire duration of the
* scan then it should excise that range from its DTLs. Otherwise, this
* vdev is considered partially resilvered and should leave its DTL
* entries intact. The comment in vdev_dtl_reassess() describes how we
* excise the DTLs.
*/
static boolean_t
vdev_dtl_should_excise(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
ASSERT0(scn->scn_phys.scn_errors);
ASSERT0(vd->vdev_children);
if (vd->vdev_state < VDEV_STATE_DEGRADED)
return (B_FALSE);
if (vd->vdev_resilver_txg == 0 ||
range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0)
return (B_TRUE);
/*
* When a resilver is initiated, the scan will assign the scn_max_txg
* value to the highest txg value that exists in any DTL. If this
* device's max DTL is not part of this scan (i.e. it is not in
* the range (scn_min_txg, scn_max_txg]), then it is not eligible
* for excision.
*/
if (vdev_dtl_max(vd) <= scn->scn_phys.scn_max_txg) {
ASSERT3U(scn->scn_phys.scn_min_txg, <=, vdev_dtl_min(vd));
ASSERT3U(scn->scn_phys.scn_min_txg, <, vd->vdev_resilver_txg);
ASSERT3U(vd->vdev_resilver_txg, <=, scn->scn_phys.scn_max_txg);
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Reassess DTLs after a config change or scrub completion.
*/
void
vdev_dtl_reassess(vdev_t *vd, uint64_t txg, uint64_t scrub_txg, int scrub_done)
{
spa_t *spa = vd->vdev_spa;
avl_tree_t reftree;
int minref;
ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
for (int c = 0; c < vd->vdev_children; c++)
vdev_dtl_reassess(vd->vdev_child[c], txg,
scrub_txg, scrub_done);
- if (vd == spa->spa_root_vdev || vd->vdev_ishole || vd->vdev_aux)
+ if (vd == spa->spa_root_vdev || !vdev_is_concrete(vd) || vd->vdev_aux)
return;
if (vd->vdev_ops->vdev_op_leaf) {
dsl_scan_t *scn = spa->spa_dsl_pool->dp_scan;
mutex_enter(&vd->vdev_dtl_lock);
/*
* If requested, pretend the scan completed cleanly.
*/
if (zfs_scan_ignore_errors && scn)
scn->scn_phys.scn_errors = 0;
/*
* If we've completed a scan cleanly then determine
* if this vdev should remove any DTLs. We only want to
* excise regions on vdevs that were available during
* the entire duration of this scan.
*/
if (scrub_txg != 0 &&
(spa->spa_scrub_started ||
(scn != NULL && scn->scn_phys.scn_errors == 0)) &&
vdev_dtl_should_excise(vd)) {
/*
* We completed a scrub up to scrub_txg. If we
* did it without rebooting, then the scrub dtl
* will be valid, so excise the old region and
* fold in the scrub dtl. Otherwise, leave the
* dtl as-is if there was an error.
*
* There's a little trick here: to excise the beginning
* of the DTL_MISSING map, we put it into a reference
* tree and then add a segment with refcnt -1 that
* covers the range [0, scrub_txg). This means
* that each txg in that range has refcnt -1 or 0.
* We then add DTL_SCRUB with a refcnt of 2, so that
* entries in the range [0, scrub_txg) will have a
* positive refcnt -- either 1 or 2. We then convert
* the reference tree into the new DTL_MISSING map.
*/
space_reftree_create(&reftree);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_add_seg(&reftree, 0, scrub_txg, -1);
space_reftree_add_map(&reftree,
vd->vdev_dtl[DTL_SCRUB], 2);
space_reftree_generate_map(&reftree,
vd->vdev_dtl[DTL_MISSING], 1);
space_reftree_destroy(&reftree);
}
range_tree_vacate(vd->vdev_dtl[DTL_PARTIAL], NULL, NULL);
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_PARTIAL]);
if (scrub_done)
range_tree_vacate(vd->vdev_dtl[DTL_SCRUB], NULL, NULL);
range_tree_vacate(vd->vdev_dtl[DTL_OUTAGE], NULL, NULL);
if (!vdev_readable(vd))
range_tree_add(vd->vdev_dtl[DTL_OUTAGE], 0, -1ULL);
else
range_tree_walk(vd->vdev_dtl[DTL_MISSING],
range_tree_add, vd->vdev_dtl[DTL_OUTAGE]);
/*
* If the vdev was resilvering and no longer has any
* DTLs then reset its resilvering flag and dirty
* the top level so that we persist the change.
*/
if (vd->vdev_resilver_txg != 0 &&
range_tree_space(vd->vdev_dtl[DTL_MISSING]) == 0 &&
range_tree_space(vd->vdev_dtl[DTL_OUTAGE]) == 0) {
vd->vdev_resilver_txg = 0;
vdev_config_dirty(vd->vdev_top);
}
mutex_exit(&vd->vdev_dtl_lock);
if (txg != 0)
vdev_dirty(vd->vdev_top, VDD_DTL, vd, txg);
return;
}
mutex_enter(&vd->vdev_dtl_lock);
for (int t = 0; t < DTL_TYPES; t++) {
/* account for child's outage in parent's missing map */
int s = (t == DTL_MISSING) ? DTL_OUTAGE: t;
if (t == DTL_SCRUB)
continue; /* leaf vdevs only */
if (t == DTL_PARTIAL)
minref = 1; /* i.e. non-zero */
else if (vd->vdev_nparity != 0)
minref = vd->vdev_nparity + 1; /* RAID-Z */
else
minref = vd->vdev_children; /* any kind of mirror */
space_reftree_create(&reftree);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
mutex_enter(&cvd->vdev_dtl_lock);
space_reftree_add_map(&reftree, cvd->vdev_dtl[s], 1);
mutex_exit(&cvd->vdev_dtl_lock);
}
space_reftree_generate_map(&reftree, vd->vdev_dtl[t], minref);
space_reftree_destroy(&reftree);
}
mutex_exit(&vd->vdev_dtl_lock);
}
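
Walking through the reference-tree excision in the leaf branch above with invented numbers may help; the refcounts follow directly from the space_reftree_* calls shown in the function.

/*
 * Suppose DTL_MISSING = [50, 200), DTL_SCRUB = [50, 60) and
 * scrub_txg = 150.  Adding DTL_MISSING at +1, the segment [0, 150)
 * at -1 and DTL_SCRUB at +2 gives:
 *
 *	[50, 60)	1 - 1 + 2 = 2	scrubbed but not repaired
 *	[60, 150)	1 - 1     = 0	successfully repaired by the scrub
 *	[150, 200)	1         = 1	not covered by this scrub
 *
 * Regenerating DTL_MISSING with minref 1 therefore keeps [50, 60) and
 * [150, 200) and drops the repaired middle of the map.
 */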
int
vdev_dtl_load(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
objset_t *mos = spa->spa_meta_objset;
int error = 0;
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_dtl_object != 0) {
- ASSERT(!vd->vdev_ishole);
+ ASSERT(vdev_is_concrete(vd));
error = space_map_open(&vd->vdev_dtl_sm, mos,
- vd->vdev_dtl_object, 0, -1ULL, 0, &vd->vdev_dtl_lock);
+ vd->vdev_dtl_object, 0, -1ULL, 0);
if (error)
return (error);
ASSERT(vd->vdev_dtl_sm != NULL);
mutex_enter(&vd->vdev_dtl_lock);
/*
* Now that we've opened the space_map we need to update
* the in-core DTL.
*/
space_map_update(vd->vdev_dtl_sm);
error = space_map_load(vd->vdev_dtl_sm,
vd->vdev_dtl[DTL_MISSING], SM_ALLOC);
mutex_exit(&vd->vdev_dtl_lock);
return (error);
}
for (int c = 0; c < vd->vdev_children; c++) {
error = vdev_dtl_load(vd->vdev_child[c]);
if (error != 0)
break;
}
return (error);
}
void
vdev_destroy_unlink_zap(vdev_t *vd, uint64_t zapobj, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
VERIFY0(zap_destroy(spa->spa_meta_objset, zapobj, tx));
VERIFY0(zap_remove_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zapobj, tx));
}
uint64_t
vdev_create_link_zap(vdev_t *vd, dmu_tx_t *tx)
{
spa_t *spa = vd->vdev_spa;
uint64_t zap = zap_create(spa->spa_meta_objset, DMU_OTN_ZAP_METADATA,
DMU_OT_NONE, 0, tx);
ASSERT(zap != 0);
VERIFY0(zap_add_int(spa->spa_meta_objset, spa->spa_all_vdev_zaps,
zap, tx));
return (zap);
}
void
vdev_construct_zaps(vdev_t *vd, dmu_tx_t *tx)
{
if (vd->vdev_ops != &vdev_hole_ops &&
vd->vdev_ops != &vdev_missing_ops &&
vd->vdev_ops != &vdev_root_ops &&
!vd->vdev_top->vdev_removing) {
if (vd->vdev_ops->vdev_op_leaf && vd->vdev_leaf_zap == 0) {
vd->vdev_leaf_zap = vdev_create_link_zap(vd, tx);
}
if (vd == vd->vdev_top && vd->vdev_top_zap == 0) {
vd->vdev_top_zap = vdev_create_link_zap(vd, tx);
}
}
for (uint64_t i = 0; i < vd->vdev_children; i++) {
vdev_construct_zaps(vd->vdev_child[i], tx);
}
}
void
vdev_dtl_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
range_tree_t *rt = vd->vdev_dtl[DTL_MISSING];
objset_t *mos = spa->spa_meta_objset;
range_tree_t *rtsync;
- kmutex_t rtlock;
dmu_tx_t *tx;
uint64_t object = space_map_object(vd->vdev_dtl_sm);
- ASSERT(!vd->vdev_ishole);
+ ASSERT(vdev_is_concrete(vd));
ASSERT(vd->vdev_ops->vdev_op_leaf);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
if (vd->vdev_detached || vd->vdev_top->vdev_removing) {
mutex_enter(&vd->vdev_dtl_lock);
space_map_free(vd->vdev_dtl_sm, tx);
space_map_close(vd->vdev_dtl_sm);
vd->vdev_dtl_sm = NULL;
mutex_exit(&vd->vdev_dtl_lock);
/*
* We only destroy the leaf ZAP for detached leaves or for
* removed log devices. Removed data devices handle leaf ZAP
* cleanup later, once cancellation is no longer possible.
*/
if (vd->vdev_leaf_zap != 0 && (vd->vdev_detached ||
vd->vdev_top->vdev_islog)) {
vdev_destroy_unlink_zap(vd, vd->vdev_leaf_zap, tx);
vd->vdev_leaf_zap = 0;
}
dmu_tx_commit(tx);
return;
}
if (vd->vdev_dtl_sm == NULL) {
uint64_t new_object;
new_object = space_map_alloc(mos, tx);
VERIFY3U(new_object, !=, 0);
VERIFY0(space_map_open(&vd->vdev_dtl_sm, mos, new_object,
- 0, -1ULL, 0, &vd->vdev_dtl_lock));
+ 0, -1ULL, 0));
ASSERT(vd->vdev_dtl_sm != NULL);
}
- mutex_init(&rtlock, NULL, MUTEX_DEFAULT, NULL);
-
- rtsync = range_tree_create(NULL, NULL, &rtlock);
-
- mutex_enter(&rtlock);
+ rtsync = range_tree_create(NULL, NULL);
mutex_enter(&vd->vdev_dtl_lock);
range_tree_walk(rt, range_tree_add, rtsync);
mutex_exit(&vd->vdev_dtl_lock);
space_map_truncate(vd->vdev_dtl_sm, tx);
space_map_write(vd->vdev_dtl_sm, rtsync, SM_ALLOC, tx);
range_tree_vacate(rtsync, NULL, NULL);
range_tree_destroy(rtsync);
- mutex_exit(&rtlock);
- mutex_destroy(&rtlock);
-
/*
* If the object for the space map has changed then dirty
* the top level so that we update the config.
*/
if (object != space_map_object(vd->vdev_dtl_sm)) {
zfs_dbgmsg("txg %llu, spa %s, DTL old object %llu, "
"new object %llu", txg, spa_name(spa), object,
space_map_object(vd->vdev_dtl_sm));
vdev_config_dirty(vd->vdev_top);
}
dmu_tx_commit(tx);
mutex_enter(&vd->vdev_dtl_lock);
space_map_update(vd->vdev_dtl_sm);
mutex_exit(&vd->vdev_dtl_lock);
}
/*
* Determine whether the specified vdev can be offlined/detached/removed
* without losing data.
*/
boolean_t
vdev_dtl_required(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *tvd = vd->vdev_top;
uint8_t cant_read = vd->vdev_cant_read;
boolean_t required;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == spa->spa_root_vdev || vd == tvd)
return (B_TRUE);
/*
* Temporarily mark the device as unreadable, and then determine
* whether this results in any DTL outages in the top-level vdev.
* If not, we can safely offline/detach/remove the device.
*/
vd->vdev_cant_read = B_TRUE;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
required = !vdev_dtl_empty(tvd, DTL_OUTAGE);
vd->vdev_cant_read = cant_read;
vdev_dtl_reassess(tvd, 0, 0, B_FALSE);
if (!required && zio_injection_enabled)
required = !!zio_handle_device_injection(vd, NULL, ECHILD);
return (required);
}
/*
* Determine if resilver is needed, and if so the txg range.
*/
boolean_t
vdev_resilver_needed(vdev_t *vd, uint64_t *minp, uint64_t *maxp)
{
boolean_t needed = B_FALSE;
uint64_t thismin = UINT64_MAX;
uint64_t thismax = 0;
if (vd->vdev_children == 0) {
mutex_enter(&vd->vdev_dtl_lock);
if (range_tree_space(vd->vdev_dtl[DTL_MISSING]) != 0 &&
vdev_writeable(vd)) {
thismin = vdev_dtl_min(vd);
thismax = vdev_dtl_max(vd);
needed = B_TRUE;
}
mutex_exit(&vd->vdev_dtl_lock);
} else {
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
uint64_t cmin, cmax;
if (vdev_resilver_needed(cvd, &cmin, &cmax)) {
thismin = MIN(thismin, cmin);
thismax = MAX(thismax, cmax);
needed = B_TRUE;
}
}
}
if (needed && minp) {
*minp = thismin;
*maxp = thismax;
}
return (needed);
}
-void
+int
vdev_load(vdev_t *vd)
{
+ int error = 0;
+
/*
* Recursively load all children.
*/
- for (int c = 0; c < vd->vdev_children; c++)
- vdev_load(vd->vdev_child[c]);
+ for (int c = 0; c < vd->vdev_children; c++) {
+ error = vdev_load(vd->vdev_child[c]);
+ if (error != 0) {
+ return (error);
+ }
+ }
+
+ vdev_set_deflate_ratio(vd);
/*
* If this is a top-level vdev, initialize its metaslabs.
*/
- if (vd == vd->vdev_top && !vd->vdev_ishole &&
- (vd->vdev_ashift == 0 || vd->vdev_asize == 0 ||
- vdev_metaslab_init(vd, 0) != 0))
- vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
- VDEV_AUX_CORRUPT_DATA);
+ if (vd == vd->vdev_top && vdev_is_concrete(vd)) {
+ if (vd->vdev_ashift == 0 || vd->vdev_asize == 0) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ return (SET_ERROR(ENXIO));
+ } else if ((error = vdev_metaslab_init(vd, 0)) != 0) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ return (error);
+ }
+ }
+
/*
* If this is a leaf vdev, load its DTL.
*/
- if (vd->vdev_ops->vdev_op_leaf && vdev_dtl_load(vd) != 0)
+ if (vd->vdev_ops->vdev_op_leaf && (error = vdev_dtl_load(vd)) != 0) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
+ return (error);
+ }
+
+ uint64_t obsolete_sm_object = vdev_obsolete_sm_object(vd);
+ if (obsolete_sm_object != 0) {
+ objset_t *mos = vd->vdev_spa->spa_meta_objset;
+ ASSERT(vd->vdev_asize != 0);
+ ASSERT(vd->vdev_obsolete_sm == NULL);
+
+ if ((error = space_map_open(&vd->vdev_obsolete_sm, mos,
+ obsolete_sm_object, 0, vd->vdev_asize, 0))) {
+ vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
+ VDEV_AUX_CORRUPT_DATA);
+ return (error);
+ }
+ space_map_update(vd->vdev_obsolete_sm);
+ }
+
+ return (0);
}
/*
* The special vdev case is used for hot spares and l2cache devices. Its
* sole purpose is to set the vdev state for the associated vdev. To do this,
* we make sure that we can open the underlying device, then try to read the
* label, and make sure that the label is sane and that it hasn't been
* repurposed to another pool.
*/
int
vdev_validate_aux(vdev_t *vd)
{
nvlist_t *label;
uint64_t guid, version;
uint64_t state;
if (!vdev_readable(vd))
return (0);
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
return (-1);
}
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_VERSION, &version) != 0 ||
!SPA_VERSION_IS_SUPPORTED(version) ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID, &guid) != 0 ||
guid != vd->vdev_guid ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE, &state) != 0) {
vdev_set_state(vd, B_TRUE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
nvlist_free(label);
return (-1);
}
/*
* We don't actually check the pool state here. If it's in fact in
* use by another pool, we update this fact on the fly when requested.
*/
nvlist_free(label);
return (0);
}
+/*
+ * Free the objects used to store this vdev's spacemaps, and the array
+ * that points to them.
+ */
void
-vdev_remove(vdev_t *vd, uint64_t txg)
+vdev_destroy_spacemaps(vdev_t *vd, dmu_tx_t *tx)
+{
+ if (vd->vdev_ms_array == 0)
+ return;
+
+ objset_t *mos = vd->vdev_spa->spa_meta_objset;
+ uint64_t array_count = vd->vdev_asize >> vd->vdev_ms_shift;
+ size_t array_bytes = array_count * sizeof (uint64_t);
+ uint64_t *smobj_array = kmem_alloc(array_bytes, KM_SLEEP);
+ VERIFY0(dmu_read(mos, vd->vdev_ms_array, 0,
+ array_bytes, smobj_array, 0));
+
+ for (uint64_t i = 0; i < array_count; i++) {
+ uint64_t smobj = smobj_array[i];
+ if (smobj == 0)
+ continue;
+
+ space_map_free_obj(mos, smobj, tx);
+ }
+
+ kmem_free(smobj_array, array_bytes);
+ VERIFY0(dmu_object_free(mos, vd->vdev_ms_array, tx));
+ vd->vdev_ms_array = 0;
+}
+
+static void
+vdev_remove_empty(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
- objset_t *mos = spa->spa_meta_objset;
dmu_tx_t *tx;
- tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
ASSERT(vd == vd->vdev_top);
ASSERT3U(txg, ==, spa_syncing_txg(spa));
if (vd->vdev_ms != NULL) {
metaslab_group_t *mg = vd->vdev_mg;
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
for (int m = 0; m < vd->vdev_ms_count; m++) {
metaslab_t *msp = vd->vdev_ms[m];
if (msp == NULL || msp->ms_sm == NULL)
continue;
mutex_enter(&msp->ms_lock);
/*
* If the metaslab was not loaded when the vdev
* was removed then the histogram accounting may
* not be accurate. Update the histogram information
* here so that we ensure that the metaslab group
* and metaslab class are up-to-date.
*/
metaslab_group_histogram_remove(mg, msp);
VERIFY0(space_map_allocated(msp->ms_sm));
- space_map_free(msp->ms_sm, tx);
space_map_close(msp->ms_sm);
msp->ms_sm = NULL;
mutex_exit(&msp->ms_lock);
}
metaslab_group_histogram_verify(mg);
metaslab_class_histogram_verify(mg->mg_class);
for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++)
ASSERT0(mg->mg_histogram[i]);
-
}
- if (vd->vdev_ms_array) {
- (void) dmu_object_free(mos, vd->vdev_ms_array, tx);
- vd->vdev_ms_array = 0;
- }
+ tx = dmu_tx_create_assigned(spa_get_dsl(spa), txg);
+ vdev_destroy_spacemaps(vd, tx);
if (vd->vdev_islog && vd->vdev_top_zap != 0) {
vdev_destroy_unlink_zap(vd, vd->vdev_top_zap, tx);
vd->vdev_top_zap = 0;
}
dmu_tx_commit(tx);
}
void
vdev_sync_done(vdev_t *vd, uint64_t txg)
{
metaslab_t *msp;
boolean_t reassess = !txg_list_empty(&vd->vdev_ms_list, TXG_CLEAN(txg));
- ASSERT(!vd->vdev_ishole);
+ ASSERT(vdev_is_concrete(vd));
while ((msp = txg_list_remove(&vd->vdev_ms_list, TXG_CLEAN(txg))))
metaslab_sync_done(msp, txg);
if (reassess)
metaslab_sync_reassess(vd->vdev_mg);
}
void
vdev_sync(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
vdev_t *lvd;
metaslab_t *msp;
dmu_tx_t *tx;
- ASSERT(!vd->vdev_ishole);
+ if (range_tree_space(vd->vdev_obsolete_segments) > 0) {
+ dmu_tx_t *tx;
- if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0) {
+ ASSERT(vd->vdev_removing ||
+ vd->vdev_ops == &vdev_indirect_ops);
+
+ tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
+ vdev_indirect_sync_obsolete(vd, tx);
+ dmu_tx_commit(tx);
+
+ /*
+ * If the vdev is indirect, it can't have dirty
+ * metaslabs or DTLs.
+ */
+ if (vd->vdev_ops == &vdev_indirect_ops) {
+ ASSERT(txg_list_empty(&vd->vdev_ms_list, txg));
+ ASSERT(txg_list_empty(&vd->vdev_dtl_list, txg));
+ return;
+ }
+ }
+
+ ASSERT(vdev_is_concrete(vd));
+
+ if (vd->vdev_ms_array == 0 && vd->vdev_ms_shift != 0 &&
+ !vd->vdev_removing) {
ASSERT(vd == vd->vdev_top);
+ ASSERT0(vd->vdev_indirect_config.vic_mapping_object);
tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
vd->vdev_ms_array = dmu_object_alloc(spa->spa_meta_objset,
DMU_OT_OBJECT_ARRAY, 0, DMU_OT_NONE, 0, tx);
ASSERT(vd->vdev_ms_array != 0);
vdev_config_dirty(vd);
dmu_tx_commit(tx);
}
- /*
- * Remove the metadata associated with this vdev once it's empty.
- */
- if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing)
- vdev_remove(vd, txg);
-
while ((msp = txg_list_remove(&vd->vdev_ms_list, txg)) != NULL) {
metaslab_sync(msp, txg);
(void) txg_list_add(&vd->vdev_ms_list, msp, TXG_CLEAN(txg));
}
while ((lvd = txg_list_remove(&vd->vdev_dtl_list, txg)) != NULL)
vdev_dtl_sync(lvd, txg);
+ /*
+ * Remove the metadata associated with this vdev once it's empty.
+ * Note that this is typically used for log/cache device removal;
+ * we don't empty toplevel vdevs when removing them. But if
+ * a toplevel happens to be emptied, this is not harmful.
+ */
+ if (vd->vdev_stat.vs_alloc == 0 && vd->vdev_removing) {
+ vdev_remove_empty(vd, txg);
+ }
+
(void) txg_list_add(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg));
}
uint64_t
vdev_psize_to_asize(vdev_t *vd, uint64_t psize)
{
return (vd->vdev_ops->vdev_op_asize(vd, psize));
}
/*
* Mark the given vdev faulted. A faulted vdev behaves as if the device could
* not be opened, and no I/O is attempted.
*/
int
vdev_fault(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd, *tvd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
tvd = vd->vdev_top;
/*
* If the user did a 'zpool offline -f', then make the fault persist across
* reboots.
*/
if (aux == VDEV_AUX_EXTERNAL_PERSIST) {
/*
* There are two kinds of forced faults: temporary and
* persistent. Temporary faults go away at pool import, while
* persistent faults stay set. Both types of faults can be
* cleared with a zpool clear.
*
* We tell if a vdev is persistently faulted by looking at the
* ZPOOL_CONFIG_AUX_STATE nvpair. If it's set to "external" at
* import then it's a persistent fault. Otherwise, it's
* temporary. We get ZPOOL_CONFIG_AUX_STATE set to "external"
* by setting vd.vdev_stat.vs_aux to VDEV_AUX_EXTERNAL. This
* tells vdev_config_generate() (which gets run later) to set
* ZPOOL_CONFIG_AUX_STATE to "external" in the nvlist.
*/
vd->vdev_stat.vs_aux = VDEV_AUX_EXTERNAL;
vd->vdev_tmpoffline = B_FALSE;
aux = VDEV_AUX_EXTERNAL;
} else {
vd->vdev_tmpoffline = B_TRUE;
}
/*
* We don't directly use the aux state here, but if we do a
* vdev_reopen(), we need this value to be present to remember why we
* were faulted.
*/
vd->vdev_label_aux = aux;
/*
* Faulted state takes precedence over degraded.
*/
vd->vdev_delayed_close = B_FALSE;
vd->vdev_faulted = 1ULL;
vd->vdev_degraded = 0ULL;
vdev_set_state(vd, B_FALSE, VDEV_STATE_FAULTED, aux);
/*
* If this device has the only valid copy of the data, then
* back off and simply mark the vdev as degraded instead.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL && vdev_dtl_required(vd)) {
vd->vdev_degraded = 1ULL;
vd->vdev_faulted = 0ULL;
/*
* If we reopen the device and it's not dead, only then do we
* mark it degraded.
*/
vdev_reopen(tvd);
if (vdev_readable(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, aux);
}
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Mark the given vdev degraded. A degraded vdev is purely an indication to the
* user that something is wrong. The vdev continues to operate as normal as far
* as I/O is concerned.
*/
int
vdev_degrade(spa_t *spa, uint64_t guid, vdev_aux_t aux)
{
vdev_t *vd;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
/*
* If the vdev is already faulted, then don't do anything.
*/
if (vd->vdev_faulted || vd->vdev_degraded)
return (spa_vdev_state_exit(spa, NULL, 0));
vd->vdev_degraded = 1ULL;
if (!vdev_is_dead(vd))
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED,
aux);
return (spa_vdev_state_exit(spa, vd, 0));
}
/*
* Online the given vdev.
*
* If 'ZFS_ONLINE_UNSPARE' is set, it implies two things. First, any attached
* spare device should be detached when the device finishes resilvering.
* Second, the online should be treated like a 'test' online case, so no FMA
* events are generated if the device fails to open.
*/
int
vdev_online(spa_t *spa, uint64_t guid, uint64_t flags, vdev_state_t *newstate)
{
vdev_t *vd, *tvd, *pvd, *rvd = spa->spa_root_vdev;
boolean_t wasoffline;
vdev_state_t oldstate;
spa_vdev_state_enter(spa, SCL_NONE);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
wasoffline = (vd->vdev_offline || vd->vdev_tmpoffline);
oldstate = vd->vdev_state;
tvd = vd->vdev_top;
vd->vdev_offline = B_FALSE;
vd->vdev_tmpoffline = B_FALSE;
vd->vdev_checkremove = !!(flags & ZFS_ONLINE_CHECKREMOVE);
vd->vdev_forcefault = !!(flags & ZFS_ONLINE_FORCEFAULT);
/* XXX - L2ARC 1.0 does not support expansion */
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = !!(flags & ZFS_ONLINE_EXPAND);
}
vdev_reopen(tvd);
vd->vdev_checkremove = vd->vdev_forcefault = B_FALSE;
if (!vd->vdev_aux) {
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
pvd->vdev_expanding = B_FALSE;
}
if (newstate)
*newstate = vd->vdev_state;
if ((flags & ZFS_ONLINE_UNSPARE) &&
!vdev_is_dead(vd) && vd->vdev_parent &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
if ((flags & ZFS_ONLINE_EXPAND) || spa->spa_autoexpand) {
/* XXX - L2ARC 1.0 does not support expansion */
if (vd->vdev_aux)
return (spa_vdev_state_exit(spa, vd, ENOTSUP));
spa_async_request(spa, SPA_ASYNC_CONFIG_UPDATE);
}
if (wasoffline ||
(oldstate < VDEV_STATE_DEGRADED &&
vd->vdev_state >= VDEV_STATE_DEGRADED))
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_ONLINE);
return (spa_vdev_state_exit(spa, vd, 0));
}
static int
vdev_offline_locked(spa_t *spa, uint64_t guid, uint64_t flags)
{
vdev_t *vd, *tvd;
int error = 0;
uint64_t generation;
metaslab_group_t *mg;
top:
spa_vdev_state_enter(spa, SCL_ALLOC);
if ((vd = spa_lookup_by_guid(spa, guid, B_TRUE)) == NULL)
return (spa_vdev_state_exit(spa, NULL, ENODEV));
if (!vd->vdev_ops->vdev_op_leaf)
return (spa_vdev_state_exit(spa, NULL, ENOTSUP));
tvd = vd->vdev_top;
mg = tvd->vdev_mg;
generation = spa->spa_config_generation + 1;
/*
* If the device isn't already offline, try to offline it.
*/
if (!vd->vdev_offline) {
/*
* If this device has the only valid copy of some data,
* don't allow it to be offlined. Log devices are always
* expendable.
*/
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_dtl_required(vd))
return (spa_vdev_state_exit(spa, NULL, EBUSY));
/*
* If the top-level is a slog and it has had allocations
* then proceed. We check that the vdev's metaslab group
* is not NULL since it's possible that we may have just
* added this vdev but not yet initialized its metaslabs.
*/
if (tvd->vdev_islog && mg != NULL) {
/*
* Prevent any future allocations.
*/
metaslab_group_passivate(mg);
(void) spa_vdev_state_exit(spa, vd, 0);
- error = spa_offline_log(spa);
+ error = spa_reset_logs(spa);
spa_vdev_state_enter(spa, SCL_ALLOC);
/*
* Check to see if the config has changed.
*/
if (error || generation != spa->spa_config_generation) {
metaslab_group_activate(mg);
if (error)
return (spa_vdev_state_exit(spa,
vd, error));
(void) spa_vdev_state_exit(spa, vd, 0);
goto top;
}
ASSERT0(tvd->vdev_stat.vs_alloc);
}
/*
* Offline this device and reopen its top-level vdev.
* If the top-level vdev is a log device then just offline
* it. Otherwise, if this action results in the top-level
* vdev becoming unusable, undo it and fail the request.
*/
vd->vdev_offline = B_TRUE;
vdev_reopen(tvd);
if (!tvd->vdev_islog && vd->vdev_aux == NULL &&
vdev_is_dead(tvd)) {
vd->vdev_offline = B_FALSE;
vdev_reopen(tvd);
return (spa_vdev_state_exit(spa, NULL, EBUSY));
}
/*
* Add the device back into the metaslab rotor so that
* once we online the device it's open for business.
*/
if (tvd->vdev_islog && mg != NULL)
metaslab_group_activate(mg);
}
vd->vdev_tmpoffline = !!(flags & ZFS_OFFLINE_TEMPORARY);
return (spa_vdev_state_exit(spa, vd, 0));
}
int
vdev_offline(spa_t *spa, uint64_t guid, uint64_t flags)
{
int error;
mutex_enter(&spa->spa_vdev_top_lock);
error = vdev_offline_locked(spa, guid, flags);
mutex_exit(&spa->spa_vdev_top_lock);
return (error);
}
/*
* Clear the error counts associated with this vdev. Unlike vdev_online() and
* vdev_offline(), we assume the spa config is locked. We also clear all
* children. If 'vd' is NULL, then the user wants to clear all vdevs.
*/
void
vdev_clear(spa_t *spa, vdev_t *vd)
{
vdev_t *rvd = spa->spa_root_vdev;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (vd == NULL)
vd = rvd;
vd->vdev_stat.vs_read_errors = 0;
vd->vdev_stat.vs_write_errors = 0;
vd->vdev_stat.vs_checksum_errors = 0;
for (int c = 0; c < vd->vdev_children; c++)
vdev_clear(spa, vd->vdev_child[c]);
+ /*
+ * It makes no sense to "clear" an indirect vdev.
+ */
+ if (!vdev_is_concrete(vd))
+ return;
+
/*
* If we're in the FAULTED state or have experienced failed I/O, then
* clear the persistent state and attempt to reopen the device. We
* also mark the vdev config dirty, so that the new faulted state is
* written out to disk.
*/
if (vd->vdev_faulted || vd->vdev_degraded ||
!vdev_readable(vd) || !vdev_writeable(vd)) {
/*
* When reopening in response to a clear event, it may be due to
* a fmadm repair request. In this case, if the device is
* still broken, we still want to post the ereport again.
*/
vd->vdev_forcefault = B_TRUE;
vd->vdev_faulted = vd->vdev_degraded = 0ULL;
vd->vdev_cant_read = B_FALSE;
vd->vdev_cant_write = B_FALSE;
vd->vdev_stat.vs_aux = 0;
vdev_reopen(vd == rvd ? rvd : vd->vdev_top);
vd->vdev_forcefault = B_FALSE;
if (vd != rvd && vdev_writeable(vd->vdev_top))
vdev_state_dirty(vd->vdev_top);
if (vd->vdev_aux == NULL && !vdev_is_dead(vd))
spa_async_request(spa, SPA_ASYNC_RESILVER);
spa_event_notify(spa, vd, NULL, ESC_ZFS_VDEV_CLEAR);
}
/*
* When clearing a FMA-diagnosed fault, we always want to
* unspare the device, as we assume that the original spare was
* done in response to the FMA fault.
*/
if (!vdev_is_dead(vd) && vd->vdev_parent != NULL &&
vd->vdev_parent->vdev_ops == &vdev_spare_ops &&
vd->vdev_parent->vdev_child[0] == vd)
vd->vdev_unspare = B_TRUE;
}
boolean_t
vdev_is_dead(vdev_t *vd)
{
/*
* Holes and missing devices are always considered "dead".
* This simplifies the code since we don't have to check for
* these types of devices in the various code paths.
* Instead we rely on the fact that we skip over dead devices
* before issuing I/O to them.
*/
- return (vd->vdev_state < VDEV_STATE_DEGRADED || vd->vdev_ishole ||
+ return (vd->vdev_state < VDEV_STATE_DEGRADED ||
+ vd->vdev_ops == &vdev_hole_ops ||
vd->vdev_ops == &vdev_missing_ops);
}
boolean_t
vdev_readable(vdev_t *vd)
{
return (!vdev_is_dead(vd) && !vd->vdev_cant_read);
}
boolean_t
vdev_writeable(vdev_t *vd)
{
- return (!vdev_is_dead(vd) && !vd->vdev_cant_write);
+ return (!vdev_is_dead(vd) && !vd->vdev_cant_write &&
+ vdev_is_concrete(vd));
}
boolean_t
vdev_allocatable(vdev_t *vd)
{
uint64_t state = vd->vdev_state;
/*
* We currently allow allocations from vdevs which may be in the
* process of reopening (i.e. VDEV_STATE_CLOSED). If the device
* fails to reopen then we'll catch it later when we're holding
* the proper locks. Note that we have to get the vdev state
* in a local variable because although it changes atomically,
* we're asking two separate questions about it.
*/
return (!(state < VDEV_STATE_DEGRADED && state != VDEV_STATE_CLOSED) &&
- !vd->vdev_cant_write && !vd->vdev_ishole &&
+ !vd->vdev_cant_write && vdev_is_concrete(vd) &&
vd->vdev_mg->mg_initialized);
}
boolean_t
vdev_accessible(vdev_t *vd, zio_t *zio)
{
ASSERT(zio->io_vd == vd);
if (vdev_is_dead(vd) || vd->vdev_remove_wanted)
return (B_FALSE);
if (zio->io_type == ZIO_TYPE_READ)
return (!vd->vdev_cant_read);
if (zio->io_type == ZIO_TYPE_WRITE)
return (!vd->vdev_cant_write);
return (B_TRUE);
}
static void
vdev_get_child_stat(vdev_t *cvd, vdev_stat_t *vs, vdev_stat_t *cvs)
{
int t;
for (t = 0; t < ZIO_TYPES; t++) {
vs->vs_ops[t] += cvs->vs_ops[t];
vs->vs_bytes[t] += cvs->vs_bytes[t];
}
cvs->vs_scan_removing = cvd->vdev_removing;
}
/*
* Get extended stats
*/
static void
vdev_get_child_stat_ex(vdev_t *cvd, vdev_stat_ex_t *vsx, vdev_stat_ex_t *cvsx)
{
int t, b;
for (t = 0; t < ZIO_TYPES; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_disk_histo[0]); b++)
vsx->vsx_disk_histo[t][b] += cvsx->vsx_disk_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_total_histo[0]); b++) {
vsx->vsx_total_histo[t][b] +=
cvsx->vsx_total_histo[t][b];
}
}
for (t = 0; t < ZIO_PRIORITY_NUM_QUEUEABLE; t++) {
for (b = 0; b < ARRAY_SIZE(vsx->vsx_queue_histo[0]); b++) {
vsx->vsx_queue_histo[t][b] +=
cvsx->vsx_queue_histo[t][b];
}
vsx->vsx_active_queue[t] += cvsx->vsx_active_queue[t];
vsx->vsx_pend_queue[t] += cvsx->vsx_pend_queue[t];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_ind_histo[0]); b++)
vsx->vsx_ind_histo[t][b] += cvsx->vsx_ind_histo[t][b];
for (b = 0; b < ARRAY_SIZE(vsx->vsx_agg_histo[0]); b++)
vsx->vsx_agg_histo[t][b] += cvsx->vsx_agg_histo[t][b];
}
}
/*
* Get statistics for the given vdev.
*/
static void
vdev_get_stats_ex_impl(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
int t;
/*
* If we're getting stats on the root vdev, aggregate the I/O counts
* over all top-level vdevs (i.e. the direct children of the root).
*/
if (!vd->vdev_ops->vdev_op_leaf) {
if (vs) {
memset(vs->vs_ops, 0, sizeof (vs->vs_ops));
memset(vs->vs_bytes, 0, sizeof (vs->vs_bytes));
}
if (vsx)
memset(vsx, 0, sizeof (*vsx));
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_stat_t *cvs = &cvd->vdev_stat;
vdev_stat_ex_t *cvsx = &cvd->vdev_stat_ex;
vdev_get_stats_ex_impl(cvd, cvs, cvsx);
if (vs)
vdev_get_child_stat(cvd, vs, cvs);
if (vsx)
vdev_get_child_stat_ex(cvd, vsx, cvsx);
}
} else {
/*
* We're a leaf. Just copy our ZIO active queue stats in. The
* other leaf stats are updated in vdev_stat_update().
*/
if (!vsx)
return;
memcpy(vsx, &vd->vdev_stat_ex, sizeof (vd->vdev_stat_ex));
for (t = 0; t < ARRAY_SIZE(vd->vdev_queue.vq_class); t++) {
vsx->vsx_active_queue[t] =
vd->vdev_queue.vq_class[t].vqc_active;
vsx->vsx_pend_queue[t] = avl_numnodes(
&vd->vdev_queue.vq_class[t].vqc_queued_tree);
}
}
}
void
vdev_get_stats_ex(vdev_t *vd, vdev_stat_t *vs, vdev_stat_ex_t *vsx)
{
vdev_t *tvd = vd->vdev_top;
mutex_enter(&vd->vdev_stat_lock);
if (vs) {
bcopy(&vd->vdev_stat, vs, sizeof (*vs));
vs->vs_timestamp = gethrtime() - vs->vs_timestamp;
vs->vs_state = vd->vdev_state;
vs->vs_rsize = vdev_get_min_asize(vd);
if (vd->vdev_ops->vdev_op_leaf)
vs->vs_rsize += VDEV_LABEL_START_SIZE +
VDEV_LABEL_END_SIZE;
/*
* Report expandable space on top-level, non-auxiliary devices
* only. The expandable space is reported in terms of metaslab
* sized units since that determines how much space the pool
* can expand.
*/
if (vd->vdev_aux == NULL && tvd != NULL) {
vs->vs_esize = P2ALIGN(
vd->vdev_max_asize - vd->vdev_asize,
1ULL << tvd->vdev_ms_shift);
}
vs->vs_esize = vd->vdev_max_asize - vd->vdev_asize;
if (vd->vdev_aux == NULL && vd == vd->vdev_top &&
- !vd->vdev_ishole) {
+ vdev_is_concrete(vd)) {
vs->vs_fragmentation = vd->vdev_mg->mg_fragmentation;
}
}
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_READER) != 0);
vdev_get_stats_ex_impl(vd, vs, vsx);
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_get_stats(vdev_t *vd, vdev_stat_t *vs)
{
return (vdev_get_stats_ex(vd, vs, NULL));
}
void
vdev_clear_stats(vdev_t *vd)
{
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_space = 0;
vd->vdev_stat.vs_dspace = 0;
vd->vdev_stat.vs_alloc = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_scan_stat_init(vdev_t *vd)
{
vdev_stat_t *vs = &vd->vdev_stat;
for (int c = 0; c < vd->vdev_children; c++)
vdev_scan_stat_init(vd->vdev_child[c]);
mutex_enter(&vd->vdev_stat_lock);
vs->vs_scan_processed = 0;
mutex_exit(&vd->vdev_stat_lock);
}
void
vdev_stat_update(zio_t *zio, uint64_t psize)
{
spa_t *spa = zio->io_spa;
vdev_t *rvd = spa->spa_root_vdev;
vdev_t *vd = zio->io_vd ? zio->io_vd : rvd;
vdev_t *pvd;
uint64_t txg = zio->io_txg;
vdev_stat_t *vs = &vd->vdev_stat;
vdev_stat_ex_t *vsx = &vd->vdev_stat_ex;
zio_type_t type = zio->io_type;
int flags = zio->io_flags;
/*
* If this i/o is a gang leader, it didn't do any actual work.
*/
if (zio->io_gang_tree)
return;
if (zio->io_error == 0) {
/*
* If this is a root i/o, don't count it -- we've already
* counted the top-level vdevs, and vdev_get_stats() will
* aggregate them when asked. This reduces contention on
* the root vdev_stat_lock and implicitly handles blocks
* that compress away to holes, for which there is no i/o.
* (Holes never create vdev children, so all the counters
* remain zero, which is what we want.)
*
* Note: this only applies to successful i/o (io_error == 0)
* because unlike i/o counts, errors are not additive.
* When reading a ditto block, for example, failure of
* one top-level vdev does not imply a root-level error.
*/
if (vd == rvd)
return;
ASSERT(vd == zio->io_vd);
if (flags & ZIO_FLAG_IO_BYPASS)
return;
mutex_enter(&vd->vdev_stat_lock);
if (flags & ZIO_FLAG_IO_REPAIR) {
if (flags & ZIO_FLAG_SCAN_THREAD) {
dsl_scan_phys_t *scn_phys =
&spa->spa_dsl_pool->dp_scan->scn_phys;
uint64_t *processed = &scn_phys->scn_processed;
/* XXX cleanup? */
if (vd->vdev_ops->vdev_op_leaf)
atomic_add_64(processed, psize);
vs->vs_scan_processed += psize;
}
if (flags & ZIO_FLAG_SELF_HEAL)
vs->vs_self_healed += psize;
}
/*
* The bytes/ops/histograms are recorded at the leaf level and
* aggregated into the higher level vdevs in vdev_get_stats().
*/
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_priority < ZIO_PRIORITY_NUM_QUEUEABLE)) {
vs->vs_ops[type]++;
vs->vs_bytes[type] += psize;
if (flags & ZIO_FLAG_DELEGATED) {
vsx->vsx_agg_histo[zio->io_priority]
[RQ_HISTO(zio->io_size)]++;
} else {
vsx->vsx_ind_histo[zio->io_priority]
[RQ_HISTO(zio->io_size)]++;
}
if (zio->io_delta && zio->io_delay) {
vsx->vsx_queue_histo[zio->io_priority]
[L_HISTO(zio->io_delta - zio->io_delay)]++;
vsx->vsx_disk_histo[type]
[L_HISTO(zio->io_delay)]++;
vsx->vsx_total_histo[type]
[L_HISTO(zio->io_delta)]++;
}
}
mutex_exit(&vd->vdev_stat_lock);
return;
}
if (flags & ZIO_FLAG_SPECULATIVE)
return;
/*
* If this is an I/O error that is going to be retried, then ignore the
* error. Otherwise, the user may interpret B_FAILFAST I/O errors as
* hard errors, when in reality they can happen for any number of
* innocuous reasons (bus resets, MPxIO link failure, etc).
*/
if (zio->io_error == EIO &&
!(zio->io_flags & ZIO_FLAG_IO_RETRY))
return;
/*
* Intent log writes won't propagate their error to the root
* I/O so don't mark these types of failures as pool-level
* errors.
*/
if (zio->io_vd == NULL && (zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
return;
mutex_enter(&vd->vdev_stat_lock);
if (type == ZIO_TYPE_READ && !vdev_is_dead(vd)) {
if (zio->io_error == ECKSUM)
vs->vs_checksum_errors++;
else
vs->vs_read_errors++;
}
if (type == ZIO_TYPE_WRITE && !vdev_is_dead(vd))
vs->vs_write_errors++;
mutex_exit(&vd->vdev_stat_lock);
- if (type == ZIO_TYPE_WRITE && txg != 0 &&
+ if (spa->spa_load_state == SPA_LOAD_NONE &&
+ type == ZIO_TYPE_WRITE && txg != 0 &&
(!(flags & ZIO_FLAG_IO_REPAIR) ||
(flags & ZIO_FLAG_SCAN_THREAD) ||
spa->spa_claiming)) {
/*
* This is either a normal write (not a repair), or it's
* a repair induced by the scrub thread, or it's a repair
* made by zil_claim() during spa_load() in the first txg.
* In the normal case, we commit the DTL change in the same
* txg as the block was born. In the scrub-induced repair
* case, we know that scrubs run in first-pass syncing context,
* so we commit the DTL change in spa_syncing_txg(spa).
* In the zil_claim() case, we commit in spa_first_txg(spa).
*
* We currently do not make DTL entries for failed spontaneous
* self-healing writes triggered by normal (non-scrubbing)
* reads, because we have no transactional context in which to
* do so -- and it's not clear that it'd be desirable anyway.
*/
if (vd->vdev_ops->vdev_op_leaf) {
uint64_t commit_txg = txg;
if (flags & ZIO_FLAG_SCAN_THREAD) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
ASSERT(spa_sync_pass(spa) == 1);
vdev_dtl_dirty(vd, DTL_SCRUB, txg, 1);
commit_txg = spa_syncing_txg(spa);
} else if (spa->spa_claiming) {
ASSERT(flags & ZIO_FLAG_IO_REPAIR);
commit_txg = spa_first_txg(spa);
}
ASSERT(commit_txg >= spa_syncing_txg(spa));
if (vdev_dtl_contains(vd, DTL_MISSING, txg, 1))
return;
for (pvd = vd; pvd != rvd; pvd = pvd->vdev_parent)
vdev_dtl_dirty(pvd, DTL_PARTIAL, txg, 1);
vdev_dirty(vd->vdev_top, VDD_DTL, vd, commit_txg);
}
if (vd != rvd)
vdev_dtl_dirty(vd, DTL_MISSING, txg, 1);
}
}
/*
* Update the in-core space usage stats for this vdev, its metaslab class,
* and the root vdev.
*/
void
vdev_space_update(vdev_t *vd, int64_t alloc_delta, int64_t defer_delta,
int64_t space_delta)
{
int64_t dspace_delta = space_delta;
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
metaslab_group_t *mg = vd->vdev_mg;
metaslab_class_t *mc = mg ? mg->mg_class : NULL;
ASSERT(vd == vd->vdev_top);
/*
* Apply the inverse of the psize-to-asize (i.e. RAID-Z) space-expansion
* factor. We must calculate this here and not at the root vdev
* because the root vdev's psize-to-asize is simply the max of its
* children's, thus not accurate enough for us.
*/
ASSERT((dspace_delta & (SPA_MINBLOCKSIZE-1)) == 0);
ASSERT(vd->vdev_deflate_ratio != 0 || vd->vdev_isl2cache);
dspace_delta = (dspace_delta >> SPA_MINBLOCKSHIFT) *
vd->vdev_deflate_ratio;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_alloc += alloc_delta;
vd->vdev_stat.vs_space += space_delta;
vd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&vd->vdev_stat_lock);
if (mc == spa_normal_class(spa)) {
mutex_enter(&rvd->vdev_stat_lock);
rvd->vdev_stat.vs_alloc += alloc_delta;
rvd->vdev_stat.vs_space += space_delta;
rvd->vdev_stat.vs_dspace += dspace_delta;
mutex_exit(&rvd->vdev_stat_lock);
}
if (mc != NULL) {
ASSERT(rvd == vd->vdev_parent);
ASSERT(vd->vdev_ms_count != 0);
metaslab_class_space_update(mc,
alloc_delta, defer_delta, space_delta, dspace_delta);
}
}
/*
* Mark a top-level vdev's config as dirty, placing it on the dirty list
* so that it will be written out next time the vdev configuration is synced.
* If the root vdev is specified (vdev_top == NULL), dirty all top-level vdevs.
*/
void
vdev_config_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int c;
ASSERT(spa_writeable(spa));
/*
* If this is an aux vdev (as with l2cache and spare devices), then we
* update the vdev config manually and set the sync flag.
*/
if (vd->vdev_aux != NULL) {
spa_aux_vdev_t *sav = vd->vdev_aux;
nvlist_t **aux;
uint_t naux;
for (c = 0; c < sav->sav_count; c++) {
if (sav->sav_vdevs[c] == vd)
break;
}
if (c == sav->sav_count) {
/*
* We're being removed. There's nothing more to do.
*/
ASSERT(sav->sav_sync == B_TRUE);
return;
}
sav->sav_sync = B_TRUE;
if (nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_L2CACHE, &aux, &naux) != 0) {
VERIFY(nvlist_lookup_nvlist_array(sav->sav_config,
ZPOOL_CONFIG_SPARES, &aux, &naux) == 0);
}
ASSERT(c < naux);
/*
* Setting the nvlist in the middle of the array is a little
* sketchy, but it will work.
*/
nvlist_free(aux[c]);
aux[c] = vdev_config_generate(spa, vd, B_TRUE, 0);
return;
}
/*
* The dirty list is protected by the SCL_CONFIG lock. The caller
* must either hold SCL_CONFIG as writer, or must be the sync thread
* (which holds SCL_CONFIG as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
if (vd == rvd) {
for (c = 0; c < rvd->vdev_children; c++)
vdev_config_dirty(rvd->vdev_child[c]);
} else {
ASSERT(vd == vd->vdev_top);
if (!list_link_active(&vd->vdev_config_dirty_node) &&
- !vd->vdev_ishole)
+ vdev_is_concrete(vd)) {
list_insert_head(&spa->spa_config_dirty_list, vd);
+ }
}
}
void
vdev_config_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_CONFIG, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_CONFIG, RW_READER)));
ASSERT(list_link_active(&vd->vdev_config_dirty_node));
list_remove(&spa->spa_config_dirty_list, vd);
}
/*
* Mark a top-level vdev's state as dirty, so that the next pass of
* spa_sync() can convert this into vdev_config_dirty(). We distinguish
* the state changes from larger config changes because they require
* much less locking, and are often needed for administrative actions.
*/
void
vdev_state_dirty(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_writeable(spa));
ASSERT(vd == vd->vdev_top);
/*
* The state list is protected by the SCL_STATE lock. The caller
* must either hold SCL_STATE as writer, or must be the sync thread
* (which holds SCL_STATE as reader). There's only one sync thread,
* so this is sufficient to ensure mutual exclusion.
*/
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
- if (!list_link_active(&vd->vdev_state_dirty_node) && !vd->vdev_ishole)
+ if (!list_link_active(&vd->vdev_state_dirty_node) &&
+ vdev_is_concrete(vd))
list_insert_head(&spa->spa_state_dirty_list, vd);
}
void
vdev_state_clean(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
ASSERT(spa_config_held(spa, SCL_STATE, RW_WRITER) ||
(dsl_pool_sync_context(spa_get_dsl(spa)) &&
spa_config_held(spa, SCL_STATE, RW_READER)));
ASSERT(list_link_active(&vd->vdev_state_dirty_node));
list_remove(&spa->spa_state_dirty_list, vd);
}
/*
* Propagate vdev state up from children to parent.
*/
void
vdev_propagate_state(vdev_t *vd)
{
spa_t *spa = vd->vdev_spa;
vdev_t *rvd = spa->spa_root_vdev;
int degraded = 0, faulted = 0;
int corrupted = 0;
vdev_t *child;
if (vd->vdev_children > 0) {
for (int c = 0; c < vd->vdev_children; c++) {
child = vd->vdev_child[c];
/*
- * Don't factor holes into the decision.
+ * Don't factor holes or indirect vdevs into the
+ * decision.
*/
- if (child->vdev_ishole)
+ if (!vdev_is_concrete(child))
continue;
if (!vdev_readable(child) ||
(!vdev_writeable(child) && spa_writeable(spa))) {
/*
* Root special: if there is a top-level log
* device, treat the root vdev as if it were
* degraded.
*/
if (child->vdev_islog && vd == rvd)
degraded++;
else
faulted++;
} else if (child->vdev_state <= VDEV_STATE_DEGRADED) {
degraded++;
}
if (child->vdev_stat.vs_aux == VDEV_AUX_CORRUPT_DATA)
corrupted++;
}
vd->vdev_ops->vdev_op_state_change(vd, faulted, degraded);
/*
* Root special: if there is a top-level vdev that cannot be
* opened due to corrupted metadata, then propagate the root
* vdev's aux state as 'corrupt' rather than 'insufficient
* replicas'.
*/
if (corrupted && vd == rvd &&
rvd->vdev_state == VDEV_STATE_CANT_OPEN)
vdev_set_state(rvd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_CORRUPT_DATA);
}
if (vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Set a vdev's state. If this is during an open, we don't update the parent
* state, because we're in the process of opening children depth-first.
* Otherwise, we propagate the change to the parent.
*
* If this routine places a device in a faulted state, an appropriate ereport is
* generated.
*/
void
vdev_set_state(vdev_t *vd, boolean_t isopen, vdev_state_t state, vdev_aux_t aux)
{
uint64_t save_state;
spa_t *spa = vd->vdev_spa;
if (state == vd->vdev_state) {
/*
* Since the vdev_offline() code path is already in an offline
* state, we can miss a statechange event to OFFLINE. Check
* the previous state to catch this condition.
*/
if (vd->vdev_ops->vdev_op_leaf &&
(state == VDEV_STATE_OFFLINE) &&
(vd->vdev_prevstate >= VDEV_STATE_FAULTED)) {
/* post an offline state change */
zfs_post_state_change(spa, vd, vd->vdev_prevstate);
}
vd->vdev_stat.vs_aux = aux;
return;
}
save_state = vd->vdev_state;
vd->vdev_state = state;
vd->vdev_stat.vs_aux = aux;
/*
* If we are setting the vdev state to anything but an open state, then
* always close the underlying device unless the device has requested
* a delayed close (i.e. we're about to remove or fault the device).
* Otherwise, we keep accessible but invalid devices open forever.
* We don't call vdev_close() itself, because that implies some extra
* checks (offline, etc) that we don't want here. This is limited to
* leaf devices, because otherwise closing the device will affect other
* children.
*/
if (!vd->vdev_delayed_close && vdev_is_dead(vd) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_ops->vdev_op_close(vd);
if (vd->vdev_removed &&
state == VDEV_STATE_CANT_OPEN &&
(aux == VDEV_AUX_OPEN_FAILED || vd->vdev_checkremove)) {
/*
* If the previous state is set to VDEV_STATE_REMOVED, then this
* device was previously marked removed and someone attempted to
* reopen it. If this failed due to a nonexistent device, then
* keep the device in the REMOVED state. We also allow this if
* it is one of our special test online cases, which are only
* attempting to online the device and shouldn't generate an FMA
* fault.
*/
vd->vdev_state = VDEV_STATE_REMOVED;
vd->vdev_stat.vs_aux = VDEV_AUX_NONE;
} else if (state == VDEV_STATE_REMOVED) {
vd->vdev_removed = B_TRUE;
} else if (state == VDEV_STATE_CANT_OPEN) {
/*
* If we fail to open a vdev during an import or recovery, we
* mark it as "not available", which signifies that it was
* never there to begin with. Failure to open such a device
* is not considered an error.
*/
if ((spa_load_state(spa) == SPA_LOAD_IMPORT ||
spa_load_state(spa) == SPA_LOAD_RECOVER) &&
vd->vdev_ops->vdev_op_leaf)
vd->vdev_not_present = 1;
/*
* Post the appropriate ereport. If the 'prevstate' field is
* set to something other than VDEV_STATE_UNKNOWN, it indicates
* that this is part of a vdev_reopen(). In this case, we don't
* want to post the ereport if the device was already in the
* CANT_OPEN state beforehand.
*
* If the 'checkremove' flag is set, then this is an attempt to
* online the device in response to an insertion event. If we
* hit this case, then we have detected an insertion event for a
* faulted or offline device that wasn't in the removed state.
* In this scenario, we don't post an ereport because we are
* about to replace the device, or attempt an online with
* vdev_forcefault, which will generate the fault for us.
*/
if ((vd->vdev_prevstate != state || vd->vdev_forcefault) &&
!vd->vdev_not_present && !vd->vdev_checkremove &&
vd != spa->spa_root_vdev) {
const char *class;
switch (aux) {
case VDEV_AUX_OPEN_FAILED:
class = FM_EREPORT_ZFS_DEVICE_OPEN_FAILED;
break;
case VDEV_AUX_CORRUPT_DATA:
class = FM_EREPORT_ZFS_DEVICE_CORRUPT_DATA;
break;
case VDEV_AUX_NO_REPLICAS:
class = FM_EREPORT_ZFS_DEVICE_NO_REPLICAS;
break;
case VDEV_AUX_BAD_GUID_SUM:
class = FM_EREPORT_ZFS_DEVICE_BAD_GUID_SUM;
break;
case VDEV_AUX_TOO_SMALL:
class = FM_EREPORT_ZFS_DEVICE_TOO_SMALL;
break;
case VDEV_AUX_BAD_LABEL:
class = FM_EREPORT_ZFS_DEVICE_BAD_LABEL;
break;
case VDEV_AUX_BAD_ASHIFT:
class = FM_EREPORT_ZFS_DEVICE_BAD_ASHIFT;
break;
default:
class = FM_EREPORT_ZFS_DEVICE_UNKNOWN;
}
zfs_ereport_post(class, spa, vd, NULL, NULL,
save_state, 0);
}
/* Erase any notion of persistent removed state */
vd->vdev_removed = B_FALSE;
} else {
vd->vdev_removed = B_FALSE;
}
/*
* Notify ZED of any significant state-change on a leaf vdev.
*/
if (vd->vdev_ops->vdev_op_leaf) {
/* preserve original state from a vdev_reopen() */
if ((vd->vdev_prevstate != VDEV_STATE_UNKNOWN) &&
(vd->vdev_prevstate != vd->vdev_state) &&
(save_state <= VDEV_STATE_CLOSED))
save_state = vd->vdev_prevstate;
/* filter out state change due to initial vdev_open */
if (save_state > VDEV_STATE_CLOSED)
zfs_post_state_change(spa, vd, save_state);
}
if (!isopen && vd->vdev_parent)
vdev_propagate_state(vd->vdev_parent);
}
/*
* Check the vdev configuration to ensure that it's capable of supporting
* a root pool. We do not support partial configuration.
*/
boolean_t
vdev_is_bootable(vdev_t *vd)
{
if (!vd->vdev_ops->vdev_op_leaf) {
const char *vdev_type = vd->vdev_ops->vdev_op_type;
- if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0)
+ if (strcmp(vdev_type, VDEV_TYPE_MISSING) == 0 ||
+ strcmp(vdev_type, VDEV_TYPE_INDIRECT) == 0) {
return (B_FALSE);
+ }
}
for (int c = 0; c < vd->vdev_children; c++) {
if (!vdev_is_bootable(vd->vdev_child[c]))
return (B_FALSE);
}
return (B_TRUE);
}
+boolean_t
+vdev_is_concrete(vdev_t *vd)
+{
+ vdev_ops_t *ops = vd->vdev_ops;
+ if (ops == &vdev_indirect_ops || ops == &vdev_hole_ops ||
+ ops == &vdev_missing_ops || ops == &vdev_root_ops) {
+ return (B_FALSE);
+ } else {
+ return (B_TRUE);
+ }
+}
+
/*
* Load the state from the original vdev tree (ovd) which
* we've retrieved from the MOS config object. If the original
* vdev was offline or faulted then we transfer that state to the
* device in the current vdev tree (nvd).
*/
void
vdev_load_log_state(vdev_t *nvd, vdev_t *ovd)
{
ASSERT(nvd->vdev_top->vdev_islog);
ASSERT(spa_config_held(nvd->vdev_spa,
SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
ASSERT3U(nvd->vdev_guid, ==, ovd->vdev_guid);
for (int c = 0; c < nvd->vdev_children; c++)
vdev_load_log_state(nvd->vdev_child[c], ovd->vdev_child[c]);
if (nvd->vdev_ops->vdev_op_leaf) {
/*
* Restore the persistent vdev state
*/
nvd->vdev_offline = ovd->vdev_offline;
nvd->vdev_faulted = ovd->vdev_faulted;
nvd->vdev_degraded = ovd->vdev_degraded;
nvd->vdev_removed = ovd->vdev_removed;
}
}
/*
* Determine if a log device has valid content. If the vdev was
* removed or faulted in the MOS config then we know that
* the content on the log device has already been written to the pool.
*/
boolean_t
vdev_log_state_valid(vdev_t *vd)
{
if (vd->vdev_ops->vdev_op_leaf && !vd->vdev_faulted &&
!vd->vdev_removed)
return (B_TRUE);
for (int c = 0; c < vd->vdev_children; c++)
if (vdev_log_state_valid(vd->vdev_child[c]))
return (B_TRUE);
return (B_FALSE);
}
/*
* Expand a vdev if possible.
*/
void
vdev_expand(vdev_t *vd, uint64_t txg)
{
ASSERT(vd->vdev_top == vd);
ASSERT(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER) == SCL_ALL);
- if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count) {
+ vdev_set_deflate_ratio(vd);
+
+ if ((vd->vdev_asize >> vd->vdev_ms_shift) > vd->vdev_ms_count &&
+ vdev_is_concrete(vd)) {
VERIFY(vdev_metaslab_init(vd, txg) == 0);
vdev_config_dirty(vd);
}
}
/*
* Split a vdev.
*/
void
vdev_split(vdev_t *vd)
{
vdev_t *cvd, *pvd = vd->vdev_parent;
vdev_remove_child(pvd, vd);
vdev_compact_children(pvd);
cvd = pvd->vdev_child[0];
if (pvd->vdev_children == 1) {
vdev_remove_parent(cvd);
cvd->vdev_splitting = B_TRUE;
}
vdev_propagate_state(cvd);
}
void
vdev_deadman(vdev_t *vd, char *tag)
{
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
vdev_deadman(cvd, tag);
}
if (vd->vdev_ops->vdev_op_leaf) {
vdev_queue_t *vq = &vd->vdev_queue;
mutex_enter(&vq->vq_lock);
if (avl_numnodes(&vq->vq_active_tree) > 0) {
spa_t *spa = vd->vdev_spa;
zio_t *fio;
uint64_t delta;
zfs_dbgmsg("slow vdev: %s has %d active IOs",
vd->vdev_path, avl_numnodes(&vq->vq_active_tree));
/*
* Look at the head of all the pending queues;
* if any I/O has been outstanding for longer than
* the spa_deadman_synctime, invoke the deadman logic.
*/
fio = avl_first(&vq->vq_active_tree);
delta = gethrtime() - fio->io_timestamp;
if (delta > spa_deadman_synctime(spa))
zio_deadman(fio, tag);
}
mutex_exit(&vq->vq_lock);
}
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(vdev_fault);
EXPORT_SYMBOL(vdev_degrade);
EXPORT_SYMBOL(vdev_online);
EXPORT_SYMBOL(vdev_offline);
EXPORT_SYMBOL(vdev_clear);
/* BEGIN CSTYLED */
module_param(metaslabs_per_vdev, int, 0644);
MODULE_PARM_DESC(metaslabs_per_vdev,
"Divide added vdev into approximately (but no more than) this number "
"of metaslabs");
module_param(zfs_delays_per_second, uint, 0644);
MODULE_PARM_DESC(zfs_delays_per_second, "Rate limit delay events to this many "
"IO delays per second");
module_param(zfs_checksums_per_second, uint, 0644);
MODULE_PARM_DESC(zfs_checksums_per_second, "Rate limit checksum events "
"to this many checksum errors per second (do not set below zed"
"threshold).");
module_param(zfs_scan_ignore_errors, int, 0644);
MODULE_PARM_DESC(zfs_scan_ignore_errors,
"Ignore errors during resilver/scrub");
/* END CSTYLED */
#endif
diff --git a/module/zfs/vdev_disk.c b/module/zfs/vdev_disk.c
index 788503bcdb67..056381c9d830 100644
--- a/module/zfs/vdev_disk.c
+++ b/module/zfs/vdev_disk.c
@@ -1,852 +1,853 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (C) 2008-2010 Lawrence Livermore National Security, LLC.
* Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER).
* Rewritten for Linux by Brian Behlendorf <behlendorf1@llnl.gov>.
* LLNL-CODE-403049.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa_impl.h>
#include <sys/vdev_disk.h>
#include <sys/vdev_impl.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
#include <sys/sunldi.h>
#include <linux/mod_compat.h>
char *zfs_vdev_scheduler = VDEV_SCHEDULER;
static void *zfs_vdev_holder = VDEV_HOLDER;
/*
* Virtual device vector for disks.
*/
typedef struct dio_request {
zio_t *dr_zio; /* Parent ZIO */
atomic_t dr_ref; /* References */
int dr_error; /* Bio error */
int dr_bio_count; /* Count of bio's */
struct bio *dr_bio[0]; /* Attached bio's */
} dio_request_t;
#ifdef HAVE_OPEN_BDEV_EXCLUSIVE
static fmode_t
vdev_bdev_mode(int smode)
{
fmode_t mode = 0;
ASSERT3S(smode & (FREAD | FWRITE), !=, 0);
if (smode & FREAD)
mode |= FMODE_READ;
if (smode & FWRITE)
mode |= FMODE_WRITE;
return (mode);
}
#else
static int
vdev_bdev_mode(int smode)
{
int mode = 0;
ASSERT3S(smode & (FREAD | FWRITE), !=, 0);
if ((smode & FREAD) && !(smode & FWRITE))
mode = MS_RDONLY;
return (mode);
}
#endif /* HAVE_OPEN_BDEV_EXCLUSIVE */
static uint64_t
bdev_capacity(struct block_device *bdev)
{
struct hd_struct *part = bdev->bd_part;
/* The partition capacity referenced by the block device */
if (part)
return (part->nr_sects << 9);
/* Otherwise assume the full device capacity */
return (get_capacity(bdev->bd_disk) << 9);
}
static void
vdev_disk_error(zio_t *zio)
{
#ifdef ZFS_DEBUG
printk(KERN_WARNING "ZFS: zio error=%d type=%d offset=%llu size=%llu "
"flags=%x\n", zio->io_error, zio->io_type,
(u_longlong_t)zio->io_offset, (u_longlong_t)zio->io_size,
zio->io_flags);
#endif
}
/*
* Use the Linux 'noop' elevator for zfs managed block devices. This
* strikes the ideal balance by allowing the zfs elevator to do all
* request ordering and prioritization, while allowing the Linux
* elevator to do the maximum front/back merging allowed by the
* physical device. This yields the largest possible requests for
* the device with the lowest total overhead.
*/
static void
vdev_elevator_switch(vdev_t *v, char *elevator)
{
vdev_disk_t *vd = v->vdev_tsd;
struct request_queue *q;
char *device;
int error;
for (int c = 0; c < v->vdev_children; c++)
vdev_elevator_switch(v->vdev_child[c], elevator);
if (!v->vdev_ops->vdev_op_leaf || vd->vd_bdev == NULL)
return;
q = bdev_get_queue(vd->vd_bdev);
device = vd->vd_bdev->bd_disk->disk_name;
/*
* Skip devices which are not whole disks (partitions).
* Device-mapper devices are excepted since they may be whole
* disks despite the vdev_wholedisk flag, in which case we can
* and should switch the elevator. If the device-mapper device
* does not have an elevator (i.e. dm-raid, dm-crypt, etc.) the
* "Skip devices without schedulers" check below will fail.
*/
if (!v->vdev_wholedisk && strncmp(device, "dm-", 3) != 0)
return;
/* Skip devices without schedulers (loop, ram, dm, etc) */
if (!q->elevator || !blk_queue_stackable(q))
return;
/* Leave existing scheduler when set to "none" */
if ((strncmp(elevator, "none", 4) == 0) && (strlen(elevator) == 4))
return;
#ifdef HAVE_ELEVATOR_CHANGE
error = elevator_change(q, elevator);
#else
/*
* For pre-2.6.36 kernels elevator_change() is not available.
* Therefore we fall back to using a usermodehelper to echo the
* elevator into sysfs; This requires /bin/echo and sysfs to be
* mounted which may not be true early in the boot process.
*/
#define SET_SCHEDULER_CMD \
"exec 0</dev/null " \
" 1>/sys/block/%s/queue/scheduler " \
" 2>/dev/null; " \
"echo %s"
char *argv[] = { "/bin/sh", "-c", NULL, NULL };
char *envp[] = { NULL };
argv[2] = kmem_asprintf(SET_SCHEDULER_CMD, device, elevator);
error = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
strfree(argv[2]);
#endif /* HAVE_ELEVATOR_CHANGE */
if (error)
printk(KERN_NOTICE "ZFS: Unable to set \"%s\" scheduler"
" for %s (%s): %d\n", elevator, v->vdev_path, device,
error);
}
/*
* Expanding a whole disk vdev involves invoking BLKRRPART on the
* whole disk device. This poses a problem, because BLKRRPART will
* return EBUSY if one of the disk's partitions is open. That's why
* we have to do it here, just before opening the data partition.
* Unfortunately, BLKRRPART works by dropping all partitions and
* recreating them, which means that for a short time window, all
* /dev/sdxN device files disappear (until udev recreates them).
* This means two things:
* - When we open the data partition just after a BLKRRPART, we
* can't do it using the normal device file path because of the
* obvious race condition with udev. Instead, we use reliable
* kernel APIs to get a handle to the new partition device from
* the whole disk device.
* - Because vdev_disk_open() initially needs to find the device
* using its path, multiple vdev_disk_open() invocations in
* short succession on the same disk with BLKRRPARTs in the
* middle have a high probability of failure (because of the
* race condition with udev). A typical situation where this
* might happen is when the zpool userspace tool does a
* TRYIMPORT immediately followed by an IMPORT. For this
* reason, we only invoke BLKRRPART in the module when strictly
* necessary (zpool online -e case), and rely on userspace to
* do it when possible.
*/
static struct block_device *
vdev_disk_rrpart(const char *path, int mode, vdev_disk_t *vd)
{
#if defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK)
struct block_device *bdev, *result = ERR_PTR(-ENXIO);
struct gendisk *disk;
int error, partno;
bdev = vdev_bdev_open(path, vdev_bdev_mode(mode), zfs_vdev_holder);
if (IS_ERR(bdev))
return (bdev);
disk = get_gendisk(bdev->bd_dev, &partno);
vdev_bdev_close(bdev, vdev_bdev_mode(mode));
if (disk) {
bdev = bdget(disk_devt(disk));
if (bdev) {
error = blkdev_get(bdev, vdev_bdev_mode(mode), vd);
if (error == 0)
error = ioctl_by_bdev(bdev, BLKRRPART, 0);
vdev_bdev_close(bdev, vdev_bdev_mode(mode));
}
bdev = bdget_disk(disk, partno);
if (bdev) {
error = blkdev_get(bdev,
vdev_bdev_mode(mode) | FMODE_EXCL, vd);
if (error == 0)
result = bdev;
}
put_disk(disk);
}
return (result);
#else
return (ERR_PTR(-EOPNOTSUPP));
#endif /* defined(HAVE_3ARG_BLKDEV_GET) && defined(HAVE_GET_GENDISK) */
}
static int
vdev_disk_open(vdev_t *v, uint64_t *psize, uint64_t *max_psize,
uint64_t *ashift)
{
struct block_device *bdev = ERR_PTR(-ENXIO);
vdev_disk_t *vd;
int count = 0, mode, block_size;
/* Must have a pathname and it must be absolute. */
if (v->vdev_path == NULL || v->vdev_path[0] != '/') {
v->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
/*
* Reopen the device if it's not currently open. Otherwise,
* just update the physical size of the device.
*/
if (v->vdev_tsd != NULL) {
ASSERT(v->vdev_reopening);
vd = v->vdev_tsd;
goto skip_open;
}
vd = kmem_zalloc(sizeof (vdev_disk_t), KM_SLEEP);
if (vd == NULL)
return (SET_ERROR(ENOMEM));
/*
* Devices are always opened by the path provided at configuration
* time. This means that if the provided path is a udev by-id path
* then drives may be recabled without an issue. If the provided
* path is a udev by-path path, then the physical location information
* will be preserved. This can be critical for more complicated
* configurations where drives are located in specific physical
* locations to maximize the system's tolerance to component failure.
* Alternatively, you can provide your own udev rule to flexibly map
* the drives as you see fit. It is not advised that you use the
* /dev/[hd]d devices which may be reordered due to probing order.
* Devices in the wrong locations will be detected by the higher
* level vdev validation.
*
* The specified paths may be briefly removed and recreated in
* response to udev events. This should be exceptionally unlikely
* because the zpool command makes every effort to verify these paths
* have already settled prior to reaching this point. Therefore,
* an ENOENT failure at this point is highly likely to be transient
* and it is reasonable to sleep and retry before giving up. In
* practice delays have been observed to be on the order of 100ms.
*/
mode = spa_mode(v->vdev_spa);
if (v->vdev_wholedisk && v->vdev_expanding)
bdev = vdev_disk_rrpart(v->vdev_path, mode, vd);
while (IS_ERR(bdev) && count < 50) {
bdev = vdev_bdev_open(v->vdev_path,
vdev_bdev_mode(mode), zfs_vdev_holder);
if (unlikely(PTR_ERR(bdev) == -ENOENT)) {
msleep(10);
count++;
} else if (IS_ERR(bdev)) {
break;
}
}
if (IS_ERR(bdev)) {
dprintf("failed open v->vdev_path=%s, error=%d count=%d\n",
v->vdev_path, -PTR_ERR(bdev), count);
kmem_free(vd, sizeof (vdev_disk_t));
return (SET_ERROR(-PTR_ERR(bdev)));
}
v->vdev_tsd = vd;
vd->vd_bdev = bdev;
skip_open:
/* Determine the physical block size */
block_size = vdev_bdev_block_size(vd->vd_bdev);
/* Clear the nowritecache bit, causes vdev_reopen() to try again. */
v->vdev_nowritecache = B_FALSE;
/* Inform the ZIO pipeline that we are non-rotational */
v->vdev_nonrot = blk_queue_nonrot(bdev_get_queue(vd->vd_bdev));
/* Physical volume size in bytes */
*psize = bdev_capacity(vd->vd_bdev);
/* TODO: report possible expansion size */
*max_psize = *psize;
/* Based on the minimum sector size set the block size */
*ashift = highbit64(MAX(block_size, SPA_MINBLOCKSIZE)) - 1;
/* Try to set the io scheduler elevator algorithm */
(void) vdev_elevator_switch(v, zfs_vdev_scheduler);
return (0);
}
static void
vdev_disk_close(vdev_t *v)
{
vdev_disk_t *vd = v->vdev_tsd;
if (v->vdev_reopening || vd == NULL)
return;
if (vd->vd_bdev != NULL)
vdev_bdev_close(vd->vd_bdev,
vdev_bdev_mode(spa_mode(v->vdev_spa)));
kmem_free(vd, sizeof (vdev_disk_t));
v->vdev_tsd = NULL;
}
static dio_request_t *
vdev_disk_dio_alloc(int bio_count)
{
dio_request_t *dr;
int i;
dr = kmem_zalloc(sizeof (dio_request_t) +
sizeof (struct bio *) * bio_count, KM_SLEEP);
if (dr) {
atomic_set(&dr->dr_ref, 0);
dr->dr_bio_count = bio_count;
dr->dr_error = 0;
for (i = 0; i < dr->dr_bio_count; i++)
dr->dr_bio[i] = NULL;
}
return (dr);
}
static void
vdev_disk_dio_free(dio_request_t *dr)
{
int i;
for (i = 0; i < dr->dr_bio_count; i++)
if (dr->dr_bio[i])
bio_put(dr->dr_bio[i]);
kmem_free(dr, sizeof (dio_request_t) +
sizeof (struct bio *) * dr->dr_bio_count);
}
static void
vdev_disk_dio_get(dio_request_t *dr)
{
atomic_inc(&dr->dr_ref);
}
static int
vdev_disk_dio_put(dio_request_t *dr)
{
int rc = atomic_dec_return(&dr->dr_ref);
/*
* Free the dio_request when the last reference is dropped and
* ensure zio_delay_interrupt is called only once with the correct zio
*/
if (rc == 0) {
zio_t *zio = dr->dr_zio;
int error = dr->dr_error;
vdev_disk_dio_free(dr);
if (zio) {
zio->io_error = error;
ASSERT3S(zio->io_error, >=, 0);
if (zio->io_error)
vdev_disk_error(zio);
zio_delay_interrupt(zio);
}
}
return (rc);
}
BIO_END_IO_PROTO(vdev_disk_physio_completion, bio, error)
{
dio_request_t *dr = bio->bi_private;
int rc;
if (dr->dr_error == 0) {
#ifdef HAVE_1ARG_BIO_END_IO_T
dr->dr_error = BIO_END_IO_ERROR(bio);
#else
if (error)
dr->dr_error = -(error);
else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
dr->dr_error = EIO;
#endif
}
/* Drop reference acquired by __vdev_disk_physio */
rc = vdev_disk_dio_put(dr);
}
static unsigned int
bio_map(struct bio *bio, void *bio_ptr, unsigned int bio_size)
{
unsigned int offset, size, i;
struct page *page;
offset = offset_in_page(bio_ptr);
for (i = 0; i < bio->bi_max_vecs; i++) {
size = PAGE_SIZE - offset;
if (bio_size <= 0)
break;
if (size > bio_size)
size = bio_size;
if (is_vmalloc_addr(bio_ptr))
page = vmalloc_to_page(bio_ptr);
else
page = virt_to_page(bio_ptr);
/*
* Some network-related block devices use tcp_sendpage, which
* doesn't behave well when given a 0-count page; this is a
* safety net to catch them.
*/
ASSERT3S(page_count(page), >, 0);
if (bio_add_page(bio, page, size, offset) != size)
break;
bio_ptr += size;
bio_size -= size;
offset = 0;
}
return (bio_size);
}
static unsigned int
bio_map_abd_off(struct bio *bio, abd_t *abd, unsigned int size, size_t off)
{
if (abd_is_linear(abd))
return (bio_map(bio, ((char *)abd_to_buf(abd)) + off, size));
return (abd_scatter_bio_map_off(bio, abd, size, off));
}
static inline void
vdev_submit_bio_impl(struct bio *bio)
{
#ifdef HAVE_1ARG_SUBMIT_BIO
submit_bio(bio);
#else
submit_bio(0, bio);
#endif
}
#ifndef HAVE_BIO_SET_DEV
static inline void
bio_set_dev(struct bio *bio, struct block_device *bdev)
{
bio->bi_bdev = bdev;
}
#endif /* !HAVE_BIO_SET_DEV */
static inline void
vdev_submit_bio(struct bio *bio)
{
#ifdef HAVE_CURRENT_BIO_TAIL
struct bio **bio_tail = current->bio_tail;
current->bio_tail = NULL;
vdev_submit_bio_impl(bio);
current->bio_tail = bio_tail;
#else
struct bio_list *bio_list = current->bio_list;
current->bio_list = NULL;
vdev_submit_bio_impl(bio);
current->bio_list = bio_list;
#endif
}
static int
__vdev_disk_physio(struct block_device *bdev, zio_t *zio,
size_t io_size, uint64_t io_offset, int rw, int flags)
{
dio_request_t *dr;
uint64_t abd_offset;
uint64_t bio_offset;
int bio_size, bio_count = 16;
int i = 0, error = 0;
#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
struct blk_plug plug;
#endif
ASSERT(zio != NULL);
ASSERT3U(io_offset + io_size, <=, bdev->bd_inode->i_size);
retry:
dr = vdev_disk_dio_alloc(bio_count);
if (dr == NULL)
return (SET_ERROR(ENOMEM));
if (zio && !(zio->io_flags & (ZIO_FLAG_IO_RETRY | ZIO_FLAG_TRYHARD)))
bio_set_flags_failfast(bdev, &flags);
dr->dr_zio = zio;
/*
* When the IO size exceeds the maximum bio size for the request
* queue we are forced to break the IO into multiple bio's and wait
* for them all to complete. Ideally, all pool users will set
* their volume block size to match the maximum request size and
* the common case will be one bio per vdev IO request.
*/
abd_offset = 0;
bio_offset = io_offset;
bio_size = io_size;
for (i = 0; i <= dr->dr_bio_count; i++) {
/* Finished constructing bio's for given buffer */
if (bio_size <= 0)
break;
/*
* By default only 'bio_count' bio's per dio are allowed.
* However, if we find ourselves in a situation where more
* are needed we allocate a larger dio and retry.
*/
if (dr->dr_bio_count == i) {
vdev_disk_dio_free(dr);
bio_count *= 2;
goto retry;
}
/* bio_alloc() with __GFP_WAIT never returns NULL */
dr->dr_bio[i] = bio_alloc(GFP_NOIO,
MIN(abd_nr_pages_off(zio->io_abd, bio_size, abd_offset),
BIO_MAX_PAGES));
if (unlikely(dr->dr_bio[i] == NULL)) {
vdev_disk_dio_free(dr);
return (SET_ERROR(ENOMEM));
}
/* Matching put called by vdev_disk_physio_completion */
vdev_disk_dio_get(dr);
bio_set_dev(dr->dr_bio[i], bdev);
BIO_BI_SECTOR(dr->dr_bio[i]) = bio_offset >> 9;
dr->dr_bio[i]->bi_end_io = vdev_disk_physio_completion;
dr->dr_bio[i]->bi_private = dr;
bio_set_op_attrs(dr->dr_bio[i], rw, flags);
/* Remaining size is returned to become the new size */
bio_size = bio_map_abd_off(dr->dr_bio[i], zio->io_abd,
bio_size, abd_offset);
/* Advance in buffer and construct another bio if needed */
abd_offset += BIO_BI_SIZE(dr->dr_bio[i]);
bio_offset += BIO_BI_SIZE(dr->dr_bio[i]);
}
/* Extra reference to protect dio_request during vdev_submit_bio */
vdev_disk_dio_get(dr);
#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
if (dr->dr_bio_count > 1)
blk_start_plug(&plug);
#endif
/* Submit all bio's associated with this dio */
for (i = 0; i < dr->dr_bio_count; i++)
if (dr->dr_bio[i])
vdev_submit_bio(dr->dr_bio[i]);
#if defined(HAVE_BLK_QUEUE_HAVE_BLK_PLUG)
if (dr->dr_bio_count > 1)
blk_finish_plug(&plug);
#endif
(void) vdev_disk_dio_put(dr);
return (error);
}
BIO_END_IO_PROTO(vdev_disk_io_flush_completion, bio, error)
{
zio_t *zio = bio->bi_private;
#ifdef HAVE_1ARG_BIO_END_IO_T
zio->io_error = BIO_END_IO_ERROR(bio);
#else
zio->io_error = -error;
#endif
if (zio->io_error && (zio->io_error == EOPNOTSUPP))
zio->io_vd->vdev_nowritecache = B_TRUE;
bio_put(bio);
ASSERT3S(zio->io_error, >=, 0);
if (zio->io_error)
vdev_disk_error(zio);
zio_interrupt(zio);
}
static int
vdev_disk_io_flush(struct block_device *bdev, zio_t *zio)
{
struct request_queue *q;
struct bio *bio;
q = bdev_get_queue(bdev);
if (!q)
return (SET_ERROR(ENXIO));
bio = bio_alloc(GFP_NOIO, 0);
/* bio_alloc() with __GFP_WAIT never returns NULL */
if (unlikely(bio == NULL))
return (SET_ERROR(ENOMEM));
bio->bi_end_io = vdev_disk_io_flush_completion;
bio->bi_private = zio;
bio_set_dev(bio, bdev);
bio_set_flush(bio);
vdev_submit_bio(bio);
invalidate_bdev(bdev);
return (0);
}
static void
vdev_disk_io_start(zio_t *zio)
{
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
int rw, flags, error;
switch (zio->io_type) {
case ZIO_TYPE_IOCTL:
if (!vdev_readable(v)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
}
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
if (zfs_nocacheflush)
break;
if (v->vdev_nowritecache) {
zio->io_error = SET_ERROR(ENOTSUP);
break;
}
error = vdev_disk_io_flush(vd->vd_bdev, zio);
if (error == 0)
return;
zio->io_error = error;
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
}
zio_execute(zio);
return;
case ZIO_TYPE_WRITE:
rw = WRITE;
#if defined(HAVE_BLK_QUEUE_HAVE_BIO_RW_UNPLUG)
flags = (1 << BIO_RW_UNPLUG);
#elif defined(REQ_UNPLUG)
flags = REQ_UNPLUG;
#else
flags = 0;
#endif
break;
case ZIO_TYPE_READ:
rw = READ;
#if defined(HAVE_BLK_QUEUE_HAVE_BIO_RW_UNPLUG)
flags = (1 << BIO_RW_UNPLUG);
#elif defined(REQ_UNPLUG)
flags = REQ_UNPLUG;
#else
flags = 0;
#endif
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
zio_interrupt(zio);
return;
}
zio->io_target_timestamp = zio_handle_io_delay(zio);
error = __vdev_disk_physio(vd->vd_bdev, zio,
zio->io_size, zio->io_offset, rw, flags);
if (error) {
zio->io_error = error;
zio_interrupt(zio);
return;
}
}
static void
vdev_disk_io_done(zio_t *zio)
{
/*
* If the device returned EIO, we revalidate the media. If it is
* determined the media has changed, this triggers the asynchronous
* removal of the device from the configuration.
*/
if (zio->io_error == EIO) {
vdev_t *v = zio->io_vd;
vdev_disk_t *vd = v->vdev_tsd;
if (check_disk_change(vd->vd_bdev)) {
vdev_bdev_invalidate(vd->vd_bdev);
v->vdev_remove_wanted = B_TRUE;
spa_async_request(zio->io_spa, SPA_ASYNC_REMOVE);
}
}
}
static void
vdev_disk_hold(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
/* We must have a pathname, and it must be absolute. */
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/')
return;
/*
* Only prefetch path and devid info if the device has
* never been opened.
*/
if (vd->vdev_tsd != NULL)
return;
/* XXX: Implement me as a vnode lookup for the device */
vd->vdev_name_vp = NULL;
vd->vdev_devid_vp = NULL;
}
static void
vdev_disk_rele(vdev_t *vd)
{
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_WRITER));
/* XXX: Implement me as a vnode rele for the device */
}
static int
param_set_vdev_scheduler(const char *val, zfs_kernel_param_t *kp)
{
spa_t *spa = NULL;
char *p;
if (val == NULL)
return (SET_ERROR(-EINVAL));
if ((p = strchr(val, '\n')) != NULL)
*p = '\0';
mutex_enter(&spa_namespace_lock);
while ((spa = spa_next(spa)) != NULL) {
if (spa_state(spa) != POOL_STATE_ACTIVE ||
!spa_writeable(spa) || spa_suspended(spa))
continue;
spa_open_ref(spa, FTAG);
mutex_exit(&spa_namespace_lock);
vdev_elevator_switch(spa->spa_root_vdev, (char *)val);
mutex_enter(&spa_namespace_lock);
spa_close(spa, FTAG);
}
mutex_exit(&spa_namespace_lock);
return (param_set_charp(val, kp));
}
vdev_ops_t vdev_disk_ops = {
vdev_disk_open,
vdev_disk_close,
vdev_default_asize,
vdev_disk_io_start,
vdev_disk_io_done,
NULL,
NULL,
vdev_disk_hold,
vdev_disk_rele,
+ NULL,
VDEV_TYPE_DISK, /* name of this vdev type */
B_TRUE /* leaf vdev */
};
module_param_call(zfs_vdev_scheduler, param_set_vdev_scheduler,
param_get_charp, &zfs_vdev_scheduler, 0644);
MODULE_PARM_DESC(zfs_vdev_scheduler, "I/O scheduler");
diff --git a/module/zfs/vdev_file.c b/module/zfs/vdev_file.c
index 13c32e0836f5..bd7e0bc2e392 100644
--- a/module/zfs/vdev_file.c
+++ b/module/zfs/vdev_file.c
@@ -1,294 +1,296 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2015 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev_file.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/abd.h>
/*
* Virtual device vector for files.
*/
static taskq_t *vdev_file_taskq;
static void
vdev_file_hold(vdev_t *vd)
{
ASSERT(vd->vdev_path != NULL);
}
static void
vdev_file_rele(vdev_t *vd)
{
ASSERT(vd->vdev_path != NULL);
}
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *ashift)
{
vdev_file_t *vf;
vnode_t *vp;
vattr_t vattr;
int error;
/* Rotational optimizations only make sense on block devices */
vd->vdev_nonrot = B_TRUE;
/*
* We must have a pathname, and it must be absolute.
*/
if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
/*
* Reopen the device if it's not currently open. Otherwise,
* just update the physical size of the device.
*/
if (vd->vdev_tsd != NULL) {
ASSERT(vd->vdev_reopening);
vf = vd->vdev_tsd;
goto skip_open;
}
vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_SLEEP);
/*
* We always open the files from the root of the global zone, even if
* we're in a local zone. If the user has gotten to this point, the
* administrator has already decided that the pool should be available
* to local zone users, so the underlying devices should be as well.
*/
ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');
error = vn_openat(vd->vdev_path + 1, UIO_SYSSPACE,
spa_mode(vd->vdev_spa) | FOFFMAX, 0, &vp, 0, 0, rootdir, -1);
if (error) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (error);
}
vf->vf_vnode = vp;
#ifdef _KERNEL
/*
* Make sure it's a regular file.
*/
if (vp->v_type != VREG) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (SET_ERROR(ENODEV));
}
#endif
skip_open:
/*
* Determine the physical size of the file.
*/
vattr.va_mask = AT_SIZE;
error = VOP_GETATTR(vf->vf_vnode, &vattr, 0, kcred, NULL);
if (error) {
vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
return (error);
}
*max_psize = *psize = vattr.va_size;
*ashift = SPA_MINBLOCKSHIFT;
return (0);
}
static void
vdev_file_close(vdev_t *vd)
{
vdev_file_t *vf = vd->vdev_tsd;
if (vd->vdev_reopening || vf == NULL)
return;
if (vf->vf_vnode != NULL) {
(void) VOP_PUTPAGE(vf->vf_vnode, 0, 0, B_INVAL, kcred, NULL);
(void) VOP_CLOSE(vf->vf_vnode, spa_mode(vd->vdev_spa), 1, 0,
kcred, NULL);
}
vd->vdev_delayed_close = B_FALSE;
kmem_free(vf, sizeof (vdev_file_t));
vd->vdev_tsd = NULL;
}
static void
vdev_file_io_strategy(void *arg)
{
zio_t *zio = (zio_t *)arg;
vdev_t *vd = zio->io_vd;
vdev_file_t *vf = vd->vdev_tsd;
ssize_t resid;
void *buf;
if (zio->io_type == ZIO_TYPE_READ)
buf = abd_borrow_buf(zio->io_abd, zio->io_size);
else
buf = abd_borrow_buf_copy(zio->io_abd, zio->io_size);
zio->io_error = vn_rdwr(zio->io_type == ZIO_TYPE_READ ?
UIO_READ : UIO_WRITE, vf->vf_vnode, buf, zio->io_size,
zio->io_offset, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, &resid);
if (zio->io_type == ZIO_TYPE_READ)
abd_return_buf_copy(zio->io_abd, buf, zio->io_size);
else
abd_return_buf(zio->io_abd, buf, zio->io_size);
if (resid != 0 && zio->io_error == 0)
zio->io_error = SET_ERROR(ENOSPC);
zio_delay_interrupt(zio);
}
static void
vdev_file_io_fsync(void *arg)
{
zio_t *zio = (zio_t *)arg;
vdev_file_t *vf = zio->io_vd->vdev_tsd;
zio->io_error = VOP_FSYNC(vf->vf_vnode, FSYNC | FDSYNC, kcred, NULL);
zio_interrupt(zio);
}
static void
vdev_file_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_file_t *vf = vd->vdev_tsd;
if (zio->io_type == ZIO_TYPE_IOCTL) {
/* XXPOLICY */
if (!vdev_readable(vd)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return;
}
switch (zio->io_cmd) {
case DKIOCFLUSHWRITECACHE:
if (zfs_nocacheflush)
break;
/*
* We cannot safely call vfs_fsync() when PF_FSTRANS
* is set in the current context. Filesystems like
* XFS include sanity checks to verify it is not
* already set, see xfs_vm_writepage(). Therefore
* the sync must be dispatched to a different context.
*/
if (__spl_pf_fstrans_check()) {
VERIFY3U(taskq_dispatch(vdev_file_taskq,
vdev_file_io_fsync, zio, TQ_SLEEP), !=,
TASKQID_INVALID);
return;
}
zio->io_error = VOP_FSYNC(vf->vf_vnode, FSYNC | FDSYNC,
kcred, NULL);
break;
default:
zio->io_error = SET_ERROR(ENOTSUP);
}
zio_execute(zio);
return;
}
zio->io_target_timestamp = zio_handle_io_delay(zio);
VERIFY3U(taskq_dispatch(vdev_file_taskq, vdev_file_io_strategy, zio,
TQ_SLEEP), !=, TASKQID_INVALID);
}
/* ARGSUSED */
static void
vdev_file_io_done(zio_t *zio)
{
}
vdev_ops_t vdev_file_ops = {
vdev_file_open,
vdev_file_close,
vdev_default_asize,
vdev_file_io_start,
vdev_file_io_done,
NULL,
NULL,
vdev_file_hold,
vdev_file_rele,
+ NULL,
VDEV_TYPE_FILE, /* name of this vdev type */
B_TRUE /* leaf vdev */
};
void
vdev_file_init(void)
{
vdev_file_taskq = taskq_create("z_vdev_file", MAX(boot_ncpus, 16),
minclsyspri, boot_ncpus, INT_MAX, TASKQ_DYNAMIC);
VERIFY(vdev_file_taskq);
}
void
vdev_file_fini(void)
{
taskq_destroy(vdev_file_taskq);
}
/*
* From userland we access disks just like files.
*/
#ifndef _KERNEL
vdev_ops_t vdev_disk_ops = {
vdev_file_open,
vdev_file_close,
vdev_default_asize,
vdev_file_io_start,
vdev_file_io_done,
NULL,
NULL,
vdev_file_hold,
vdev_file_rele,
+ NULL,
VDEV_TYPE_DISK, /* name of this vdev type */
B_TRUE /* leaf vdev */
};
#endif
diff --git a/module/zfs/vdev_indirect.c b/module/zfs/vdev_indirect.c
new file mode 100644
index 000000000000..86a05daa87a3
--- /dev/null
+++ b/module/zfs/vdev_indirect.c
@@ -0,0 +1,1064 @@
+/*
+ * CDDL HEADER START
+ *
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2014, 2015 by Delphix. All rights reserved.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/spa.h>
+#include <sys/spa_impl.h>
+#include <sys/vdev_impl.h>
+#include <sys/fs/zfs.h>
+#include <sys/zio.h>
+#include <sys/metaslab.h>
+#include <sys/refcount.h>
+#include <sys/dmu.h>
+#include <sys/vdev_indirect_mapping.h>
+#include <sys/dmu_tx.h>
+#include <sys/dsl_synctask.h>
+#include <sys/zap.h>
+
+/*
+ * An indirect vdev corresponds to a vdev that has been removed. Since
+ * we cannot rewrite block pointers of snapshots, etc., we keep a
+ * mapping from old location on the removed device to the new location
+ * on another device in the pool and use this mapping whenever we need
+ * to access the DVA. Unfortunately, this mapping did not respect
+ * logical block boundaries when it was first created, and so a DVA on
+ * this indirect vdev may be "split" into multiple sections that each
+ * map to a different location. As a consequence, not all DVAs can be
+ * translated to an equivalent new DVA. Instead we must provide a
+ * "vdev_remap" operation that executes a callback on each contiguous
+ * segment of the new location. This function is used in multiple ways:
+ *
+ * - reads and repair writes to this device use the callback to create
+ * a child io for each mapped segment.
+ *
+ * - frees and claims to this device use the callback to free or claim
+ * each mapped segment. (Note that we don't actually need to claim
+ * log blocks on indirect vdevs, because we don't allocate to
+ * removing vdevs. However, zdb uses zio_claim() for its leak
+ * detection.)
+ */
+
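+/*
+ * Illustrative sketch only (the names below are assumptions, not part
+ * of this change): the remap operation is essentially a per-segment
+ * iterator.  A read to a DVA on an indirect vdev hands its offset and
+ * size to the vdev and receives one callback per contiguous segment of
+ * the new location, roughly:
+ *
+ *     example_remap_cb(uint64_t split_offset, vdev_t *new_vd,
+ *         uint64_t new_offset, uint64_t new_size, void *arg)
+ *     {
+ *             issue one child i/o (or free/claim) covering
+ *             [new_offset, new_offset + new_size) on new_vd;
+ *     }
+ *
+ * The actual callback signature used by this file may differ; the
+ * point is only that each split segment is handled independently.
+ */
+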
+/*
+ * "Big theory statement" for how we mark blocks obsolete.
+ *
+ * When a block on an indirect vdev is freed or remapped, a section of
+ * that vdev's mapping may no longer be referenced (aka "obsolete"). We
+ * keep track of how much of each mapping entry is obsolete. When
+ * an entry becomes completely obsolete, we can remove it, thus reducing
+ * the memory used by the mapping. The complete picture of obsolescence
+ * is given by the following data structures, described below:
+ * - the entry-specific obsolete count
+ * - the vdev-specific obsolete spacemap
+ * - the pool-specific obsolete bpobj
+ *
+ * == On disk data structures used ==
+ *
+ * We track the obsolete space for the pool using several objects. Each
+ * of these objects is created on demand and freed when no longer
+ * needed, and is assumed to be empty if it does not exist.
+ * SPA_FEATURE_OBSOLETE_COUNTS includes the count of these objects.
+ *
+ * - Each vic_mapping_object (associated with an indirect vdev) can
+ * have a vimp_counts_object. This is an array of uint32_t's
+ * with the same number of entries as the vic_mapping_object. When
+ * the mapping is condensed, entries from the vic_obsolete_sm_object
+ * (see below) are folded into the counts. Therefore, each
+ * obsolete_counts entry tells us the number of bytes in the
+ * corresponding mapping entry that were not referenced when the
+ * mapping was last condensed.
+ *
+ * - Each indirect or removing vdev can have a vic_obsolete_sm_object.
+ * This is a space map containing an alloc entry for every DVA that
+ * has been obsoleted since the last time this indirect vdev was
+ * condensed. We use this object in order to improve performance
+ * when marking a DVA as obsolete. Instead of modifying an arbitrary
+ * offset of the vimp_counts_object, we only need to append an entry
+ * to the end of this object. When a DVA becomes obsolete, it is
+ * added to the obsolete space map. This happens when the DVA is
+ * freed, remapped and not referenced by a snapshot, or the last
+ * snapshot referencing it is destroyed.
+ *
+ * - Each dataset can have a ds_remap_deadlist object. This is a
+ * deadlist object containing all blocks that were remapped in this
+ * dataset but referenced in a previous snapshot. Blocks can *only*
+ * appear on this list if they were remapped (dsl_dataset_block_remapped);
+ * blocks that were killed in a head dataset are put on the normal
+ * ds_deadlist and marked obsolete when they are freed.
+ *
+ * - The pool can have a dp_obsolete_bpobj. This is a list of blocks
+ * in the pool that need to be marked obsolete. When a snapshot is
+ * destroyed, we move some of the ds_remap_deadlist to the obsolete
+ * bpobj (see dsl_destroy_snapshot_handle_remaps()). We then
+ * asynchronously process the obsolete bpobj, moving its entries to
+ * the specific vdevs' obsolete space maps.
+ *
+ * == Summary of how we mark blocks as obsolete ==
+ *
+ * - When freeing a block: if any DVA is on an indirect vdev, append to
+ * vic_obsolete_sm_object.
+ * - When remapping a block, add its DVA to ds_remap_deadlist (if the
+ *   previous snapshot references it; otherwise append to
+ *   vic_obsolete_sm_object).
+ * - When freeing a snapshot: move parts of ds_remap_deadlist to
+ * dp_obsolete_bpobj (same algorithm as ds_deadlist).
+ * - When syncing the spa: process dp_obsolete_bpobj, moving ranges to
+ * individual vdev's vic_obsolete_sm_object.
+ */
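As a rough illustration of the routing rules in the summary above, the first destination of an obsolete range can be sketched with a hypothetical helper (the real logic is spread across the free path, dsl_dataset_block_remapped(), and the snapshot-destroy path):

#include <stdbool.h>
#include <stdio.h>

typedef enum obsolete_dest {
	DEST_VDEV_OBSOLETE_SM,	/* the vdev's vic_obsolete_sm_object */
	DEST_REMAP_DEADLIST	/* the dataset's ds_remap_deadlist */
} obsolete_dest_t;

/* Hypothetical helper mirroring the summary comment above. */
static obsolete_dest_t
obsolete_destination(bool remapping, bool referenced_by_prev_snapshot)
{
	if (remapping && referenced_by_prev_snapshot) {
		/*
		 * A previous snapshot still needs the old mapping, so the
		 * block is deferred onto the dataset's remap deadlist.
		 */
		return (DEST_REMAP_DEADLIST);
	}
	/*
	 * Freed blocks, and remapped blocks with no snapshot holding the
	 * old location, go straight to the vdev's obsolete space map.
	 */
	return (DEST_VDEV_OBSOLETE_SM);
}

int
main(void)
{
	printf("free:              %d\n", obsolete_destination(false, false));
	printf("remap w/ snapshot: %d\n", obsolete_destination(true, true));
	printf("remap in head:     %d\n", obsolete_destination(true, false));
	return (0);
}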
+
+/*
+ * "Big theory statement" for how we condense indirect vdevs.
+ *
+ * Condensing an indirect vdev's mapping is the process of determining
+ * the precise counts of obsolete space for each mapping entry (by
+ * integrating the obsolete spacemap into the obsolete counts) and
+ * writing out a new mapping that contains only referenced entries.
+ *
+ * We condense a vdev when we expect the mapping to shrink (see
+ * vdev_indirect_should_condense()), but only perform one condense at a
+ * time to limit the memory usage. In addition, we use a separate
+ * open-context thread (spa_condense_indirect_thread) to incrementally
+ * create the new mapping object in a way that minimizes the impact on
+ * the rest of the system.
+ *
+ * == Generating a new mapping ==
+ *
+ * To generate a new mapping, we follow these steps:
+ *
+ * 1. Save the old obsolete space map and create a new mapping object
+ * (see spa_condense_indirect_start_sync()). This initializes the
+ * spa_condensing_indirect_phys with the "previous obsolete space map",
+ * which is now read only. Newly obsolete DVAs will be added to a
+ * new (initially empty) obsolete space map, and will not be
+ * considered as part of this condense operation.
+ *
+ * 2. Construct in memory the precise counts of obsolete space for each
+ * mapping entry, by incorporating the obsolete space map into the
+ * counts. (See vdev_indirect_mapping_load_obsolete_{counts,spacemap}().)
+ *
+ * 3. Iterate through each mapping entry, writing to the new mapping any
+ * entries that are not completely obsolete (i.e. which don't have
+ * obsolete count == mapping length). (See
+ * spa_condense_indirect_generate_new_mapping().)
+ *
+ * 4. Destroy the old mapping object and switch over to the new one
+ * (spa_condense_indirect_complete_sync).
+ *
+ * == Restarting from failure ==
+ *
+ * To restart the condense when we import/open the pool, we must start
+ * at the 2nd step above: reconstruct the precise counts in memory,
+ * based on the space map + counts. Then in the 3rd step, we start
+ * iterating where we left off: at vimp_max_offset of the new mapping
+ * object.
+ */
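The core of steps 2 and 3 can be compressed into the following self-contained sketch, where a hypothetical entry_t with only a size field and a plain obsolete[] array stand in for vdev_indirect_mapping_entry_phys_t and the obsolete space map:

#include <stdint.h>
#include <stdio.h>

typedef struct entry {
	uint64_t e_size;	/* mapped length of this entry */
} entry_t;

/*
 * Step 2 (simplified): obsolete[] already holds the counts from the last
 * condense; fold in one "alloc" record from the old obsolete space map,
 * which in this toy model happens to cover entries [first, last] exactly.
 */
static void
fold_obsolete_record(uint64_t *obsolete, const entry_t *entries,
    int first, int last)
{
	for (int i = first; i <= last; i++)
		obsolete[i] = entries[i].e_size;
}

/*
 * Step 3 (simplified): copy into the new mapping only the entries that
 * are not completely obsolete; fully-obsolete entries are dropped.
 */
static int
generate_new_mapping(const entry_t *old, const uint64_t *obsolete,
    int nentries, entry_t *out)
{
	int kept = 0;
	for (int i = 0; i < nentries; i++) {
		if (obsolete[i] < old[i].e_size)
			out[kept++] = old[i];
	}
	return (kept);
}

int
main(void)
{
	entry_t old[4] = { {10}, {20}, {30}, {40} };
	uint64_t obsolete[4] = { 0, 5, 0, 0 };
	entry_t out[4];

	fold_obsolete_record(obsolete, old, 2, 3);	/* entries 2,3 now dead */
	printf("kept %d of 4 entries\n",
	    generate_new_mapping(old, obsolete, 4, out));	/* kept 2 of 4 */
	return (0);
}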
+
+boolean_t zfs_condense_indirect_vdevs_enable = B_TRUE;
+
+/*
+ * Condense if at least this percent of the bytes in the mapping is
+ * obsolete. With the default of 25%, the amount of space mapped
+ * will be reduced to 1% of its original size after at most 16
+ * condenses. Higher values will condense less often (causing less
+ * i/o); lower values will reduce the mapping size more quickly.
+ */
+int zfs_indirect_condense_obsolete_pct = 25;
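Roughly speaking, if each condense removes at least the 25% of mapped bytes that had become obsolete, then at most 0.75 of the mapped bytes survive each condense; since 0.75^16 ≈ 0.01, that is where the "1% of its original size after at most 16 condenses" figure above comes from.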
+
+/*
+ * Condense if the obsolete space map takes up more than this amount of
+ * space on disk (logically). This limits the amount of disk space
+ * consumed by the obsolete space map; the default of 1GB is small enough
+ * that we typically don't mind "wasting" it.
+ */
+uint64_t zfs_condense_max_obsolete_bytes = 1024 * 1024 * 1024;
+
+/*
+ * Don't bother condensing if the mapping uses less than this amount of
+ * memory. The default of 128KB is considered a "trivial" amount of
+ * memory and not worth reducing.
+ */
+unsigned long zfs_condense_min_mapping_bytes = 128 * 1024;
+
+/*
+ * This is used by the test suite so that it can ensure that certain
+ * actions happen while in the middle of a condense (which might otherwise
+ * complete too quickly). If used to reduce the performance impact of
+ * condensing in production, a maximum value of 1 should be sufficient.
+ */
+int zfs_condense_indirect_commit_entry_delay_ms = 0;
+
+/*
+ * Mark the given offset and size as being obsolete in the given txg.
+ */
+void
+vdev_indirect_mark_obsolete(vdev_t *vd, uint64_t offset, uint64_t size,
+ uint64_t txg)
+{
+ spa_t *spa = vd->vdev_spa;
+ ASSERT3U(spa_syncing_txg(spa), ==, txg);
+ ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, !=, 0);
+ ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
+ ASSERT(size > 0);
+ VERIFY(vdev_indirect_mapping_entry_for_offset(
+ vd->vdev_indirect_mapping, offset) != NULL);
+
+ if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
+ mutex_enter(&vd->vdev_obsolete_lock);
+ range_tree_add(vd->vdev_obsolete_segments, offset, size);
+ mutex_exit(&vd->vdev_obsolete_lock);
+ vdev_dirty(vd, 0, NULL, txg);
+ }
+}
+
+/*
+ * Mark the DVA vdev_id:offset:size as being obsolete in the given tx. This
+ * wrapper is provided because the DMU does not know about vdev_t's and
+ * cannot directly call vdev_indirect_mark_obsolete.
+ */
+void
+spa_vdev_indirect_mark_obsolete(spa_t *spa, uint64_t vdev_id, uint64_t offset,
+ uint64_t size, dmu_tx_t *tx)
+{
+ vdev_t *vd = vdev_lookup_top(spa, vdev_id);
+ ASSERT(dmu_tx_is_syncing(tx));
+
+ /* The DMU can only remap indirect vdevs. */
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+ vdev_indirect_mark_obsolete(vd, offset, size, dmu_tx_get_txg(tx));
+}
+
+static spa_condensing_indirect_t *
+spa_condensing_indirect_create(spa_t *spa)
+{
+ spa_condensing_indirect_phys_t *scip =
+ &spa->spa_condensing_indirect_phys;
+ spa_condensing_indirect_t *sci = kmem_zalloc(sizeof (*sci), KM_SLEEP);
+ objset_t *mos = spa->spa_meta_objset;
+
+ for (int i = 0; i < TXG_SIZE; i++) {
+ list_create(&sci->sci_new_mapping_entries[i],
+ sizeof (vdev_indirect_mapping_entry_t),
+ offsetof(vdev_indirect_mapping_entry_t, vime_node));
+ }
+
+ sci->sci_new_mapping =
+ vdev_indirect_mapping_open(mos, scip->scip_next_mapping_object);
+
+ return (sci);
+}
+
+static void
+spa_condensing_indirect_destroy(spa_condensing_indirect_t *sci)
+{
+ for (int i = 0; i < TXG_SIZE; i++)
+ list_destroy(&sci->sci_new_mapping_entries[i]);
+
+ if (sci->sci_new_mapping != NULL)
+ vdev_indirect_mapping_close(sci->sci_new_mapping);
+
+ kmem_free(sci, sizeof (*sci));
+}
+
+boolean_t
+vdev_indirect_should_condense(vdev_t *vd)
+{
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ spa_t *spa = vd->vdev_spa;
+
+ ASSERT(dsl_pool_sync_context(spa->spa_dsl_pool));
+
+ if (!zfs_condense_indirect_vdevs_enable)
+ return (B_FALSE);
+
+ /*
+ * We can only condense one indirect vdev at a time.
+ */
+ if (spa->spa_condensing_indirect != NULL)
+ return (B_FALSE);
+
+ if (spa_shutting_down(spa))
+ return (B_FALSE);
+
+ /*
+ * The mapping object size must not change while we are
+ * condensing, so we can only condense indirect vdevs
+ * (not vdevs that are still in the middle of being removed).
+ */
+ if (vd->vdev_ops != &vdev_indirect_ops)
+ return (B_FALSE);
+
+ /*
+ * If nothing new has been marked obsolete, there is no
+ * point in condensing.
+ */
+ if (vd->vdev_obsolete_sm == NULL) {
+ ASSERT0(vdev_obsolete_sm_object(vd));
+ return (B_FALSE);
+ }
+
+ ASSERT(vd->vdev_obsolete_sm != NULL);
+
+ ASSERT3U(vdev_obsolete_sm_object(vd), ==,
+ space_map_object(vd->vdev_obsolete_sm));
+
+ uint64_t bytes_mapped = vdev_indirect_mapping_bytes_mapped(vim);
+ uint64_t bytes_obsolete = space_map_allocated(vd->vdev_obsolete_sm);
+ uint64_t mapping_size = vdev_indirect_mapping_size(vim);
+ uint64_t obsolete_sm_size = space_map_length(vd->vdev_obsolete_sm);
+
+ ASSERT3U(bytes_obsolete, <=, bytes_mapped);
+
+ /*
+ * If a high percentage of the bytes that are mapped have become
+ * obsolete, condense (unless the mapping is already small enough).
+ * This has a good chance of reducing the amount of memory used
+ * by the mapping.
+ */
+ if (bytes_obsolete * 100 / bytes_mapped >=
+ zfs_indirect_condense_obsolete_pct &&
+ mapping_size > zfs_condense_min_mapping_bytes) {
+ zfs_dbgmsg("should condense vdev %llu because obsolete "
+ "spacemap covers %d%% of %lluMB mapping",
+ (u_longlong_t)vd->vdev_id,
+ (int)(bytes_obsolete * 100 / bytes_mapped),
+ (u_longlong_t)bytes_mapped / 1024 / 1024);
+ return (B_TRUE);
+ }
+
+ /*
+ * If the obsolete space map takes up too much space on disk,
+ * condense in order to free up this disk space.
+ */
+ if (obsolete_sm_size >= zfs_condense_max_obsolete_bytes) {
+ zfs_dbgmsg("should condense vdev %llu because obsolete sm "
+ "length %lluMB >= max size %lluMB",
+ (u_longlong_t)vd->vdev_id,
+ (u_longlong_t)obsolete_sm_size / 1024 / 1024,
+ (u_longlong_t)zfs_condense_max_obsolete_bytes /
+ 1024 / 1024);
+ return (B_TRUE);
+ }
+
+ return (B_FALSE);
+}
+
+/*
+ * This sync task completes (finishes) a condense, deleting the old
+ * mapping and replacing it with the new one.
+ */
+static void
+spa_condense_indirect_complete_sync(void *arg, dmu_tx_t *tx)
+{
+ spa_condensing_indirect_t *sci = arg;
+ spa_t *spa = dmu_tx_pool(tx)->dp_spa;
+ spa_condensing_indirect_phys_t *scip =
+ &spa->spa_condensing_indirect_phys;
+ vdev_t *vd = vdev_lookup_top(spa, scip->scip_vdev);
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+ objset_t *mos = spa->spa_meta_objset;
+ vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
+ uint64_t old_count = vdev_indirect_mapping_num_entries(old_mapping);
+ uint64_t new_count =
+ vdev_indirect_mapping_num_entries(sci->sci_new_mapping);
+
+ ASSERT(dmu_tx_is_syncing(tx));
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+ ASSERT3P(sci, ==, spa->spa_condensing_indirect);
+ for (int i = 0; i < TXG_SIZE; i++) {
+ ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
+ }
+ ASSERT(vic->vic_mapping_object != 0);
+ ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
+ ASSERT(scip->scip_next_mapping_object != 0);
+ ASSERT(scip->scip_prev_obsolete_sm_object != 0);
+
+ /*
+ * Reset vdev_indirect_mapping to refer to the new object.
+ */
+ rw_enter(&vd->vdev_indirect_rwlock, RW_WRITER);
+ vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
+ vd->vdev_indirect_mapping = sci->sci_new_mapping;
+ rw_exit(&vd->vdev_indirect_rwlock);
+
+ sci->sci_new_mapping = NULL;
+ vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
+ vic->vic_mapping_object = scip->scip_next_mapping_object;
+ scip->scip_next_mapping_object = 0;
+
+ space_map_free_obj(mos, scip->scip_prev_obsolete_sm_object, tx);
+ spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+ scip->scip_prev_obsolete_sm_object = 0;
+
+ scip->scip_vdev = 0;
+
+ VERIFY0(zap_remove(mos, DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_CONDENSING_INDIRECT, tx));
+ spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
+ spa->spa_condensing_indirect = NULL;
+
+ zfs_dbgmsg("finished condense of vdev %llu in txg %llu: "
+ "new mapping object %llu has %llu entries "
+ "(was %llu entries)",
+ vd->vdev_id, dmu_tx_get_txg(tx), vic->vic_mapping_object,
+ new_count, old_count);
+
+ vdev_config_dirty(spa->spa_root_vdev);
+}
+
+/*
+ * This sync task appends entries to the new mapping object.
+ */
+static void
+spa_condense_indirect_commit_sync(void *arg, dmu_tx_t *tx)
+{
+ spa_condensing_indirect_t *sci = arg;
+ uint64_t txg = dmu_tx_get_txg(tx);
+ ASSERTV(spa_t *spa = dmu_tx_pool(tx)->dp_spa);
+
+ ASSERT(dmu_tx_is_syncing(tx));
+ ASSERT3P(sci, ==, spa->spa_condensing_indirect);
+
+ vdev_indirect_mapping_add_entries(sci->sci_new_mapping,
+ &sci->sci_new_mapping_entries[txg & TXG_MASK], tx);
+ ASSERT(list_is_empty(&sci->sci_new_mapping_entries[txg & TXG_MASK]));
+}
+
+/*
+ * Open-context function to add one entry to the new mapping. The new
+ * entry will be remembered and written from syncing context.
+ */
+static void
+spa_condense_indirect_commit_entry(spa_t *spa,
+ vdev_indirect_mapping_entry_phys_t *vimep, uint32_t count)
+{
+ spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
+
+ ASSERT3U(count, <, DVA_GET_ASIZE(&vimep->vimep_dst));
+
+ dmu_tx_t *tx = dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
+ dmu_tx_hold_space(tx, sizeof (*vimep) + sizeof (count));
+ VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+ int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
+
+ /*
+ * If we are the first entry committed this txg, kick off the sync
+ * task to write to the MOS on our behalf.
+ */
+ if (list_is_empty(&sci->sci_new_mapping_entries[txgoff])) {
+ dsl_sync_task_nowait(dmu_tx_pool(tx),
+ spa_condense_indirect_commit_sync, sci,
+ 0, ZFS_SPACE_CHECK_NONE, tx);
+ }
+
+ vdev_indirect_mapping_entry_t *vime =
+ kmem_alloc(sizeof (*vime), KM_SLEEP);
+ vime->vime_mapping = *vimep;
+ vime->vime_obsolete_count = count;
+ list_insert_tail(&sci->sci_new_mapping_entries[txgoff], vime);
+
+ dmu_tx_commit(tx);
+}
+
+static void
+spa_condense_indirect_generate_new_mapping(vdev_t *vd,
+ uint32_t *obsolete_counts, uint64_t start_index)
+{
+ spa_t *spa = vd->vdev_spa;
+ uint64_t mapi = start_index;
+ vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
+ uint64_t old_num_entries =
+ vdev_indirect_mapping_num_entries(old_mapping);
+
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+ ASSERT3U(vd->vdev_id, ==, spa->spa_condensing_indirect_phys.scip_vdev);
+
+ zfs_dbgmsg("starting condense of vdev %llu from index %llu",
+ (u_longlong_t)vd->vdev_id,
+ (u_longlong_t)mapi);
+
+ while (mapi < old_num_entries && !spa_shutting_down(spa)) {
+ vdev_indirect_mapping_entry_phys_t *entry =
+ &old_mapping->vim_entries[mapi];
+ uint64_t entry_size = DVA_GET_ASIZE(&entry->vimep_dst);
+ ASSERT3U(obsolete_counts[mapi], <=, entry_size);
+ if (obsolete_counts[mapi] < entry_size) {
+ spa_condense_indirect_commit_entry(spa, entry,
+ obsolete_counts[mapi]);
+
+ /*
+ * This delay may be requested for testing, debugging,
+ * or performance reasons.
+ */
+ hrtime_t now = gethrtime();
+ hrtime_t sleep_until = now + MSEC2NSEC(
+ zfs_condense_indirect_commit_entry_delay_ms);
+ zfs_sleep_until(sleep_until);
+ }
+
+ mapi++;
+ }
+ if (spa_shutting_down(spa)) {
+ zfs_dbgmsg("pausing condense of vdev %llu at index %llu",
+ (u_longlong_t)vd->vdev_id,
+ (u_longlong_t)mapi);
+ }
+}
+
+static void
+spa_condense_indirect_thread(void *arg)
+{
+ vdev_t *vd = arg;
+ spa_t *spa = vd->vdev_spa;
+ spa_condensing_indirect_t *sci = spa->spa_condensing_indirect;
+ spa_condensing_indirect_phys_t *scip =
+ &spa->spa_condensing_indirect_phys;
+ uint32_t *counts;
+ uint64_t start_index;
+ vdev_indirect_mapping_t *old_mapping = vd->vdev_indirect_mapping;
+ space_map_t *prev_obsolete_sm = NULL;
+
+ ASSERT3U(vd->vdev_id, ==, scip->scip_vdev);
+ ASSERT(scip->scip_next_mapping_object != 0);
+ ASSERT(scip->scip_prev_obsolete_sm_object != 0);
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+
+ for (int i = 0; i < TXG_SIZE; i++) {
+ /*
+ * The list must start out empty in order for the
+ * _commit_sync() sync task to be properly registered
+ * on the first call to _commit_entry(); so it's wise
+ * to double check and ensure we actually are starting
+ * with empty lists.
+ */
+ ASSERT(list_is_empty(&sci->sci_new_mapping_entries[i]));
+ }
+
+ VERIFY0(space_map_open(&prev_obsolete_sm, spa->spa_meta_objset,
+ scip->scip_prev_obsolete_sm_object, 0, vd->vdev_asize, 0));
+ space_map_update(prev_obsolete_sm);
+ counts = vdev_indirect_mapping_load_obsolete_counts(old_mapping);
+ if (prev_obsolete_sm != NULL) {
+ vdev_indirect_mapping_load_obsolete_spacemap(old_mapping,
+ counts, prev_obsolete_sm);
+ }
+ space_map_close(prev_obsolete_sm);
+
+ /*
+ * Generate new mapping. Determine what index to continue from
+ * based on the max offset that we've already written in the
+ * new mapping.
+ */
+ uint64_t max_offset =
+ vdev_indirect_mapping_max_offset(sci->sci_new_mapping);
+ if (max_offset == 0) {
+ /* We haven't written anything to the new mapping yet. */
+ start_index = 0;
+ } else {
+ /*
+ * Pick up from where we left off:
+ * _entry_for_offset_or_next() returns a pointer into the
+ * vim_entries array. If max_offset is greater than any of
+ * the mappings contained in the table, NULL will be
+ * returned, indicating that we have exhausted our
+ * iteration of the old_mapping.
+ */
+
+ vdev_indirect_mapping_entry_phys_t *entry =
+ vdev_indirect_mapping_entry_for_offset_or_next(old_mapping,
+ max_offset);
+
+ if (entry == NULL) {
+ /*
+ * We've already written the whole new mapping.
+ * This special value will cause us to skip the
+ * generate_new_mapping step and just do the sync
+ * task to complete the condense.
+ */
+ start_index = UINT64_MAX;
+ } else {
+ start_index = entry - old_mapping->vim_entries;
+ ASSERT3U(start_index, <,
+ vdev_indirect_mapping_num_entries(old_mapping));
+ }
+ }
+
+ spa_condense_indirect_generate_new_mapping(vd, counts, start_index);
+
+ vdev_indirect_mapping_free_obsolete_counts(old_mapping, counts);
+
+ /*
+ * We may have bailed early from generate_new_mapping(), if
+ * the spa is shutting down. In this case, do not complete
+ * the condense.
+ */
+ if (!spa_shutting_down(spa)) {
+ VERIFY0(dsl_sync_task(spa_name(spa), NULL,
+ spa_condense_indirect_complete_sync, sci, 0,
+ ZFS_SPACE_CHECK_NONE));
+ }
+
+ mutex_enter(&spa->spa_async_lock);
+ spa->spa_condense_thread = NULL;
+ cv_broadcast(&spa->spa_async_cv);
+ mutex_exit(&spa->spa_async_lock);
+}
+
+/*
+ * Sync task to begin the condensing process.
+ */
+void
+spa_condense_indirect_start_sync(vdev_t *vd, dmu_tx_t *tx)
+{
+ spa_t *spa = vd->vdev_spa;
+ spa_condensing_indirect_phys_t *scip =
+ &spa->spa_condensing_indirect_phys;
+
+ ASSERT0(scip->scip_next_mapping_object);
+ ASSERT0(scip->scip_prev_obsolete_sm_object);
+ ASSERT0(scip->scip_vdev);
+ ASSERT(dmu_tx_is_syncing(tx));
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+ ASSERT(spa_feature_is_active(spa, SPA_FEATURE_OBSOLETE_COUNTS));
+ ASSERT(vdev_indirect_mapping_num_entries(vd->vdev_indirect_mapping));
+
+ uint64_t obsolete_sm_obj = vdev_obsolete_sm_object(vd);
+ ASSERT(obsolete_sm_obj != 0);
+
+ scip->scip_vdev = vd->vdev_id;
+ scip->scip_next_mapping_object =
+ vdev_indirect_mapping_alloc(spa->spa_meta_objset, tx);
+
+ scip->scip_prev_obsolete_sm_object = obsolete_sm_obj;
+
+ /*
+ * We don't need to allocate a new space map object, since
+ * vdev_indirect_sync_obsolete will allocate one when needed.
+ */
+ space_map_close(vd->vdev_obsolete_sm);
+ vd->vdev_obsolete_sm = NULL;
+ VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
+
+ VERIFY0(zap_add(spa->spa_dsl_pool->dp_meta_objset,
+ DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
+ sizeof (*scip) / sizeof (uint64_t), scip, tx));
+
+ ASSERT3P(spa->spa_condensing_indirect, ==, NULL);
+ spa->spa_condensing_indirect = spa_condensing_indirect_create(spa);
+
+ zfs_dbgmsg("starting condense of vdev %llu in txg %llu: "
+ "posm=%llu nm=%llu",
+ vd->vdev_id, dmu_tx_get_txg(tx),
+ (u_longlong_t)scip->scip_prev_obsolete_sm_object,
+ (u_longlong_t)scip->scip_next_mapping_object);
+
+ ASSERT3P(spa->spa_condense_thread, ==, NULL);
+ spa->spa_condense_thread = thread_create(NULL, 0,
+ spa_condense_indirect_thread, vd, 0, &p0, TS_RUN, minclsyspri);
+}
+
+/*
+ * Sync to the given vdev's obsolete space map any segments that are no longer
+ * referenced as of the given txg.
+ *
+ * If the obsolete space map doesn't exist yet, create and open it.
+ */
+void
+vdev_indirect_sync_obsolete(vdev_t *vd, dmu_tx_t *tx)
+{
+ spa_t *spa = vd->vdev_spa;
+ ASSERTV(vdev_indirect_config_t *vic = &vd->vdev_indirect_config);
+
+ ASSERT3U(vic->vic_mapping_object, !=, 0);
+ ASSERT(range_tree_space(vd->vdev_obsolete_segments) > 0);
+ ASSERT(vd->vdev_removing || vd->vdev_ops == &vdev_indirect_ops);
+ ASSERT(spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS));
+
+ if (vdev_obsolete_sm_object(vd) == 0) {
+ uint64_t obsolete_sm_object =
+ space_map_alloc(spa->spa_meta_objset, tx);
+
+ ASSERT(vd->vdev_top_zap != 0);
+ VERIFY0(zap_add(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM,
+ sizeof (obsolete_sm_object), 1, &obsolete_sm_object, tx));
+ ASSERT3U(vdev_obsolete_sm_object(vd), !=, 0);
+
+ spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+ VERIFY0(space_map_open(&vd->vdev_obsolete_sm,
+ spa->spa_meta_objset, obsolete_sm_object,
+ 0, vd->vdev_asize, 0));
+ space_map_update(vd->vdev_obsolete_sm);
+ }
+
+ ASSERT(vd->vdev_obsolete_sm != NULL);
+ ASSERT3U(vdev_obsolete_sm_object(vd), ==,
+ space_map_object(vd->vdev_obsolete_sm));
+
+ space_map_write(vd->vdev_obsolete_sm,
+ vd->vdev_obsolete_segments, SM_ALLOC, tx);
+ space_map_update(vd->vdev_obsolete_sm);
+ range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
+}
+
+int
+spa_condense_init(spa_t *spa)
+{
+ int error = zap_lookup(spa->spa_meta_objset,
+ DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_CONDENSING_INDIRECT, sizeof (uint64_t),
+ sizeof (spa->spa_condensing_indirect_phys) / sizeof (uint64_t),
+ &spa->spa_condensing_indirect_phys);
+ if (error == 0) {
+ if (spa_writeable(spa)) {
+ spa->spa_condensing_indirect =
+ spa_condensing_indirect_create(spa);
+ }
+ return (0);
+ } else if (error == ENOENT) {
+ return (0);
+ } else {
+ return (error);
+ }
+}
+
+void
+spa_condense_fini(spa_t *spa)
+{
+ if (spa->spa_condensing_indirect != NULL) {
+ spa_condensing_indirect_destroy(spa->spa_condensing_indirect);
+ spa->spa_condensing_indirect = NULL;
+ }
+}
+
+/*
+ * Restart the condense - called when the pool is opened.
+ */
+void
+spa_condense_indirect_restart(spa_t *spa)
+{
+ vdev_t *vd;
+ ASSERT(spa->spa_condensing_indirect != NULL);
+ spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
+ vd = vdev_lookup_top(spa,
+ spa->spa_condensing_indirect_phys.scip_vdev);
+ ASSERT(vd != NULL);
+ spa_config_exit(spa, SCL_VDEV, FTAG);
+
+ ASSERT3P(spa->spa_condense_thread, ==, NULL);
+ spa->spa_condense_thread = thread_create(NULL, 0,
+ spa_condense_indirect_thread, vd, 0, &p0, TS_RUN,
+ minclsyspri);
+}
+
+/*
+ * Gets the obsolete spacemap object from the vdev's ZAP.
+ * Returns the spacemap object, or 0 if it wasn't in the ZAP or the ZAP doesn't
+ * exist yet.
+ */
+int
+vdev_obsolete_sm_object(vdev_t *vd)
+{
+ ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
+ if (vd->vdev_top_zap == 0) {
+ return (0);
+ }
+
+ uint64_t sm_obj = 0;
+ int err;
+ err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, sizeof (sm_obj), 1, &sm_obj);
+
+ ASSERT(err == 0 || err == ENOENT);
+
+ return (sm_obj);
+}
+
+boolean_t
+vdev_obsolete_counts_are_precise(vdev_t *vd)
+{
+ ASSERT0(spa_config_held(vd->vdev_spa, SCL_ALL, RW_WRITER));
+ if (vd->vdev_top_zap == 0) {
+ return (B_FALSE);
+ }
+
+ uint64_t val = 0;
+ int err;
+ err = zap_lookup(vd->vdev_spa->spa_meta_objset, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (val), 1, &val);
+
+ ASSERT(err == 0 || err == ENOENT);
+
+ return (val != 0);
+}
+
+/* ARGSUSED */
+static void
+vdev_indirect_close(vdev_t *vd)
+{
+}
+
+/* ARGSUSED */
+static void
+vdev_indirect_io_done(zio_t *zio)
+{
+}
+
+/* ARGSUSED */
+static int
+vdev_indirect_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
+ uint64_t *ashift)
+{
+ *psize = *max_psize = vd->vdev_asize +
+ VDEV_LABEL_START_SIZE + VDEV_LABEL_END_SIZE;
+ *ashift = vd->vdev_ashift;
+ return (0);
+}
+
+typedef struct remap_segment {
+ vdev_t *rs_vd;
+ uint64_t rs_offset;
+ uint64_t rs_asize;
+ uint64_t rs_split_offset;
+ list_node_t rs_node;
+} remap_segment_t;
+
+remap_segment_t *
+rs_alloc(vdev_t *vd, uint64_t offset, uint64_t asize, uint64_t split_offset)
+{
+ remap_segment_t *rs = kmem_alloc(sizeof (remap_segment_t), KM_SLEEP);
+ rs->rs_vd = vd;
+ rs->rs_offset = offset;
+ rs->rs_asize = asize;
+ rs->rs_split_offset = split_offset;
+ return (rs);
+}
+
+/*
+ * Goes through the relevant indirect mappings until it hits a concrete vdev
+ * and issues the callback. On the way to the concrete vdev, if any other
+ * indirect vdevs are encountered, then the callback will also be called on
+ * each of those indirect vdevs. For example, if the segment is mapped to
+ * segment A on indirect vdev 1, and then segment A on indirect vdev 1 is
+ * mapped to segment B on concrete vdev 2, then the callback will be called on
+ * both vdev 1 and vdev 2.
+ *
+ * While the callback passed to vdev_indirect_remap() is called on every vdev
+ * the function encounters, certain callbacks only care about concrete vdevs.
+ * These types of callbacks should return immediately and explicitly when they
+ * are called on an indirect vdev.
+ *
+ * Because there is a possibility that a DVA section in the indirect device
+ * has been split into multiple sections in our mapping, we keep track
+ * of the relevant contiguous segments of the new location (remap_segment_t)
+ * in a stack. This way we can call the callback for each of the new sections
+ * created by a single section of the indirect device. Note though, that in
+ * this scenario the callbacks in each split block won't occur in-order in
+ * terms of offset, so callers should not make any assumptions about that.
+ *
+ * For callbacks that don't handle split blocks and immediately return when
+ * they encounter them (as is the case for remap_blkptr_cb), the caller can
+ * assume that its callback will be applied from the first indirect vdev
+ * encountered to the last one and then the concrete vdev, in that order.
+ */
+static void
+vdev_indirect_remap(vdev_t *vd, uint64_t offset, uint64_t asize,
+ void (*func)(uint64_t, vdev_t *, uint64_t, uint64_t, void *), void *arg)
+{
+ list_t stack;
+ spa_t *spa = vd->vdev_spa;
+
+ list_create(&stack, sizeof (remap_segment_t),
+ offsetof(remap_segment_t, rs_node));
+
+ for (remap_segment_t *rs = rs_alloc(vd, offset, asize, 0);
+ rs != NULL; rs = list_remove_head(&stack)) {
+ vdev_t *v = rs->rs_vd;
+
+ /*
+ * Note: this can be called from open context
+ * (eg. zio_read()), so we need the rwlock to prevent
+ * the mapping from being changed by condensing.
+ */
+ rw_enter(&v->vdev_indirect_rwlock, RW_READER);
+ vdev_indirect_mapping_t *vim = v->vdev_indirect_mapping;
+ ASSERT3P(vim, !=, NULL);
+
+ ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
+ ASSERT(rs->rs_asize > 0);
+
+ vdev_indirect_mapping_entry_phys_t *mapping =
+ vdev_indirect_mapping_entry_for_offset(vim, rs->rs_offset);
+ ASSERT3P(mapping, !=, NULL);
+
+ while (rs->rs_asize > 0) {
+ /*
+ * Note: the vdev_indirect_mapping can not change
+ * while we are running. It only changes while the
+ * removal is in progress, and then only from syncing
+ * context. While a removal is in progress, this
+ * function is only called for frees, which also only
+ * happen from syncing context.
+ */
+
+ uint64_t size = DVA_GET_ASIZE(&mapping->vimep_dst);
+ uint64_t dst_offset =
+ DVA_GET_OFFSET(&mapping->vimep_dst);
+ uint64_t dst_vdev = DVA_GET_VDEV(&mapping->vimep_dst);
+
+ ASSERT3U(rs->rs_offset, >=,
+ DVA_MAPPING_GET_SRC_OFFSET(mapping));
+ ASSERT3U(rs->rs_offset, <,
+ DVA_MAPPING_GET_SRC_OFFSET(mapping) + size);
+ ASSERT3U(dst_vdev, !=, v->vdev_id);
+
+ uint64_t inner_offset = rs->rs_offset -
+ DVA_MAPPING_GET_SRC_OFFSET(mapping);
+ uint64_t inner_size =
+ MIN(rs->rs_asize, size - inner_offset);
+
+ vdev_t *dst_v = vdev_lookup_top(spa, dst_vdev);
+ ASSERT3P(dst_v, !=, NULL);
+
+ if (dst_v->vdev_ops == &vdev_indirect_ops) {
+ list_insert_head(&stack,
+ rs_alloc(dst_v, dst_offset + inner_offset,
+ inner_size, rs->rs_split_offset));
+ }
+
+ if ((zfs_flags & ZFS_DEBUG_INDIRECT_REMAP) &&
+ IS_P2ALIGNED(inner_size, 2 * SPA_MINBLOCKSIZE)) {
+ /*
+ * Note: This clause exists solely for
+ * testing purposes. We use it to ensure that
+ * split blocks work and that the callbacks
+ * using them yield the same result if issued
+ * in reverse order.
+ */
+ uint64_t inner_half = inner_size / 2;
+
+ func(rs->rs_split_offset + inner_half, dst_v,
+ dst_offset + inner_offset + inner_half,
+ inner_half, arg);
+
+ func(rs->rs_split_offset, dst_v,
+ dst_offset + inner_offset,
+ inner_half, arg);
+ } else {
+ func(rs->rs_split_offset, dst_v,
+ dst_offset + inner_offset,
+ inner_size, arg);
+ }
+
+ rs->rs_offset += inner_size;
+ rs->rs_asize -= inner_size;
+ rs->rs_split_offset += inner_size;
+ mapping++;
+ }
+
+ rw_exit(&v->vdev_indirect_rwlock);
+ kmem_free(rs, sizeof (remap_segment_t));
+ }
+ list_destroy(&stack);
+}
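The stack-based traversal implemented above can be reduced to a small stand-alone model (hypothetical node_t, seg_t and remap() names; a single mapping target per indirect level instead of a full mapping table), showing why the callback fires on each mapped-to indirect vdev along the way and finally on the concrete one:

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

#define	MAX_STACK	16

typedef struct node {
	const char	*n_name;
	bool		n_indirect;
	const struct node *n_dst;	/* indirect only: mapping target */
	uint64_t	n_dst_offset;	/* where offset 0 lands on n_dst */
} node_t;

typedef struct seg {
	const node_t	*s_node;
	uint64_t	s_offset;
} seg_t;

static void
remap(const node_t *start, uint64_t offset,
    void (*func)(const node_t *, uint64_t))
{
	seg_t stack[MAX_STACK];
	int top = 0;

	stack[top++] = (seg_t){ start, offset };
	while (top > 0) {
		seg_t s = stack[--top];
		const node_t *dst = s.s_node->n_dst;
		uint64_t dst_offset = s.s_node->n_dst_offset + s.s_offset;

		/* The callback fires on every mapped-to vdev, indirect or not. */
		func(dst, dst_offset);
		if (dst->n_indirect) {
			/* Keep resolving until we reach a concrete vdev. */
			stack[top++] = (seg_t){ dst, dst_offset };
		}
	}
}

static void
print_hop(const node_t *n, uint64_t offset)
{
	printf("%s @ %llu\n", n->n_name, (unsigned long long)offset);
}

int
main(void)
{
	node_t concrete = { "concrete vdev 2", false, NULL, 0 };
	node_t indirect1 = { "indirect vdev 1", true, &concrete, 5000 };
	node_t indirect0 = { "indirect vdev 0", true, &indirect1, 1000 };

	/* Prints indirect vdev 1, then concrete vdev 2. */
	remap(&indirect0, 128, print_hop);
	return (0);
}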
+
+static void
+vdev_indirect_child_io_done(zio_t *zio)
+{
+ zio_t *pio = zio->io_private;
+
+ mutex_enter(&pio->io_lock);
+ pio->io_error = zio_worst_error(pio->io_error, zio->io_error);
+ mutex_exit(&pio->io_lock);
+
+ abd_put(zio->io_abd);
+}
+
+static void
+vdev_indirect_io_start_cb(uint64_t split_offset, vdev_t *vd, uint64_t offset,
+ uint64_t size, void *arg)
+{
+ zio_t *zio = arg;
+
+ ASSERT3P(vd, !=, NULL);
+
+ if (vd->vdev_ops == &vdev_indirect_ops)
+ return;
+
+ zio_nowait(zio_vdev_child_io(zio, NULL, vd, offset,
+ abd_get_offset(zio->io_abd, split_offset),
+ size, zio->io_type, zio->io_priority,
+ 0, vdev_indirect_child_io_done, zio));
+}
+
+static void
+vdev_indirect_io_start(zio_t *zio)
+{
+ ASSERTV(spa_t *spa = zio->io_spa);
+
+ ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0);
+ if (zio->io_type != ZIO_TYPE_READ) {
+ ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
+ ASSERT((zio->io_flags &
+ (ZIO_FLAG_SELF_HEAL | ZIO_FLAG_INDUCE_DAMAGE)) != 0);
+ }
+
+ vdev_indirect_remap(zio->io_vd, zio->io_offset, zio->io_size,
+ vdev_indirect_io_start_cb, zio);
+
+ zio_execute(zio);
+}
+
+vdev_ops_t vdev_indirect_ops = {
+ vdev_indirect_open,
+ vdev_indirect_close,
+ vdev_default_asize,
+ vdev_indirect_io_start,
+ vdev_indirect_io_done,
+ NULL,
+ NULL,
+ NULL,
+ NULL,
+ vdev_indirect_remap,
+ VDEV_TYPE_INDIRECT, /* name of this vdev type */
+ B_FALSE /* leaf vdev */
+};
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(rs_alloc);
+EXPORT_SYMBOL(spa_condense_fini);
+EXPORT_SYMBOL(spa_condense_indirect_restart);
+EXPORT_SYMBOL(spa_condense_indirect_start_sync);
+EXPORT_SYMBOL(spa_condense_init);
+EXPORT_SYMBOL(spa_vdev_indirect_mark_obsolete);
+EXPORT_SYMBOL(vdev_indirect_mark_obsolete);
+EXPORT_SYMBOL(vdev_indirect_should_condense);
+EXPORT_SYMBOL(vdev_indirect_sync_obsolete);
+EXPORT_SYMBOL(vdev_obsolete_counts_are_precise);
+EXPORT_SYMBOL(vdev_obsolete_sm_object);
+
+/* CSTYLED */
+module_param(zfs_condense_min_mapping_bytes, ulong, 0644);
+MODULE_PARM_DESC(zfs_condense_min_mapping_bytes,
+ "Minimum size of vdev mapping to condense");
+
+module_param(zfs_condense_indirect_commit_entry_delay_ms, int, 0644);
+MODULE_PARM_DESC(zfs_condense_indirect_commit_entry_delay_ms,
+ "Delay while condensing vdev mapping");
+#endif
diff --git a/module/zfs/vdev_indirect_births.c b/module/zfs/vdev_indirect_births.c
new file mode 100644
index 000000000000..a0163b2e5d63
--- /dev/null
+++ b/module/zfs/vdev_indirect_births.c
@@ -0,0 +1,226 @@
+/*
+ * CDDL HEADER START
+ *
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2015 by Delphix. All rights reserved.
+ */
+
+#include <sys/dmu_tx.h>
+#include <sys/spa.h>
+#include <sys/dmu.h>
+#include <sys/dsl_pool.h>
+#include <sys/vdev_indirect_births.h>
+
+#ifdef ZFS_DEBUG
+static boolean_t
+vdev_indirect_births_verify(vdev_indirect_births_t *vib)
+{
+ ASSERT(vib != NULL);
+
+ ASSERT(vib->vib_object != 0);
+ ASSERT(vib->vib_objset != NULL);
+ ASSERT(vib->vib_phys != NULL);
+ ASSERT(vib->vib_dbuf != NULL);
+
+ EQUIV(vib->vib_phys->vib_count > 0, vib->vib_entries != NULL);
+
+ return (B_TRUE);
+}
+#endif
+
+uint64_t
+vdev_indirect_births_count(vdev_indirect_births_t *vib)
+{
+ ASSERT(vdev_indirect_births_verify(vib));
+
+ return (vib->vib_phys->vib_count);
+}
+
+uint64_t
+vdev_indirect_births_object(vdev_indirect_births_t *vib)
+{
+ ASSERT(vdev_indirect_births_verify(vib));
+
+ return (vib->vib_object);
+}
+
+static uint64_t
+vdev_indirect_births_size_impl(vdev_indirect_births_t *vib)
+{
+ return (vib->vib_phys->vib_count * sizeof (*vib->vib_entries));
+}
+
+void
+vdev_indirect_births_close(vdev_indirect_births_t *vib)
+{
+ ASSERT(vdev_indirect_births_verify(vib));
+
+ if (vib->vib_phys->vib_count > 0) {
+ uint64_t births_size = vdev_indirect_births_size_impl(vib);
+
+ kmem_free(vib->vib_entries, births_size);
+ vib->vib_entries = NULL;
+ }
+
+ dmu_buf_rele(vib->vib_dbuf, vib);
+
+ vib->vib_objset = NULL;
+ vib->vib_object = 0;
+ vib->vib_dbuf = NULL;
+ vib->vib_phys = NULL;
+
+ kmem_free(vib, sizeof (*vib));
+}
+
+uint64_t
+vdev_indirect_births_alloc(objset_t *os, dmu_tx_t *tx)
+{
+ ASSERT(dmu_tx_is_syncing(tx));
+
+ return (dmu_object_alloc(os,
+ DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
+ DMU_OTN_UINT64_METADATA, sizeof (vdev_indirect_birth_phys_t),
+ tx));
+}
+
+vdev_indirect_births_t *
+vdev_indirect_births_open(objset_t *os, uint64_t births_object)
+{
+ vdev_indirect_births_t *vib = kmem_zalloc(sizeof (*vib), KM_SLEEP);
+
+ vib->vib_objset = os;
+ vib->vib_object = births_object;
+
+ VERIFY0(dmu_bonus_hold(os, vib->vib_object, vib, &vib->vib_dbuf));
+ vib->vib_phys = vib->vib_dbuf->db_data;
+
+ if (vib->vib_phys->vib_count > 0) {
+ uint64_t births_size = vdev_indirect_births_size_impl(vib);
+ vib->vib_entries = kmem_alloc(births_size, KM_SLEEP);
+ VERIFY0(dmu_read(vib->vib_objset, vib->vib_object, 0,
+ births_size, vib->vib_entries, DMU_READ_PREFETCH));
+ }
+
+ ASSERT(vdev_indirect_births_verify(vib));
+
+ return (vib);
+}
+
+void
+vdev_indirect_births_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
+{
+ VERIFY0(dmu_object_free(os, object, tx));
+}
+
+void
+vdev_indirect_births_add_entry(vdev_indirect_births_t *vib,
+ uint64_t max_offset, uint64_t txg, dmu_tx_t *tx)
+{
+ vdev_indirect_birth_entry_phys_t vibe;
+ uint64_t old_size;
+ uint64_t new_size;
+ vdev_indirect_birth_entry_phys_t *new_entries;
+
+ ASSERT(dmu_tx_is_syncing(tx));
+ ASSERT(dsl_pool_sync_context(dmu_tx_pool(tx)));
+ ASSERT(vdev_indirect_births_verify(vib));
+
+ dmu_buf_will_dirty(vib->vib_dbuf, tx);
+
+ vibe.vibe_offset = max_offset;
+ vibe.vibe_phys_birth_txg = txg;
+
+ old_size = vdev_indirect_births_size_impl(vib);
+ dmu_write(vib->vib_objset, vib->vib_object, old_size, sizeof (vibe),
+ &vibe, tx);
+ vib->vib_phys->vib_count++;
+ new_size = vdev_indirect_births_size_impl(vib);
+
+ new_entries = kmem_alloc(new_size, KM_SLEEP);
+ if (old_size > 0) {
+ bcopy(vib->vib_entries, new_entries, old_size);
+ kmem_free(vib->vib_entries, old_size);
+ }
+ new_entries[vib->vib_phys->vib_count - 1] = vibe;
+ vib->vib_entries = new_entries;
+}
+
+uint64_t
+vdev_indirect_births_last_entry_txg(vdev_indirect_births_t *vib)
+{
+ ASSERT(vdev_indirect_births_verify(vib));
+ ASSERT(vib->vib_phys->vib_count > 0);
+
+ vdev_indirect_birth_entry_phys_t *last =
+ &vib->vib_entries[vib->vib_phys->vib_count - 1];
+ return (last->vibe_phys_birth_txg);
+}
+
+/*
+ * Return the txg in which the given range was copied (i.e. its physical
+ * birth txg). The specified offset+asize must be contiguously mapped
+ * (i.e. not a split block).
+ *
+ * The entries are sorted by increasing phys_birth, and also by increasing
+ * offset. We find the specified offset by binary search. Note that we
+ * can not use bsearch() because looking at each entry independently is
+ * insufficient to find the correct entry. Each entry implicitly relies
+ * on the previous entry: an entry indicates that the offsets from the
+ * end of the previous entry to the end of this entry were written in the
+ * specified txg.
+ */
+uint64_t
+vdev_indirect_births_physbirth(vdev_indirect_births_t *vib, uint64_t offset,
+ uint64_t asize)
+{
+ vdev_indirect_birth_entry_phys_t *base;
+ vdev_indirect_birth_entry_phys_t *last;
+
+ ASSERT(vdev_indirect_births_verify(vib));
+ ASSERT(vib->vib_phys->vib_count > 0);
+
+ base = vib->vib_entries;
+ last = base + vib->vib_phys->vib_count - 1;
+
+ ASSERT3U(offset, <, last->vibe_offset);
+
+ while (last >= base) {
+ vdev_indirect_birth_entry_phys_t *p =
+ base + ((last - base) / 2);
+ if (offset >= p->vibe_offset) {
+ base = p + 1;
+ } else if (p == vib->vib_entries ||
+ offset >= (p - 1)->vibe_offset) {
+ ASSERT3U(offset + asize, <=, p->vibe_offset);
+ return (p->vibe_phys_birth_txg);
+ } else {
+ last = p - 1;
+ }
+ }
+ ASSERT(!"offset not found");
+ return (-1);
+}
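The cumulative-offset search described in the comment above can be restated over plain arrays (hypothetical birth_t and physbirth() names): each entry only has meaning together with the previous entry's offset, which is why the stock bsearch() comparator interface is not enough.

#include <stdint.h>
#include <stdio.h>

typedef struct birth {
	uint64_t b_offset;	/* end (exclusive) of the range this entry covers */
	uint64_t b_txg;		/* txg in which that range was copied */
} birth_t;

/*
 * Entry i covers [entries[i - 1].b_offset, entries[i].b_offset), with an
 * implicit starting offset of 0 for the first entry.
 */
static uint64_t
physbirth(const birth_t *entries, int count, uint64_t offset)
{
	int lo = 0, hi = count - 1;

	while (lo <= hi) {
		int mid = lo + (hi - lo) / 2;
		uint64_t start = (mid == 0) ? 0 : entries[mid - 1].b_offset;

		if (offset >= entries[mid].b_offset)
			lo = mid + 1;
		else if (offset >= start)
			return (entries[mid].b_txg);
		else
			hi = mid - 1;
	}
	return (UINT64_MAX);	/* not covered by any entry */
}

int
main(void)
{
	/* [0,100) copied in txg 5, [100,250) in txg 7, [250,300) in txg 9. */
	birth_t entries[] = { { 100, 5 }, { 250, 7 }, { 300, 9 } };

	printf("%llu\n", (unsigned long long)physbirth(entries, 3, 120)); /* 7 */
	printf("%llu\n", (unsigned long long)physbirth(entries, 3, 0));   /* 5 */
	printf("%llu\n", (unsigned long long)physbirth(entries, 3, 299)); /* 9 */
	return (0);
}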
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(vdev_indirect_births_add_entry);
+EXPORT_SYMBOL(vdev_indirect_births_alloc);
+EXPORT_SYMBOL(vdev_indirect_births_close);
+EXPORT_SYMBOL(vdev_indirect_births_count);
+EXPORT_SYMBOL(vdev_indirect_births_free);
+EXPORT_SYMBOL(vdev_indirect_births_last_entry_txg);
+EXPORT_SYMBOL(vdev_indirect_births_object);
+EXPORT_SYMBOL(vdev_indirect_births_open);
+EXPORT_SYMBOL(vdev_indirect_births_physbirth);
+#endif
diff --git a/module/zfs/vdev_indirect_mapping.c b/module/zfs/vdev_indirect_mapping.c
new file mode 100644
index 000000000000..dbd6a7635b49
--- /dev/null
+++ b/module/zfs/vdev_indirect_mapping.c
@@ -0,0 +1,616 @@
+/*
+ * CDDL HEADER START
+ *
+ * This file and its contents are supplied under the terms of the
+ * Common Development and Distribution License ("CDDL"), version 1.0.
+ * You may only use this file in accordance with the terms of version
+ * 1.0 of the CDDL.
+ *
+ * A full copy of the text of the CDDL should have accompanied this
+ * source. A copy of the CDDL is also available via the Internet at
+ * http://www.illumos.org/license/CDDL.
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2015 by Delphix. All rights reserved.
+ */
+
+#include <sys/dmu_tx.h>
+#include <sys/dsl_pool.h>
+#include <sys/spa.h>
+#include <sys/vdev_impl.h>
+#include <sys/vdev_indirect_mapping.h>
+#include <sys/zfeature.h>
+#include <sys/dmu_objset.h>
+
+#ifdef ZFS_DEBUG
+static boolean_t
+vdev_indirect_mapping_verify(vdev_indirect_mapping_t *vim)
+{
+ ASSERT(vim != NULL);
+
+ ASSERT(vim->vim_object != 0);
+ ASSERT(vim->vim_objset != NULL);
+ ASSERT(vim->vim_phys != NULL);
+ ASSERT(vim->vim_dbuf != NULL);
+
+ EQUIV(vim->vim_phys->vimp_num_entries > 0,
+ vim->vim_entries != NULL);
+ if (vim->vim_phys->vimp_num_entries > 0) {
+ ASSERTV(vdev_indirect_mapping_entry_phys_t *last_entry =
+ &vim->vim_entries[vim->vim_phys->vimp_num_entries - 1]);
+ ASSERTV(uint64_t offset =
+ DVA_MAPPING_GET_SRC_OFFSET(last_entry));
+ ASSERTV(uint64_t size = DVA_GET_ASIZE(&last_entry->vimep_dst));
+
+ ASSERT3U(vim->vim_phys->vimp_max_offset, >=, offset + size);
+ }
+ if (vim->vim_havecounts) {
+ ASSERT(vim->vim_phys->vimp_counts_object != 0);
+ }
+
+ return (B_TRUE);
+}
+#endif
+
+uint64_t
+vdev_indirect_mapping_num_entries(vdev_indirect_mapping_t *vim)
+{
+ ASSERT(vdev_indirect_mapping_verify(vim));
+
+ return (vim->vim_phys->vimp_num_entries);
+}
+
+uint64_t
+vdev_indirect_mapping_max_offset(vdev_indirect_mapping_t *vim)
+{
+ ASSERT(vdev_indirect_mapping_verify(vim));
+
+ return (vim->vim_phys->vimp_max_offset);
+}
+
+uint64_t
+vdev_indirect_mapping_object(vdev_indirect_mapping_t *vim)
+{
+ ASSERT(vdev_indirect_mapping_verify(vim));
+
+ return (vim->vim_object);
+}
+
+uint64_t
+vdev_indirect_mapping_bytes_mapped(vdev_indirect_mapping_t *vim)
+{
+ ASSERT(vdev_indirect_mapping_verify(vim));
+
+ return (vim->vim_phys->vimp_bytes_mapped);
+}
+
+/*
+ * The length (in bytes) of the mapping object array in memory and
+ * (logically) on disk.
+ *
+ * Note that unlike most of our accessor functions,
+ * we don't assert that the struct is consistent; therefore it can be
+ * called while there may be concurrent changes, if we don't care about
+ * the value being immediately stale (e.g. from spa_removal_get_stats()).
+ */
+uint64_t
+vdev_indirect_mapping_size(vdev_indirect_mapping_t *vim)
+{
+ return (vim->vim_phys->vimp_num_entries * sizeof (*vim->vim_entries));
+}
+
+/*
+ * Compare an offset with an indirect mapping entry; there are three
+ * possible scenarios:
+ *
+ * 1. The offset is "less than" the mapping entry; meaning the
+ * offset is less than the source offset of the mapping entry. In
+ * this case, there is no overlap between the offset and the
+ * mapping entry and -1 will be returned.
+ *
+ * 2. The offset is "greater than" the mapping entry; meaning the
+ * offset is greater than the mapping entry's source offset plus
+ * the entry's size. In this case, there is no overlap between
+ * the offset and the mapping entry and 1 will be returned.
+ *
+ * NOTE: If the offset is actually equal to the entry's offset
+ * plus size, this is considered to be "greater" than the entry,
+ * and this case applies (i.e. 1 will be returned). Thus, the
+ * entry's "range" can be considered to be inclusive at its
+ * start, but exclusive at its end: e.g. [src, src + size).
+ *
+ * 3. The last case to consider is if the offset actually falls
+ * within the mapping entry's range. If this is the case, the
+ * offset is considered to be "equal to" the mapping entry and
+ * 0 will be returned.
+ *
+ * NOTE: If the offset is equal to the entry's source offset,
+ * this case applies and 0 will be returned. If the offset is
+ * equal to the entry's source plus its size, this case does
+ * *not* apply (see "NOTE" above for scenario 2), and 1 will be
+ * returned.
+ */
+static int
+dva_mapping_overlap_compare(const void *v_key, const void *v_array_elem)
+{
+ const uint64_t * const key = v_key;
+ const vdev_indirect_mapping_entry_phys_t * const array_elem =
+ v_array_elem;
+ uint64_t src_offset = DVA_MAPPING_GET_SRC_OFFSET(array_elem);
+
+ if (*key < src_offset) {
+ return (-1);
+ } else if (*key < src_offset + DVA_GET_ASIZE(&array_elem->vimep_dst)) {
+ return (0);
+ } else {
+ return (1);
+ }
+}
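A quick stand-alone restatement of the comparator's half-open-interval behaviour, using a plain (src_offset, size) pair instead of the packed mapping entry (the values below are made up for illustration):

#include <stdint.h>
#include <assert.h>

static int
overlap_compare(uint64_t key, uint64_t src_offset, uint64_t size)
{
	if (key < src_offset)
		return (-1);
	else if (key < src_offset + size)
		return (0);
	else
		return (1);
}

int
main(void)
{
	/* An entry covering the half-open range [100, 150). */
	assert(overlap_compare(99, 100, 50) == -1);
	assert(overlap_compare(100, 100, 50) == 0);	/* start is inclusive */
	assert(overlap_compare(149, 100, 50) == 0);
	assert(overlap_compare(150, 100, 50) == 1);	/* end is exclusive */
	return (0);
}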
+
+/*
+ * Returns the mapping entry for the given offset.
+ *
+ * It's possible that the given offset will not be in the mapping table
+ * (i.e. no mapping entries contain this offset), in which case, the
+ * return value value depends on the "next_if_missing" parameter.
+ *
+ * If the offset is not found in the table and "next_if_missing" is
+ * B_FALSE, then NULL will always be returned. The behavior is intended
+ * to allow consumers to get the entry corresponding to the offset
+ * parameter, iff the offset overlaps with an entry in the table.
+ *
+ * If the offset is not found in the table and "next_if_missing" is
+ * B_TRUE, then the entry nearest to the given offset will be returned,
+ * such that the entry's source offset is greater than the offset
+ * passed in (i.e. the "next" mapping entry in the table is returned, if
+ * the offset is missing from the table). If there are no entries whose
+ * source offset is greater than the passed in offset, NULL is returned.
+ */
+static vdev_indirect_mapping_entry_phys_t *
+vdev_indirect_mapping_entry_for_offset_impl(vdev_indirect_mapping_t *vim,
+ uint64_t offset, boolean_t next_if_missing)
+{
+ ASSERT(vdev_indirect_mapping_verify(vim));
+ ASSERT(vim->vim_phys->vimp_num_entries > 0);
+
+ vdev_indirect_mapping_entry_phys_t *entry = NULL;
+
+ uint64_t last = vim->vim_phys->vimp_num_entries - 1;
+ uint64_t base = 0;
+
+ /*
+ * We don't define these inside the while loop because we use
+ * their values when the offset isn't in the mapping.
+ */
+ uint64_t mid;
+ int result;
+
+ while (last >= base) {
+ mid = base + ((last - base) >> 1);
+
+ result = dva_mapping_overlap_compare(&offset,
+ &vim->vim_entries[mid]);
+
+ if (result == 0) {
+ entry = &vim->vim_entries[mid];
+ break;
+ } else if (result < 0) {
+ last = mid - 1;
+ } else {
+ base = mid + 1;
+ }
+ }
+
+ if (entry == NULL && next_if_missing) {
+ ASSERT3U(base, ==, last + 1);
+ ASSERT(mid == base || mid == last);
+ ASSERT3S(result, !=, 0);
+
+ /*
+ * The offset we're looking for isn't actually contained
+ * in the mapping table, thus we need to return the
+ * closest mapping entry that is greater than the
+ * offset. We reuse the result of the last comparison,
+ * comparing the mapping entry at index "mid" and the
+ * offset. The offset is guaranteed to lie between
+ * indices one less than "mid", and one greater than
+ * "mid"; we just need to determine if offset is greater
+ * than, or less than the mapping entry contained at
+ * index "mid".
+ */
+
+ uint64_t index;
+ if (result < 0)
+ index = mid;
+ else
+ index = mid + 1;
+
+ ASSERT3U(index, <=, vim->vim_phys->vimp_num_entries);
+
+ if (index == vim->vim_phys->vimp_num_entries) {
+ /*
+ * If "index" is past the end of the entries
+ * array, then not only is the offset not in the
+ * mapping table, but it's actually greater than
+ * all entries in the table. In this case, we
+ * can't return a mapping entry greater than the
+ * offset (since none exist), so we return NULL.
+ */
+
+ ASSERT3S(dva_mapping_overlap_compare(&offset,
+ &vim->vim_entries[index - 1]), >, 0);
+
+ return (NULL);
+ } else {
+ /*
+ * Just to be safe, we verify the offset falls
+ * in between the mapping entries at index and
+ * one less than index. Since we know the offset
+ * doesn't overlap an entry, and we're supposed
+ * to return the entry just greater than the
+ * offset, both of the following tests must be
+ * true.
+ */
+ ASSERT3S(dva_mapping_overlap_compare(&offset,
+ &vim->vim_entries[index]), <, 0);
+ IMPLY(index >= 1, dva_mapping_overlap_compare(&offset,
+ &vim->vim_entries[index - 1]) > 0);
+
+ return (&vim->vim_entries[index]);
+ }
+ } else {
+ return (entry);
+ }
+}
+
+vdev_indirect_mapping_entry_phys_t *
+vdev_indirect_mapping_entry_for_offset(vdev_indirect_mapping_t *vim,
+ uint64_t offset)
+{
+ return (vdev_indirect_mapping_entry_for_offset_impl(vim, offset,
+ B_FALSE));
+}
+
+vdev_indirect_mapping_entry_phys_t *
+vdev_indirect_mapping_entry_for_offset_or_next(vdev_indirect_mapping_t *vim,
+ uint64_t offset)
+{
+ return (vdev_indirect_mapping_entry_for_offset_impl(vim, offset,
+ B_TRUE));
+}
+
+void
+vdev_indirect_mapping_close(vdev_indirect_mapping_t *vim)
+{
+ ASSERT(vdev_indirect_mapping_verify(vim));
+
+ if (vim->vim_phys->vimp_num_entries > 0) {
+ uint64_t map_size = vdev_indirect_mapping_size(vim);
+ vmem_free(vim->vim_entries, map_size);
+ vim->vim_entries = NULL;
+ }
+
+ dmu_buf_rele(vim->vim_dbuf, vim);
+
+ vim->vim_objset = NULL;
+ vim->vim_object = 0;
+ vim->vim_dbuf = NULL;
+ vim->vim_phys = NULL;
+
+ kmem_free(vim, sizeof (*vim));
+}
+
+uint64_t
+vdev_indirect_mapping_alloc(objset_t *os, dmu_tx_t *tx)
+{
+ uint64_t object;
+ ASSERT(dmu_tx_is_syncing(tx));
+ uint64_t bonus_size = VDEV_INDIRECT_MAPPING_SIZE_V0;
+
+ if (spa_feature_is_enabled(os->os_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
+ bonus_size = sizeof (vdev_indirect_mapping_phys_t);
+ }
+
+ object = dmu_object_alloc(os,
+ DMU_OTN_UINT64_METADATA, SPA_OLD_MAXBLOCKSIZE,
+ DMU_OTN_UINT64_METADATA, bonus_size,
+ tx);
+
+ if (spa_feature_is_enabled(os->os_spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
+ dmu_buf_t *dbuf;
+ vdev_indirect_mapping_phys_t *vimp;
+
+ VERIFY0(dmu_bonus_hold(os, object, FTAG, &dbuf));
+ dmu_buf_will_dirty(dbuf, tx);
+ vimp = dbuf->db_data;
+ vimp->vimp_counts_object = dmu_object_alloc(os,
+ DMU_OTN_UINT32_METADATA, SPA_OLD_MAXBLOCKSIZE,
+ DMU_OT_NONE, 0, tx);
+ spa_feature_incr(os->os_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+ dmu_buf_rele(dbuf, FTAG);
+ }
+
+ return (object);
+}
+
+vdev_indirect_mapping_t *
+vdev_indirect_mapping_open(objset_t *os, uint64_t mapping_object)
+{
+ vdev_indirect_mapping_t *vim = kmem_zalloc(sizeof (*vim), KM_SLEEP);
+ dmu_object_info_t doi;
+ VERIFY0(dmu_object_info(os, mapping_object, &doi));
+
+ vim->vim_objset = os;
+ vim->vim_object = mapping_object;
+
+ VERIFY0(dmu_bonus_hold(os, vim->vim_object, vim,
+ &vim->vim_dbuf));
+ vim->vim_phys = vim->vim_dbuf->db_data;
+
+ vim->vim_havecounts =
+ (doi.doi_bonus_size > VDEV_INDIRECT_MAPPING_SIZE_V0);
+
+ if (vim->vim_phys->vimp_num_entries > 0) {
+ uint64_t map_size = vdev_indirect_mapping_size(vim);
+ vim->vim_entries = vmem_alloc(map_size, KM_SLEEP);
+ VERIFY0(dmu_read(os, vim->vim_object, 0, map_size,
+ vim->vim_entries, DMU_READ_PREFETCH));
+ }
+
+ ASSERT(vdev_indirect_mapping_verify(vim));
+
+ return (vim);
+}
+
+void
+vdev_indirect_mapping_free(objset_t *os, uint64_t object, dmu_tx_t *tx)
+{
+ vdev_indirect_mapping_t *vim = vdev_indirect_mapping_open(os, object);
+ if (vim->vim_havecounts) {
+ VERIFY0(dmu_object_free(os, vim->vim_phys->vimp_counts_object,
+ tx));
+ spa_feature_decr(os->os_spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+ }
+ vdev_indirect_mapping_close(vim);
+
+ VERIFY0(dmu_object_free(os, object, tx));
+}
+
+/*
+ * Append the list of vdev_indirect_mapping_entry_t's to the on-disk
+ * mapping object. Also remove the entries from the list and free them.
+ * This also implicitly extends the max_offset of the mapping (to the end
+ * of the last entry).
+ */
+void
+vdev_indirect_mapping_add_entries(vdev_indirect_mapping_t *vim,
+ list_t *list, dmu_tx_t *tx)
+{
+ vdev_indirect_mapping_entry_phys_t *mapbuf;
+ uint64_t old_size;
+ uint32_t *countbuf = NULL;
+ vdev_indirect_mapping_entry_phys_t *old_entries;
+ uint64_t old_count;
+ uint64_t entries_written = 0;
+
+ ASSERT(vdev_indirect_mapping_verify(vim));
+ ASSERT(dmu_tx_is_syncing(tx));
+ ASSERT(dsl_pool_sync_context(dmu_tx_pool(tx)));
+ ASSERT(!list_is_empty(list));
+
+ old_size = vdev_indirect_mapping_size(vim);
+ old_entries = vim->vim_entries;
+ old_count = vim->vim_phys->vimp_num_entries;
+
+ dmu_buf_will_dirty(vim->vim_dbuf, tx);
+
+ mapbuf = vmem_alloc(SPA_OLD_MAXBLOCKSIZE, KM_SLEEP);
+ if (vim->vim_havecounts) {
+ countbuf = vmem_alloc(SPA_OLD_MAXBLOCKSIZE, KM_SLEEP);
+ ASSERT(spa_feature_is_active(vim->vim_objset->os_spa,
+ SPA_FEATURE_OBSOLETE_COUNTS));
+ }
+ while (!list_is_empty(list)) {
+ uint64_t i;
+ /*
+ * Write entries from the list to the
+ * vdev_im_object in batches of size SPA_OLD_MAXBLOCKSIZE.
+ */
+ for (i = 0; i < SPA_OLD_MAXBLOCKSIZE / sizeof (*mapbuf); i++) {
+ vdev_indirect_mapping_entry_t *entry =
+ list_remove_head(list);
+ if (entry == NULL)
+ break;
+
+ uint64_t size =
+ DVA_GET_ASIZE(&entry->vime_mapping.vimep_dst);
+ uint64_t src_offset =
+ DVA_MAPPING_GET_SRC_OFFSET(&entry->vime_mapping);
+
+ /*
+ * We shouldn't be adding an entry which is fully
+ * obsolete.
+ */
+ ASSERT3U(entry->vime_obsolete_count, <, size);
+ IMPLY(entry->vime_obsolete_count != 0,
+ vim->vim_havecounts);
+
+ mapbuf[i] = entry->vime_mapping;
+ if (vim->vim_havecounts)
+ countbuf[i] = entry->vime_obsolete_count;
+
+ vim->vim_phys->vimp_bytes_mapped += size;
+ ASSERT3U(src_offset, >=,
+ vim->vim_phys->vimp_max_offset);
+ vim->vim_phys->vimp_max_offset = src_offset + size;
+
+ entries_written++;
+
+ vmem_free(entry, sizeof (*entry));
+ }
+ dmu_write(vim->vim_objset, vim->vim_object,
+ vim->vim_phys->vimp_num_entries * sizeof (*mapbuf),
+ i * sizeof (*mapbuf),
+ mapbuf, tx);
+ if (vim->vim_havecounts) {
+ dmu_write(vim->vim_objset,
+ vim->vim_phys->vimp_counts_object,
+ vim->vim_phys->vimp_num_entries *
+ sizeof (*countbuf),
+ i * sizeof (*countbuf), countbuf, tx);
+ }
+ vim->vim_phys->vimp_num_entries += i;
+ }
+ vmem_free(mapbuf, SPA_OLD_MAXBLOCKSIZE);
+ if (vim->vim_havecounts)
+ vmem_free(countbuf, SPA_OLD_MAXBLOCKSIZE);
+
+ /*
+ * Update the entry array to reflect the new entries. First, copy
+ * over any old entries, then read back the new entries we just wrote.
+ */
+ uint64_t new_size = vdev_indirect_mapping_size(vim);
+ ASSERT3U(new_size, >, old_size);
+ ASSERT3U(new_size - old_size, ==,
+ entries_written * sizeof (vdev_indirect_mapping_entry_phys_t));
+ vim->vim_entries = vmem_alloc(new_size, KM_SLEEP);
+ if (old_size > 0) {
+ bcopy(old_entries, vim->vim_entries, old_size);
+ vmem_free(old_entries, old_size);
+ }
+ VERIFY0(dmu_read(vim->vim_objset, vim->vim_object, old_size,
+ new_size - old_size, &vim->vim_entries[old_count],
+ DMU_READ_PREFETCH));
+
+ zfs_dbgmsg("txg %llu: wrote %llu entries to "
+ "indirect mapping obj %llu; max offset=0x%llx",
+ (u_longlong_t)dmu_tx_get_txg(tx),
+ (u_longlong_t)entries_written,
+ (u_longlong_t)vim->vim_object,
+ (u_longlong_t)vim->vim_phys->vimp_max_offset);
+}
+
+/*
+ * Increment the relevant counts for the specified offset and length.
+ * The counts array must be obtained from
+ * vdev_indirect_mapping_load_obsolete_counts().
+ */
+void
+vdev_indirect_mapping_increment_obsolete_count(vdev_indirect_mapping_t *vim,
+ uint64_t offset, uint64_t length, uint32_t *counts)
+{
+ vdev_indirect_mapping_entry_phys_t *mapping;
+ uint64_t index;
+
+ mapping = vdev_indirect_mapping_entry_for_offset(vim, offset);
+
+ ASSERT(length > 0);
+ ASSERT3P(mapping, !=, NULL);
+
+ index = mapping - vim->vim_entries;
+
+ while (length > 0) {
+ ASSERT3U(index, <, vdev_indirect_mapping_num_entries(vim));
+
+ uint64_t size = DVA_GET_ASIZE(&mapping->vimep_dst);
+ uint64_t inner_offset = offset -
+ DVA_MAPPING_GET_SRC_OFFSET(mapping);
+ VERIFY3U(inner_offset, <, size);
+ uint64_t inner_size = MIN(length, size - inner_offset);
+
+ VERIFY3U(counts[index] + inner_size, <=, size);
+ counts[index] += inner_size;
+
+ offset += inner_size;
+ length -= inner_size;
+ mapping++;
+ index++;
+ }
+}
+
+typedef struct load_obsolete_space_map_arg {
+ vdev_indirect_mapping_t *losma_vim;
+ uint32_t *losma_counts;
+} load_obsolete_space_map_arg_t;
+
+static int
+load_obsolete_sm_callback(maptype_t type, uint64_t offset, uint64_t size,
+ void *arg)
+{
+ load_obsolete_space_map_arg_t *losma = arg;
+ ASSERT3S(type, ==, SM_ALLOC);
+
+ vdev_indirect_mapping_increment_obsolete_count(losma->losma_vim,
+ offset, size, losma->losma_counts);
+
+ return (0);
+}
+
+/*
+ * Modify the counts (increment them) based on the spacemap.
+ */
+void
+vdev_indirect_mapping_load_obsolete_spacemap(vdev_indirect_mapping_t *vim,
+ uint32_t *counts, space_map_t *obsolete_space_sm)
+{
+ load_obsolete_space_map_arg_t losma;
+ losma.losma_counts = counts;
+ losma.losma_vim = vim;
+ VERIFY0(space_map_iterate(obsolete_space_sm,
+ load_obsolete_sm_callback, &losma));
+}
+
+/*
+ * Read the obsolete counts from disk, returning them in an array.
+ */
+uint32_t *
+vdev_indirect_mapping_load_obsolete_counts(vdev_indirect_mapping_t *vim)
+{
+ ASSERT(vdev_indirect_mapping_verify(vim));
+
+ uint64_t counts_size =
+ vim->vim_phys->vimp_num_entries * sizeof (uint32_t);
+ uint32_t *counts = vmem_alloc(counts_size, KM_SLEEP);
+ if (vim->vim_havecounts) {
+ VERIFY0(dmu_read(vim->vim_objset,
+ vim->vim_phys->vimp_counts_object,
+ 0, counts_size,
+ counts, DMU_READ_PREFETCH));
+ } else {
+ bzero(counts, counts_size);
+ }
+ return (counts);
+}
+
+void
+vdev_indirect_mapping_free_obsolete_counts(vdev_indirect_mapping_t *vim,
+ uint32_t *counts)
+{
+ ASSERT(vdev_indirect_mapping_verify(vim));
+
+ vmem_free(counts, vim->vim_phys->vimp_num_entries * sizeof (uint32_t));
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+EXPORT_SYMBOL(vdev_indirect_mapping_add_entries);
+EXPORT_SYMBOL(vdev_indirect_mapping_alloc);
+EXPORT_SYMBOL(vdev_indirect_mapping_bytes_mapped);
+EXPORT_SYMBOL(vdev_indirect_mapping_close);
+EXPORT_SYMBOL(vdev_indirect_mapping_entry_for_offset);
+EXPORT_SYMBOL(vdev_indirect_mapping_entry_for_offset_or_next);
+EXPORT_SYMBOL(vdev_indirect_mapping_free);
+EXPORT_SYMBOL(vdev_indirect_mapping_free_obsolete_counts);
+EXPORT_SYMBOL(vdev_indirect_mapping_increment_obsolete_count);
+EXPORT_SYMBOL(vdev_indirect_mapping_load_obsolete_counts);
+EXPORT_SYMBOL(vdev_indirect_mapping_load_obsolete_spacemap);
+EXPORT_SYMBOL(vdev_indirect_mapping_max_offset);
+EXPORT_SYMBOL(vdev_indirect_mapping_num_entries);
+EXPORT_SYMBOL(vdev_indirect_mapping_object);
+EXPORT_SYMBOL(vdev_indirect_mapping_open);
+EXPORT_SYMBOL(vdev_indirect_mapping_size);
+#endif
diff --git a/module/zfs/vdev_label.c b/module/zfs/vdev_label.c
index 4fee4bc7a759..70d68d9031fc 100644
--- a/module/zfs/vdev_label.c
+++ b/module/zfs/vdev_label.c
@@ -1,1515 +1,1592 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
*/
/*
* Virtual Device Labels
* ---------------------
*
* The vdev label serves several distinct purposes:
*
* 1. Uniquely identify this device as part of a ZFS pool and confirm its
* identity within the pool.
*
* 2. Verify that all the devices given in a configuration are present
* within the pool.
*
* 3. Determine the uberblock for the pool.
*
* 4. In case of an import operation, determine the configuration of the
* toplevel vdev of which it is a part.
*
* 5. If an import operation cannot find all the devices in the pool,
* provide enough information to the administrator to determine which
* devices are missing.
*
* It is important to note that while the kernel is responsible for writing the
* label, it only consumes the information in the first three cases. The
* latter information is only consumed in userland when determining the
* configuration to import a pool.
*
*
* Label Organization
* ------------------
*
* Before describing the contents of the label, it's important to understand how
* the labels are written and updated with respect to the uberblock.
*
* When the pool configuration is altered, either because it was newly created
* or a device was added, we want to update all the labels such that we can deal
* with fatal failure at any point. To this end, each disk has two labels which
* are updated before and after the uberblock is synced. Assuming we have
* labels and an uberblock with the following transaction groups:
*
* L1 UB L2
* +------+ +------+ +------+
* | | | | | |
* | t10 | | t10 | | t10 |
* | | | | | |
* +------+ +------+ +------+
*
* In this stable state, the labels and the uberblock were all updated within
* the same transaction group (10). Each label is mirrored and checksummed, so
* that we can detect when we fail partway through writing the label.
*
* In order to identify which labels are valid, the labels are written in the
* following manner:
*
* 1. For each vdev, update 'L1' to the new label
* 2. Update the uberblock
* 3. For each vdev, update 'L2' to the new label
*
* Given arbitrary failure, we can determine the correct label to use based on
* the transaction group. If we fail after updating L1 but before updating the
* UB, we will notice that L1's transaction group is greater than the uberblock,
* so L2 must be valid. If we fail after writing the uberblock but before
* writing L2, we will notice that L2's transaction group is less than L1, and
* therefore L1 is valid.
*
* Another added complexity is that not every label is updated when the config
* is synced. If we add a single device, we do not want to have to re-write
* every label for every device in the pool. This means that both L1 and L2 may
* be older than the pool uberblock, because the necessary information is stored
* on another vdev.
*
*
* On-disk Format
* --------------
*
* The vdev label consists of two distinct parts, and is wrapped within the
* vdev_label_t structure. The label includes 8k of padding to permit legacy
* VTOC disk labels, but is otherwise ignored.
*
* The first half of the label is a packed nvlist which contains pool wide
* properties, per-vdev properties, and configuration information. It is
* described in more detail below.
*
* The latter half of the label consists of a redundant array of uberblocks.
* These uberblocks are updated whenever a transaction group is committed,
* or when the configuration is updated. When a pool is loaded, we scan each
* vdev for the 'best' uberblock.
*
*
* Configuration Information
* -------------------------
*
* The nvlist describing the pool and vdev contains the following elements:
*
* version ZFS on-disk version
* name Pool name
* state Pool state
* txg Transaction group in which this label was written
* pool_guid Unique identifier for this pool
* vdev_tree An nvlist describing vdev tree.
* features_for_read
* An nvlist of the features necessary for reading the MOS.
*
* Each leaf device label also contains the following:
*
* top_guid Unique ID for top-level vdev in which this is contained
* guid Unique ID for the leaf vdev
*
* The 'vs' configuration follows the format described in 'spa_config.c'.
*/
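As an illustrative aside (not part of this patch), the label-recovery rule described in the block comment above can be sketched as a small standalone check; the only assumption is the write ordering L1 -> UB -> L2:

#include <stdint.h>

/*
 * Illustrative sketch only: decide which label copy is consistent with
 * the on-disk uberblock, given the write order L1 -> UB -> L2.
 * Returns 1 for L1, 2 for L2.
 */
static int
pick_valid_label(uint64_t l1_txg, uint64_t l2_txg, uint64_t ub_txg)
{
	if (l1_txg > ub_txg)	/* failed after L1, before the uberblock */
		return (2);	/* L2 still matches the on-disk uberblock */
	if (l2_txg < l1_txg)	/* failed after the uberblock, before L2 */
		return (1);	/* L1 was written in the uberblock's txg */
	return (2);		/* both copies updated; either is valid */
}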
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/uberblock_impl.h>
#include <sys/metaslab.h>
+#include <sys/metaslab_impl.h>
#include <sys/zio.h>
#include <sys/dsl_scan.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
/*
* Basic routines to read and write from a vdev label.
* Used throughout the rest of this file.
*/
uint64_t
vdev_label_offset(uint64_t psize, int l, uint64_t offset)
{
ASSERT(offset < sizeof (vdev_label_t));
ASSERT(P2PHASE_TYPED(psize, sizeof (vdev_label_t), uint64_t) == 0);
return (offset + l * sizeof (vdev_label_t) + (l < VDEV_LABELS / 2 ?
0 : psize - VDEV_LABELS * sizeof (vdev_label_t)));
}
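As a hedged worked example (assuming the usual on-disk constants of four labels, 256 KiB each), the arithmetic above places L0 and L1 at the front of the device and L2 and L3 at the tail:

#include <stdio.h>
#include <stdint.h>

/*
 * Illustrative sketch only: mirrors the vdev_label_offset() arithmetic
 * with the label size (256 KiB) and label count (4) hard-coded, for a
 * hypothetical 1 TiB device.
 */
#define	SKETCH_LABEL_SIZE	(256ULL * 1024)
#define	SKETCH_LABELS		4

int
main(void)
{
	uint64_t psize = 1ULL << 40;	/* assumed device size */

	for (int l = 0; l < SKETCH_LABELS; l++) {
		uint64_t off = l * SKETCH_LABEL_SIZE +
		    (l < SKETCH_LABELS / 2 ?
		    0 : psize - SKETCH_LABELS * SKETCH_LABEL_SIZE);
		printf("L%d at byte %llu\n", l, (unsigned long long)off);
	}
	return (0);
}

For a 1 TiB device this prints L0 at 0, L1 at 262144, and L2/L3 at 512 KiB and 256 KiB from the end of the device, matching the layout described above.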
/*
* Returns the vdev label number associated with the given offset.
*/
int
vdev_label_number(uint64_t psize, uint64_t offset)
{
int l;
if (offset >= psize - VDEV_LABEL_END_SIZE) {
offset -= psize - VDEV_LABEL_END_SIZE;
offset += (VDEV_LABELS / 2) * sizeof (vdev_label_t);
}
l = offset / sizeof (vdev_label_t);
return (l < VDEV_LABELS ? l : -1);
}
static void
vdev_label_read(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
ASSERT(
spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
zio_nowait(zio_read_phys(zio, vd,
vdev_label_offset(vd->vdev_psize, l, offset),
size, buf, ZIO_CHECKSUM_LABEL, done, private,
ZIO_PRIORITY_SYNC_READ, flags, B_TRUE));
}
void
vdev_label_write(zio_t *zio, vdev_t *vd, int l, abd_t *buf, uint64_t offset,
uint64_t size, zio_done_func_t *done, void *private, int flags)
{
ASSERT(
spa_config_held(zio->io_spa, SCL_STATE, RW_READER) == SCL_STATE ||
spa_config_held(zio->io_spa, SCL_STATE, RW_WRITER) == SCL_STATE);
ASSERT(flags & ZIO_FLAG_CONFIG_WRITER);
zio_nowait(zio_write_phys(zio, vd,
vdev_label_offset(vd->vdev_psize, l, offset),
size, buf, ZIO_CHECKSUM_LABEL, done, private,
ZIO_PRIORITY_SYNC_WRITE, flags, B_TRUE));
}
/*
* Generate the nvlist representing this vdev's stats
*/
void
vdev_config_generate_stats(vdev_t *vd, nvlist_t *nv)
{
nvlist_t *nvx;
vdev_stat_t *vs;
vdev_stat_ex_t *vsx;
vs = kmem_alloc(sizeof (*vs), KM_SLEEP);
vsx = kmem_alloc(sizeof (*vsx), KM_SLEEP);
vdev_get_stats_ex(vd, vs, vsx);
fnvlist_add_uint64_array(nv, ZPOOL_CONFIG_VDEV_STATS,
(uint64_t *)vs, sizeof (*vs) / sizeof (uint64_t));
kmem_free(vs, sizeof (*vs));
/*
* Add extended stats into a special extended stats nvlist. This keeps
* all the extended stats nicely grouped together. The extended stats
* nvlist is then added to the main nvlist.
*/
nvx = fnvlist_alloc();
/* ZIOs in flight to disk */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_ASYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_ACTIVE_QUEUE,
vsx->vsx_active_queue[ZIO_PRIORITY_SCRUB]);
/* ZIOs pending */
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_READ]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_ASYNC_WRITE]);
fnvlist_add_uint64(nvx, ZPOOL_CONFIG_VDEV_SCRUB_PEND_QUEUE,
vsx->vsx_pend_queue[ZIO_PRIORITY_SCRUB]);
/* Histograms */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_R_LAT_HISTO,
vsx->vsx_total_histo[ZIO_TYPE_READ],
ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_TOT_W_LAT_HISTO,
vsx->vsx_total_histo[ZIO_TYPE_WRITE],
ARRAY_SIZE(vsx->vsx_total_histo[ZIO_TYPE_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_R_LAT_HISTO,
vsx->vsx_disk_histo[ZIO_TYPE_READ],
ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_DISK_W_LAT_HISTO,
vsx->vsx_disk_histo[ZIO_TYPE_WRITE],
ARRAY_SIZE(vsx->vsx_disk_histo[ZIO_TYPE_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_R_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_W_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_R_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_W_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SCRUB_LAT_HISTO,
vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_queue_histo[ZIO_PRIORITY_SCRUB]));
/* Request sizes */
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_R_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_IND_W_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_R_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_IND_W_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_IND_SCRUB_HISTO,
vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_ind_histo[ZIO_PRIORITY_SCRUB]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_R_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_SYNC_AGG_W_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_R_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_READ]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_ASYNC_AGG_W_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_ASYNC_WRITE]));
fnvlist_add_uint64_array(nvx, ZPOOL_CONFIG_VDEV_AGG_SCRUB_HISTO,
vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB],
ARRAY_SIZE(vsx->vsx_agg_histo[ZIO_PRIORITY_SCRUB]));
/* Add extended stats nvlist to main nvlist */
fnvlist_add_nvlist(nv, ZPOOL_CONFIG_VDEV_STATS_EX, nvx);
fnvlist_free(nvx);
kmem_free(vsx, sizeof (*vsx));
}
/*
* Generate the nvlist representing this vdev's config.
*/
nvlist_t *
vdev_config_generate(spa_t *spa, vdev_t *vd, boolean_t getstats,
vdev_config_flag_t flags)
{
nvlist_t *nv = NULL;
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+
nv = fnvlist_alloc();
fnvlist_add_string(nv, ZPOOL_CONFIG_TYPE, vd->vdev_ops->vdev_op_type);
if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)))
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ID, vd->vdev_id);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_GUID, vd->vdev_guid);
if (vd->vdev_path != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_PATH, vd->vdev_path);
if (vd->vdev_devid != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_DEVID, vd->vdev_devid);
if (vd->vdev_physpath != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_PHYS_PATH,
vd->vdev_physpath);
if (vd->vdev_enc_sysfs_path != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_VDEV_ENC_SYSFS_PATH,
vd->vdev_enc_sysfs_path);
if (vd->vdev_fru != NULL)
fnvlist_add_string(nv, ZPOOL_CONFIG_FRU, vd->vdev_fru);
if (vd->vdev_nparity != 0) {
ASSERT(strcmp(vd->vdev_ops->vdev_op_type,
VDEV_TYPE_RAIDZ) == 0);
/*
* Make sure someone hasn't managed to sneak a fancy new vdev
* into a crufty old storage pool.
*/
ASSERT(vd->vdev_nparity == 1 ||
(vd->vdev_nparity <= 2 &&
spa_version(spa) >= SPA_VERSION_RAIDZ2) ||
(vd->vdev_nparity <= 3 &&
spa_version(spa) >= SPA_VERSION_RAIDZ3));
/*
* Note that we'll add the nparity tag even on storage pools
* that only support a single parity device -- older software
* will just ignore it.
*/
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NPARITY, vd->vdev_nparity);
}
if (vd->vdev_wholedisk != -1ULL)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_WHOLE_DISK,
vd->vdev_wholedisk);
if (vd->vdev_not_present)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_NOT_PRESENT, 1);
if (vd->vdev_isspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_SPARE, 1);
if (!(flags & (VDEV_CONFIG_SPARE | VDEV_CONFIG_L2CACHE)) &&
vd == vd->vdev_top) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_ARRAY,
vd->vdev_ms_array);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_METASLAB_SHIFT,
vd->vdev_ms_shift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASHIFT, vd->vdev_ashift);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ASIZE,
vd->vdev_asize);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_LOG, vd->vdev_islog);
- if (vd->vdev_removing)
+ if (vd->vdev_removing) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVING,
vd->vdev_removing);
+ }
}
if (vd->vdev_dtl_sm != NULL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DTL,
space_map_object(vd->vdev_dtl_sm));
}
+ if (vic->vic_mapping_object != 0) {
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_OBJECT,
+ vic->vic_mapping_object);
+ }
+
+ if (vic->vic_births_object != 0) {
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_BIRTHS,
+ vic->vic_births_object);
+ }
+
+ if (vic->vic_prev_indirect_vdev != UINT64_MAX) {
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_PREV_INDIRECT_VDEV,
+ vic->vic_prev_indirect_vdev);
+ }
+
if (vd->vdev_crtxg)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_CREATE_TXG, vd->vdev_crtxg);
if (flags & VDEV_CONFIG_MOS) {
if (vd->vdev_leaf_zap != 0) {
ASSERT(vd->vdev_ops->vdev_op_leaf);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_LEAF_ZAP,
vd->vdev_leaf_zap);
}
if (vd->vdev_top_zap != 0) {
ASSERT(vd == vd->vdev_top);
fnvlist_add_uint64(nv, ZPOOL_CONFIG_VDEV_TOP_ZAP,
vd->vdev_top_zap);
}
}
if (getstats) {
- pool_scan_stat_t ps;
-
vdev_config_generate_stats(vd, nv);
/* provide either current or previous scan information */
+ pool_scan_stat_t ps;
if (spa_scan_get_stats(spa, &ps) == 0) {
fnvlist_add_uint64_array(nv,
ZPOOL_CONFIG_SCAN_STATS, (uint64_t *)&ps,
sizeof (pool_scan_stat_t) / sizeof (uint64_t));
}
+
+ pool_removal_stat_t prs;
+ if (spa_removal_get_stats(spa, &prs) == 0) {
+ fnvlist_add_uint64_array(nv,
+ ZPOOL_CONFIG_REMOVAL_STATS, (uint64_t *)&prs,
+ sizeof (prs) / sizeof (uint64_t));
+ }
+
+ /*
+ * Note: this can be called from open context
+ * (spa_get_stats()), so we need the rwlock to prevent
+ * the mapping from being changed by condensing.
+ */
+ rw_enter(&vd->vdev_indirect_rwlock, RW_READER);
+ if (vd->vdev_indirect_mapping != NULL) {
+ ASSERT(vd->vdev_indirect_births != NULL);
+ vdev_indirect_mapping_t *vim =
+ vd->vdev_indirect_mapping;
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
+ vdev_indirect_mapping_size(vim));
+ }
+ rw_exit(&vd->vdev_indirect_rwlock);
+ if (vd->vdev_mg != NULL &&
+ vd->vdev_mg->mg_fragmentation != ZFS_FRAG_INVALID) {
+ /*
+ * Compute approximately how much memory would be used
+ * for the indirect mapping if this device were to
+ * be removed.
+ *
+ * Note: If the frag metric is invalid, then not
+ * enough metaslabs have been converted to have
+ * histograms.
+ */
+ uint64_t seg_count = 0;
+
+ /*
+ * There are the same number of allocated segments
+ * as free segments, so we will have at least one
+ * entry per free segment.
+ */
+ for (int i = 0; i < RANGE_TREE_HISTOGRAM_SIZE; i++) {
+ seg_count += vd->vdev_mg->mg_histogram[i];
+ }
+
+ /*
+ * The maximum length of a mapping is SPA_MAXBLOCKSIZE,
+ * so we need at least one entry per SPA_MAXBLOCKSIZE
+ * of allocated data.
+ */
+ seg_count += vd->vdev_stat.vs_alloc / SPA_MAXBLOCKSIZE;
+
+ fnvlist_add_uint64(nv, ZPOOL_CONFIG_INDIRECT_SIZE,
+ seg_count *
+ sizeof (vdev_indirect_mapping_entry_phys_t));
+ }
}
if (!vd->vdev_ops->vdev_op_leaf) {
nvlist_t **child;
int c, idx;
ASSERT(!vd->vdev_ishole);
child = kmem_alloc(vd->vdev_children * sizeof (nvlist_t *),
KM_SLEEP);
for (c = 0, idx = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
/*
* If we're generating an nvlist of removing
* vdevs then skip over any device which is
* not being removed.
*/
if ((flags & VDEV_CONFIG_REMOVING) &&
!cvd->vdev_removing)
continue;
child[idx++] = vdev_config_generate(spa, cvd,
getstats, flags);
}
if (idx) {
fnvlist_add_nvlist_array(nv, ZPOOL_CONFIG_CHILDREN,
child, idx);
}
for (c = 0; c < idx; c++)
nvlist_free(child[c]);
kmem_free(child, vd->vdev_children * sizeof (nvlist_t *));
} else {
const char *aux = NULL;
if (vd->vdev_offline && !vd->vdev_tmpoffline)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_OFFLINE, B_TRUE);
if (vd->vdev_resilver_txg != 0)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_RESILVER_TXG,
vd->vdev_resilver_txg);
if (vd->vdev_faulted)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_FAULTED, B_TRUE);
if (vd->vdev_degraded)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_DEGRADED, B_TRUE);
if (vd->vdev_removed)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_REMOVED, B_TRUE);
if (vd->vdev_unspare)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_UNSPARE, B_TRUE);
if (vd->vdev_ishole)
fnvlist_add_uint64(nv, ZPOOL_CONFIG_IS_HOLE, B_TRUE);
/* Set the reason why we're FAULTED/DEGRADED. */
switch (vd->vdev_stat.vs_aux) {
case VDEV_AUX_ERR_EXCEEDED:
aux = "err_exceeded";
break;
case VDEV_AUX_EXTERNAL:
aux = "external";
break;
}
if (aux != NULL && !vd->vdev_tmpoffline) {
fnvlist_add_string(nv, ZPOOL_CONFIG_AUX_STATE, aux);
} else {
/*
* We're healthy - clear any previous AUX_STATE values.
*/
if (nvlist_exists(nv, ZPOOL_CONFIG_AUX_STATE))
nvlist_remove_all(nv, ZPOOL_CONFIG_AUX_STATE);
}
if (vd->vdev_splitting && vd->vdev_orig_guid != 0LL) {
fnvlist_add_uint64(nv, ZPOOL_CONFIG_ORIG_GUID,
vd->vdev_orig_guid);
}
}
return (nv);
}
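As a rough sketch (not part of the patch) of the indirect-mapping memory estimate computed in the getstats block above, with every input, including the per-entry size, passed in as an assumption:

#include <stdint.h>

/*
 * Illustrative sketch only: estimate the mapping size as one entry per
 * free segment (from the metaslab group histogram) plus one entry per
 * max_block of allocated data, times an assumed per-entry size.
 */
static uint64_t
indirect_mapping_estimate(const uint64_t *histogram, int histo_size,
    uint64_t bytes_alloc, uint64_t max_block, uint64_t entry_size)
{
	uint64_t seg_count = 0;

	/* at least one mapping entry per free (hence allocated) segment */
	for (int i = 0; i < histo_size; i++)
		seg_count += histogram[i];

	/* plus one entry per max_block of allocated data */
	seg_count += bytes_alloc / max_block;

	return (seg_count * entry_size);
}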
/*
* Generate a view of the top-level vdevs. If we currently have holes
* in the namespace, then generate an array which contains a list of holey
* vdevs. Additionally, add the number of top-level children that currently
* exist.
*/
void
vdev_top_config_generate(spa_t *spa, nvlist_t *config)
{
vdev_t *rvd = spa->spa_root_vdev;
uint64_t *array;
uint_t c, idx;
array = kmem_alloc(rvd->vdev_children * sizeof (uint64_t), KM_SLEEP);
for (c = 0, idx = 0; c < rvd->vdev_children; c++) {
vdev_t *tvd = rvd->vdev_child[c];
- if (tvd->vdev_ishole)
+ if (tvd->vdev_ishole) {
array[idx++] = c;
+ }
}
if (idx) {
VERIFY(nvlist_add_uint64_array(config, ZPOOL_CONFIG_HOLE_ARRAY,
array, idx) == 0);
}
VERIFY(nvlist_add_uint64(config, ZPOOL_CONFIG_VDEV_CHILDREN,
rvd->vdev_children) == 0);
kmem_free(array, rvd->vdev_children * sizeof (uint64_t));
}
/*
* Returns the configuration from the label of the given vdev. For vdevs
* which don't have a txg value stored on their label (i.e. spares/cache)
* or have not been completely initialized (txg = 0) just return
* the configuration from the first valid label we find. Otherwise,
* find the most up-to-date label that does not exceed the specified
* 'txg' value.
*/
nvlist_t *
vdev_label_read_config(vdev_t *vd, uint64_t txg)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *config = NULL;
vdev_phys_t *vp;
abd_t *vp_abd;
zio_t *zio;
uint64_t best_txg = 0;
int error = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE;
ASSERT(spa_config_held(spa, SCL_STATE_ALL, RW_WRITER) == SCL_STATE_ALL);
if (!vdev_readable(vd))
return (NULL);
vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
vp = abd_to_buf(vp_abd);
retry:
for (int l = 0; l < VDEV_LABELS; l++) {
nvlist_t *label = NULL;
zio = zio_root(spa, NULL, NULL, flags);
vdev_label_read(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys),
sizeof (vdev_phys_t), NULL, NULL, flags);
if (zio_wait(zio) == 0 &&
nvlist_unpack(vp->vp_nvlist, sizeof (vp->vp_nvlist),
&label, 0) == 0) {
uint64_t label_txg = 0;
/*
* Auxiliary vdevs won't have txg values in their
* labels and newly added vdevs may not have been
* completely initialized so just return the
* configuration from the first valid label we
* encounter.
*/
error = nvlist_lookup_uint64(label,
ZPOOL_CONFIG_POOL_TXG, &label_txg);
if ((error || label_txg == 0) && !config) {
config = label;
break;
} else if (label_txg <= txg && label_txg > best_txg) {
best_txg = label_txg;
nvlist_free(config);
config = fnvlist_dup(label);
}
}
if (label != NULL) {
nvlist_free(label);
label = NULL;
}
}
if (config == NULL && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
abd_free(vp_abd);
return (config);
}
/*
* Determine if a device is in use. The 'spare_guid' parameter will be filled
* in with the device guid if this spare is active elsewhere on the system.
*/
static boolean_t
vdev_inuse(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason,
uint64_t *spare_guid, uint64_t *l2cache_guid)
{
spa_t *spa = vd->vdev_spa;
uint64_t state, pool_guid, device_guid, txg, spare_pool;
uint64_t vdtxg = 0;
nvlist_t *label;
if (spare_guid)
*spare_guid = 0ULL;
if (l2cache_guid)
*l2cache_guid = 0ULL;
/*
* Read the label, if any, and perform some basic sanity checks.
*/
if ((label = vdev_label_read_config(vd, -1ULL)) == NULL)
return (B_FALSE);
(void) nvlist_lookup_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
&vdtxg);
if (nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_STATE,
&state) != 0 ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_GUID,
&device_guid) != 0) {
nvlist_free(label);
return (B_FALSE);
}
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_GUID,
&pool_guid) != 0 ||
nvlist_lookup_uint64(label, ZPOOL_CONFIG_POOL_TXG,
&txg) != 0)) {
nvlist_free(label);
return (B_FALSE);
}
nvlist_free(label);
/*
* Check to see if this device indeed belongs to the pool it claims to
* be a part of. The only way this is allowed is if the device is a hot
* spare (which we check for later on).
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
!spa_guid_exists(pool_guid, device_guid) &&
!spa_spare_exists(device_guid, NULL, NULL) &&
!spa_l2cache_exists(device_guid, NULL))
return (B_FALSE);
/*
* If the transaction group is zero, then this is an initialized (but
* unused) label. This is only an error if the create transaction
* on-disk is the same as the one we're using now, in which case the
* user has attempted to add the same vdev multiple times in the same
* transaction.
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
txg == 0 && vdtxg == crtxg)
return (B_TRUE);
/*
* Check to see if this is a spare device. We do an explicit check for
* spa_has_spare() here because it may be on our pending list of spares
* to add. We also check if it is an l2cache device.
*/
if (spa_spare_exists(device_guid, &spare_pool, NULL) ||
spa_has_spare(spa, device_guid)) {
if (spare_guid)
*spare_guid = device_guid;
switch (reason) {
case VDEV_LABEL_CREATE:
case VDEV_LABEL_L2CACHE:
return (B_TRUE);
case VDEV_LABEL_REPLACE:
return (!spa_has_spare(spa, device_guid) ||
spare_pool != 0ULL);
case VDEV_LABEL_SPARE:
return (spa_has_spare(spa, device_guid));
default:
break;
}
}
/*
* Check to see if this is an l2cache device.
*/
if (spa_l2cache_exists(device_guid, NULL))
return (B_TRUE);
/*
* We can't rely on a pool's state if it's been imported
* read-only. Instead we look to see if the pool is marked
* read-only in the namespace and set the state to active.
*/
if (state != POOL_STATE_SPARE && state != POOL_STATE_L2CACHE &&
(spa = spa_by_guid(pool_guid, device_guid)) != NULL &&
spa_mode(spa) == FREAD)
state = POOL_STATE_ACTIVE;
/*
* If the device is marked ACTIVE, then this device is in use by another
* pool on the system.
*/
return (state == POOL_STATE_ACTIVE);
}
/*
* Initialize a vdev label. We check to make sure each leaf device is not in
* use and is writable. We put down an initial label which we will later
* overwrite with a complete label. Note that it's important to do this
* sequentially, not in parallel, so that we catch cases of multiple use of the
* same leaf vdev in the vdev we're creating -- e.g. mirroring a disk with
* itself.
*/
int
vdev_label_init(vdev_t *vd, uint64_t crtxg, vdev_labeltype_t reason)
{
spa_t *spa = vd->vdev_spa;
nvlist_t *label;
vdev_phys_t *vp;
abd_t *vp_abd;
abd_t *pad2;
uberblock_t *ub;
abd_t *ub_abd;
zio_t *zio;
char *buf;
size_t buflen;
int error;
uint64_t spare_guid = 0, l2cache_guid = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
for (int c = 0; c < vd->vdev_children; c++)
if ((error = vdev_label_init(vd->vdev_child[c],
crtxg, reason)) != 0)
return (error);
/* Track the creation time for this vdev */
vd->vdev_crtxg = crtxg;
if (!vd->vdev_ops->vdev_op_leaf || !spa_writeable(spa))
return (0);
/*
* Dead vdevs cannot be initialized.
*/
if (vdev_is_dead(vd))
return (SET_ERROR(EIO));
/*
* Determine if the vdev is in use.
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPLIT &&
vdev_inuse(vd, crtxg, reason, &spare_guid, &l2cache_guid))
return (SET_ERROR(EBUSY));
/*
* If this is a request to add or replace a spare or l2cache device
* that is in use elsewhere on the system, then we must update the
* guid (which was initialized to a random value) to reflect the
* actual GUID (which is shared between multiple pools).
*/
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_L2CACHE &&
spare_guid != 0ULL) {
uint64_t guid_delta = spare_guid - vd->vdev_guid;
vd->vdev_guid += guid_delta;
for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
* If this is a replacement, then we want to fall through to the
* rest of the code. If we're adding a spare, then it's already
* labeled appropriately and we can just return.
*/
if (reason == VDEV_LABEL_SPARE)
return (0);
ASSERT(reason == VDEV_LABEL_REPLACE ||
reason == VDEV_LABEL_SPLIT);
}
if (reason != VDEV_LABEL_REMOVE && reason != VDEV_LABEL_SPARE &&
l2cache_guid != 0ULL) {
uint64_t guid_delta = l2cache_guid - vd->vdev_guid;
vd->vdev_guid += guid_delta;
for (vdev_t *pvd = vd; pvd != NULL; pvd = pvd->vdev_parent)
pvd->vdev_guid_sum += guid_delta;
/*
* If this is a replacement, then we want to fall through to the
* rest of the code. If we're adding an l2cache, then it's
* already labeled appropriately and we can just return.
*/
if (reason == VDEV_LABEL_L2CACHE)
return (0);
ASSERT(reason == VDEV_LABEL_REPLACE);
}
/*
* Initialize its label.
*/
vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
abd_zero(vp_abd, sizeof (vdev_phys_t));
vp = abd_to_buf(vp_abd);
/*
* Generate a label describing the pool and our top-level vdev.
* We mark it as being from txg 0 to indicate that it's not
* really part of an active pool just yet. The labels will
* be written again with a meaningful txg by spa_sync().
*/
if (reason == VDEV_LABEL_SPARE ||
(reason == VDEV_LABEL_REMOVE && vd->vdev_isspare)) {
/*
* For inactive hot spares, we generate a special label that
* identifies as a mutually shared hot spare. We write the
* label if we are adding a hot spare, or if we are removing an
* active hot spare (in which case we want to revert the
* labels).
*/
VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
spa_version(spa)) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
POOL_STATE_SPARE) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
vd->vdev_guid) == 0);
} else if (reason == VDEV_LABEL_L2CACHE ||
(reason == VDEV_LABEL_REMOVE && vd->vdev_isl2cache)) {
/*
* For level 2 ARC devices, add a special label.
*/
VERIFY(nvlist_alloc(&label, NV_UNIQUE_NAME, KM_SLEEP) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_VERSION,
spa_version(spa)) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_POOL_STATE,
POOL_STATE_L2CACHE) == 0);
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_GUID,
vd->vdev_guid) == 0);
} else {
uint64_t txg = 0ULL;
if (reason == VDEV_LABEL_SPLIT)
txg = spa->spa_uberblock.ub_txg;
label = spa_config_generate(spa, vd, txg, B_FALSE);
/*
* Add our creation time. This allows us to detect multiple
* vdev uses as described above, and automatically expires if we
* fail.
*/
VERIFY(nvlist_add_uint64(label, ZPOOL_CONFIG_CREATE_TXG,
crtxg) == 0);
}
buf = vp->vp_nvlist;
buflen = sizeof (vp->vp_nvlist);
error = nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP);
if (error != 0) {
nvlist_free(label);
abd_free(vp_abd);
/* EFAULT means nvlist_pack ran out of room */
return (SET_ERROR(error == EFAULT ? ENAMETOOLONG : EINVAL));
}
/*
* Initialize uberblock template.
*/
ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_RING, B_TRUE);
abd_zero(ub_abd, VDEV_UBERBLOCK_RING);
abd_copy_from_buf(ub_abd, &spa->spa_uberblock, sizeof (uberblock_t));
ub = abd_to_buf(ub_abd);
ub->ub_txg = 0;
/* Initialize the 2nd padding area. */
pad2 = abd_alloc_for_io(VDEV_PAD_SIZE, B_TRUE);
abd_zero(pad2, VDEV_PAD_SIZE);
/*
* Write everything in parallel.
*/
retry:
zio = zio_root(spa, NULL, NULL, flags);
for (int l = 0; l < VDEV_LABELS; l++) {
vdev_label_write(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys),
sizeof (vdev_phys_t), NULL, NULL, flags);
/*
* Skip the 1st padding area.
* Zero out the 2nd padding area, which might contain
* leftover data from a previous filesystem format.
*/
vdev_label_write(zio, vd, l, pad2,
offsetof(vdev_label_t, vl_pad2),
VDEV_PAD_SIZE, NULL, NULL, flags);
vdev_label_write(zio, vd, l, ub_abd,
offsetof(vdev_label_t, vl_uberblock),
VDEV_UBERBLOCK_RING, NULL, NULL, flags);
}
error = zio_wait(zio);
if (error != 0 && !(flags & ZIO_FLAG_TRYHARD)) {
flags |= ZIO_FLAG_TRYHARD;
goto retry;
}
nvlist_free(label);
abd_free(pad2);
abd_free(ub_abd);
abd_free(vp_abd);
/*
* If this vdev hasn't been previously identified as a spare, then we
* mark it as such only if a) we are labeling it as a spare, or b) it
* exists as a spare elsewhere in the system. Do the same for
* level 2 ARC devices.
*/
if (error == 0 && !vd->vdev_isspare &&
(reason == VDEV_LABEL_SPARE ||
spa_spare_exists(vd->vdev_guid, NULL, NULL)))
spa_spare_add(vd);
if (error == 0 && !vd->vdev_isl2cache &&
(reason == VDEV_LABEL_L2CACHE ||
spa_l2cache_exists(vd->vdev_guid, NULL)))
spa_l2cache_add(vd);
return (error);
}
/*
* ==========================================================================
* uberblock load/sync
* ==========================================================================
*/
/*
* Consider the following situation: txg is safely synced to disk. We've
* written the first uberblock for txg + 1, and then we lose power. When we
* come back up, we fail to see the uberblock for txg + 1 because, say,
* it was on a mirrored device and the replica to which we wrote txg + 1
* is now offline. If we then make some changes and sync txg + 1, and then
* the missing replica comes back, then for a few seconds we'll have two
* conflicting uberblocks on disk with the same txg. The solution is simple:
* among uberblocks with equal txg, choose the one with the latest timestamp.
*/
static int
vdev_uberblock_compare(const uberblock_t *ub1, const uberblock_t *ub2)
{
int cmp = AVL_CMP(ub1->ub_txg, ub2->ub_txg);
if (likely(cmp))
return (cmp);
return (AVL_CMP(ub1->ub_timestamp, ub2->ub_timestamp));
}
struct ubl_cbdata {
uberblock_t *ubl_ubbest; /* Best uberblock */
vdev_t *ubl_vd; /* vdev associated with the above */
};
static void
vdev_uberblock_load_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
spa_t *spa = zio->io_spa;
zio_t *rio = zio->io_private;
uberblock_t *ub = abd_to_buf(zio->io_abd);
struct ubl_cbdata *cbp = rio->io_private;
ASSERT3U(zio->io_size, ==, VDEV_UBERBLOCK_SIZE(vd));
if (zio->io_error == 0 && uberblock_verify(ub) == 0) {
mutex_enter(&rio->io_lock);
if (ub->ub_txg <= spa->spa_load_max_txg &&
vdev_uberblock_compare(ub, cbp->ubl_ubbest) > 0) {
/*
* Keep track of the vdev in which this uberblock
* was found. We will use this information later
* to obtain the config nvlist associated with
* this uberblock.
*/
*cbp->ubl_ubbest = *ub;
cbp->ubl_vd = vd;
}
mutex_exit(&rio->io_lock);
}
abd_free(zio->io_abd);
}
static void
vdev_uberblock_load_impl(zio_t *zio, vdev_t *vd, int flags,
struct ubl_cbdata *cbp)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_uberblock_load_impl(zio, vd->vdev_child[c], flags, cbp);
if (vd->vdev_ops->vdev_op_leaf && vdev_readable(vd)) {
for (int l = 0; l < VDEV_LABELS; l++) {
for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
vdev_label_read(zio, vd, l,
abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd),
B_TRUE), VDEV_UBERBLOCK_OFFSET(vd, n),
VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_load_done, zio, flags);
}
}
}
}
/*
* Reads the 'best' uberblock from disk along with its associated
* configuration. First, we read the uberblock array of each label of each
* vdev, keeping track of the uberblock with the highest txg in each array.
* Then, we read the configuration from the same vdev as the best uberblock.
*/
void
vdev_uberblock_load(vdev_t *rvd, uberblock_t *ub, nvlist_t **config)
{
zio_t *zio;
spa_t *spa = rvd->vdev_spa;
struct ubl_cbdata cb;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE | ZIO_FLAG_TRYHARD;
ASSERT(ub);
ASSERT(config);
bzero(ub, sizeof (uberblock_t));
*config = NULL;
cb.ubl_ubbest = ub;
cb.ubl_vd = NULL;
spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER);
zio = zio_root(spa, NULL, &cb, flags);
vdev_uberblock_load_impl(zio, rvd, flags, &cb);
(void) zio_wait(zio);
/*
* It's possible that the best uberblock was discovered on a label
* that has a configuration which was written in a future txg.
* Search all labels on this vdev to find the configuration that
* matches the txg for our uberblock.
*/
if (cb.ubl_vd != NULL)
*config = vdev_label_read_config(cb.ubl_vd, ub->ub_txg);
spa_config_exit(spa, SCL_ALL, FTAG);
}
/*
* For use when a leaf vdev is expanded.
* The location of labels 2 and 3 changed, and at the new location the
* uberblock rings are either empty or contain garbage. The sync will write
* new configs there because the vdev is dirty, but expansion also needs the
* uberblock rings copied. Read them from label 0 which did not move.
*
* Since the point is to populate labels {2,3} with valid uberblocks,
* we zero uberblocks we fail to read or which are not valid.
*/
static void
vdev_copy_uberblocks(vdev_t *vd)
{
abd_t *ub_abd;
zio_t *write_zio;
int locks = (SCL_L2ARC | SCL_ZIO);
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL |
ZIO_FLAG_SPECULATIVE;
ASSERT(spa_config_held(vd->vdev_spa, SCL_STATE, RW_READER) ==
SCL_STATE);
ASSERT(vd->vdev_ops->vdev_op_leaf);
spa_config_enter(vd->vdev_spa, locks, FTAG, RW_READER);
ub_abd = abd_alloc_linear(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
write_zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
for (int n = 0; n < VDEV_UBERBLOCK_COUNT(vd); n++) {
const int src_label = 0;
zio_t *zio;
zio = zio_root(vd->vdev_spa, NULL, NULL, flags);
vdev_label_read(zio, vd, src_label, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
NULL, NULL, flags);
if (zio_wait(zio) || uberblock_verify(abd_to_buf(ub_abd)))
abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
for (int l = 2; l < VDEV_LABELS; l++)
vdev_label_write(write_zio, vd, l, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n),
VDEV_UBERBLOCK_SIZE(vd), NULL, NULL,
flags | ZIO_FLAG_DONT_PROPAGATE);
}
(void) zio_wait(write_zio);
spa_config_exit(vd->vdev_spa, locks, FTAG);
abd_free(ub_abd);
}
/*
* On success, increment root zio's count of good writes.
* We only get credit for writes to known-visible vdevs; see spa_vdev_add().
*/
static void
vdev_uberblock_sync_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0 && zio->io_vd->vdev_top->vdev_ms_array != 0)
atomic_inc_64(good_writes);
}
/*
* Write the uberblock to all labels of all leaves of the specified vdev.
*/
static void
vdev_uberblock_sync(zio_t *zio, uberblock_t *ub, vdev_t *vd, int flags)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_uberblock_sync(zio, ub, vd->vdev_child[c], flags);
if (!vd->vdev_ops->vdev_op_leaf)
return;
if (!vdev_writeable(vd))
return;
/* If the vdev was expanded, need to copy uberblock rings. */
if (vd->vdev_state == VDEV_STATE_HEALTHY &&
vd->vdev_copy_uberblocks == B_TRUE) {
vdev_copy_uberblocks(vd);
vd->vdev_copy_uberblocks = B_FALSE;
}
int m = spa_multihost(vd->vdev_spa) ? MMP_BLOCKS_PER_LABEL : 0;
int n = ub->ub_txg % (VDEV_UBERBLOCK_COUNT(vd) - m);
/* Copy the uberblock_t into the ABD */
abd_t *ub_abd = abd_alloc_for_io(VDEV_UBERBLOCK_SIZE(vd), B_TRUE);
abd_zero(ub_abd, VDEV_UBERBLOCK_SIZE(vd));
abd_copy_from_buf(ub_abd, ub, sizeof (uberblock_t));
for (int l = 0; l < VDEV_LABELS; l++)
vdev_label_write(zio, vd, l, ub_abd,
VDEV_UBERBLOCK_OFFSET(vd, n), VDEV_UBERBLOCK_SIZE(vd),
vdev_uberblock_sync_done, zio->io_private,
flags | ZIO_FLAG_DONT_PROPAGATE);
abd_free(ub_abd);
}
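A minimal sketch (assumption-laden, not part of the patch) of the ring-slot selection above: ordinary uberblock writes cycle through the slots left over after reserving a fixed number for multihost (MMP) use:

#include <stdint.h>

/*
 * Illustrative sketch only: pick the uberblock ring slot for a txg,
 * keeping 'reserved' slots of the ring free for MMP writes.
 */
static int
uberblock_slot(uint64_t txg, int ring_slots, int reserved)
{
	return ((int)(txg % (uint64_t)(ring_slots - reserved)));
}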
/* Sync the uberblocks to all vdevs in svd[] */
int
vdev_uberblock_sync_list(vdev_t **svd, int svdcount, uberblock_t *ub, int flags)
{
spa_t *spa = svd[0]->vdev_spa;
zio_t *zio;
uint64_t good_writes = 0;
zio = zio_root(spa, NULL, &good_writes, flags);
for (int v = 0; v < svdcount; v++)
vdev_uberblock_sync(zio, ub, svd[v], flags);
(void) zio_wait(zio);
/*
* Flush the uberblocks to disk. This ensures that the odd labels
* are no longer needed (because the new uberblocks and the even
* labels are safely on disk), so it is safe to overwrite them.
*/
zio = zio_root(spa, NULL, NULL, flags);
- for (int v = 0; v < svdcount; v++)
- zio_flush(zio, svd[v]);
+ for (int v = 0; v < svdcount; v++) {
+ if (vdev_writeable(svd[v])) {
+ zio_flush(zio, svd[v]);
+ }
+ }
(void) zio_wait(zio);
return (good_writes >= 1 ? 0 : EIO);
}
/*
* On success, increment the count of good writes for our top-level vdev.
*/
static void
vdev_label_sync_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (zio->io_error == 0)
atomic_inc_64(good_writes);
}
/*
* If there weren't enough good writes, indicate failure to the parent.
*/
static void
vdev_label_sync_top_done(zio_t *zio)
{
uint64_t *good_writes = zio->io_private;
if (*good_writes == 0)
zio->io_error = SET_ERROR(EIO);
kmem_free(good_writes, sizeof (uint64_t));
}
/*
* We ignore errors for log and cache devices, simply free the private data.
*/
static void
vdev_label_sync_ignore_done(zio_t *zio)
{
kmem_free(zio->io_private, sizeof (uint64_t));
}
/*
* Write all even or odd labels to all leaves of the specified vdev.
*/
static void
vdev_label_sync(zio_t *zio, vdev_t *vd, int l, uint64_t txg, int flags)
{
nvlist_t *label;
vdev_phys_t *vp;
abd_t *vp_abd;
char *buf;
size_t buflen;
for (int c = 0; c < vd->vdev_children; c++)
vdev_label_sync(zio, vd->vdev_child[c], l, txg, flags);
if (!vd->vdev_ops->vdev_op_leaf)
return;
if (!vdev_writeable(vd))
return;
/*
* Generate a label describing the top-level config to which we belong.
*/
label = spa_config_generate(vd->vdev_spa, vd, txg, B_FALSE);
vp_abd = abd_alloc_linear(sizeof (vdev_phys_t), B_TRUE);
abd_zero(vp_abd, sizeof (vdev_phys_t));
vp = abd_to_buf(vp_abd);
buf = vp->vp_nvlist;
buflen = sizeof (vp->vp_nvlist);
if (!nvlist_pack(label, &buf, &buflen, NV_ENCODE_XDR, KM_SLEEP)) {
for (; l < VDEV_LABELS; l += 2) {
vdev_label_write(zio, vd, l, vp_abd,
offsetof(vdev_label_t, vl_vdev_phys),
sizeof (vdev_phys_t),
vdev_label_sync_done, zio->io_private,
flags | ZIO_FLAG_DONT_PROPAGATE);
}
}
abd_free(vp_abd);
nvlist_free(label);
}
int
vdev_label_sync_list(spa_t *spa, int l, uint64_t txg, int flags)
{
list_t *dl = &spa->spa_config_dirty_list;
vdev_t *vd;
zio_t *zio;
int error;
/*
* Write the new labels to disk.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd)) {
uint64_t *good_writes;
ASSERT(!vd->vdev_ishole);
good_writes = kmem_zalloc(sizeof (uint64_t), KM_SLEEP);
zio_t *vio = zio_null(zio, spa, NULL,
(vd->vdev_islog || vd->vdev_aux != NULL) ?
vdev_label_sync_ignore_done : vdev_label_sync_top_done,
good_writes, flags);
vdev_label_sync(vio, vd, l, txg, flags);
zio_nowait(vio);
}
error = zio_wait(zio);
/*
* Flush the new labels to disk.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (vd = list_head(dl); vd != NULL; vd = list_next(dl, vd))
zio_flush(zio, vd);
(void) zio_wait(zio);
return (error);
}
/*
* Sync the uberblock and any changes to the vdev configuration.
*
* The order of operations is carefully crafted to ensure that
* if the system panics or loses power at any time, the state on disk
* is still transactionally consistent. The in-line comments below
* describe the failure semantics at each stage.
*
* Moreover, vdev_config_sync() is designed to be idempotent: if it fails
* at any time, you can just call it again, and it will resume its work.
*/
int
vdev_config_sync(vdev_t **svd, int svdcount, uint64_t txg)
{
spa_t *spa = svd[0]->vdev_spa;
uberblock_t *ub = &spa->spa_uberblock;
vdev_t *vd;
zio_t *zio;
int error = 0;
int flags = ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_CANFAIL;
retry:
/*
* Normally, we don't want to try too hard to write every label and
* uberblock. If there is a flaky disk, we don't want the rest of the
* sync process to block while we retry. But if we can't write a
* single label out, we should retry with ZIO_FLAG_TRYHARD before
* bailing out and declaring the pool faulted.
*/
if (error != 0) {
if ((flags & ZIO_FLAG_TRYHARD) != 0)
return (error);
flags |= ZIO_FLAG_TRYHARD;
}
ASSERT(ub->ub_txg <= txg);
/*
* If this isn't a resync due to I/O errors,
* and nothing changed in this transaction group,
* and the vdev configuration hasn't changed,
* then there's nothing to do.
*/
if (ub->ub_txg < txg) {
boolean_t changed = uberblock_update(ub, spa->spa_root_vdev,
txg, spa->spa_mmp.mmp_delay);
if (!changed && list_is_empty(&spa->spa_config_dirty_list))
return (0);
}
if (txg > spa_freeze_txg(spa))
return (0);
ASSERT(txg <= spa->spa_final_txg);
/*
* Flush the write cache of every disk that's been written to
* in this transaction group. This ensures that all blocks
* written in this txg will be committed to stable storage
* before any uberblock that references them.
*/
zio = zio_root(spa, NULL, NULL, flags);
for (vd = txg_list_head(&spa->spa_vdev_txg_list, TXG_CLEAN(txg)); vd;
vd = txg_list_next(&spa->spa_vdev_txg_list, vd, TXG_CLEAN(txg)))
zio_flush(zio, vd);
(void) zio_wait(zio);
/*
* Sync out the even labels (L0, L2) for every dirty vdev. If the
* system dies in the middle of this process, that's OK: all of the
* even labels that made it to disk will be newer than any uberblock,
* and will therefore be considered invalid. The odd labels (L1, L3),
* which have not yet been touched, will still be valid. We flush
* the new labels to disk to ensure that all even-label updates
* are committed to stable storage before the uberblock update.
*/
if ((error = vdev_label_sync_list(spa, 0, txg, flags)) != 0)
goto retry;
/*
* Sync the uberblocks to all vdevs in svd[].
* If the system dies in the middle of this step, there are two cases
* to consider, and the on-disk state is consistent either way:
*
* (1) If none of the new uberblocks made it to disk, then the
* previous uberblock will be the newest, and the odd labels
* (which had not yet been touched) will be valid with respect
* to that uberblock.
*
* (2) If one or more new uberblocks made it to disk, then they
* will be the newest, and the even labels (which had all
* been successfully committed) will be valid with respect
* to the new uberblocks.
*/
if ((error = vdev_uberblock_sync_list(svd, svdcount, ub, flags)) != 0)
goto retry;
if (spa_multihost(spa))
mmp_update_uberblock(spa, ub);
/*
* Sync out odd labels for every dirty vdev. If the system dies
* in the middle of this process, the even labels and the new
* uberblocks will suffice to open the pool. The next time
* the pool is opened, the first thing we'll do -- before any
* user data is modified -- is mark every vdev dirty so that
* all labels will be brought up to date. We flush the new labels
* to disk to ensure that all odd-label updates are committed to
* stable storage before the next transaction group begins.
*/
if ((error = vdev_label_sync_list(spa, 1, txg, flags)) != 0)
goto retry;
return (0);
}
diff --git a/module/zfs/vdev_mirror.c b/module/zfs/vdev_mirror.c
index 1d5adce178b9..b56f955eb01b 100644
--- a/module/zfs/vdev_mirror.c
+++ b/module/zfs/vdev_mirror.c
@@ -1,738 +1,741 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
/*
* Vdev mirror kstats
*/
static kstat_t *mirror_ksp = NULL;
typedef struct mirror_stats {
kstat_named_t vdev_mirror_stat_rotating_linear;
kstat_named_t vdev_mirror_stat_rotating_offset;
kstat_named_t vdev_mirror_stat_rotating_seek;
kstat_named_t vdev_mirror_stat_non_rotating_linear;
kstat_named_t vdev_mirror_stat_non_rotating_seek;
kstat_named_t vdev_mirror_stat_preferred_found;
kstat_named_t vdev_mirror_stat_preferred_not_found;
} mirror_stats_t;
static mirror_stats_t mirror_stats = {
/* New I/O follows directly the last I/O */
{ "rotating_linear", KSTAT_DATA_UINT64 },
/* New I/O is within zfs_vdev_mirror_rotating_seek_offset of the last */
{ "rotating_offset", KSTAT_DATA_UINT64 },
/* New I/O requires random seek */
{ "rotating_seek", KSTAT_DATA_UINT64 },
/* New I/O follows directly the last I/O (nonrot) */
{ "non_rotating_linear", KSTAT_DATA_UINT64 },
/* New I/O requires random seek (nonrot) */
{ "non_rotating_seek", KSTAT_DATA_UINT64 },
/* Preferred child vdev found */
{ "preferred_found", KSTAT_DATA_UINT64 },
/* Preferred child vdev not found or equal load */
{ "preferred_not_found", KSTAT_DATA_UINT64 },
};
#define MIRROR_STAT(stat) (mirror_stats.stat.value.ui64)
#define MIRROR_INCR(stat, val) atomic_add_64(&MIRROR_STAT(stat), val)
#define MIRROR_BUMP(stat) MIRROR_INCR(stat, 1)
void
vdev_mirror_stat_init(void)
{
mirror_ksp = kstat_create("zfs", 0, "vdev_mirror_stats",
"misc", KSTAT_TYPE_NAMED,
sizeof (mirror_stats) / sizeof (kstat_named_t), KSTAT_FLAG_VIRTUAL);
if (mirror_ksp != NULL) {
mirror_ksp->ks_data = &mirror_stats;
kstat_install(mirror_ksp);
}
}
void
vdev_mirror_stat_fini(void)
{
if (mirror_ksp != NULL) {
kstat_delete(mirror_ksp);
mirror_ksp = NULL;
}
}
/*
* Virtual device vector for mirroring.
*/
typedef struct mirror_child {
vdev_t *mc_vd;
uint64_t mc_offset;
int mc_error;
int mc_load;
uint8_t mc_tried;
uint8_t mc_skipped;
uint8_t mc_speculative;
} mirror_child_t;
typedef struct mirror_map {
int *mm_preferred;
int mm_preferred_cnt;
int mm_children;
boolean_t mm_replacing;
boolean_t mm_root;
mirror_child_t mm_child[];
} mirror_map_t;
static int vdev_mirror_shift = 21;
/*
* The load configuration settings below are tuned by default for
* the case where all devices are of the same rotational type.
*
* If there is a mixture of rotating and non-rotating media, setting
* zfs_vdev_mirror_non_rotating_seek_inc to 0 may well provide better results
* as it will direct more reads to the non-rotating vdevs, which are
* likely to offer higher performance.
*/
/* Rotating media load calculation configuration. */
static int zfs_vdev_mirror_rotating_inc = 0;
static int zfs_vdev_mirror_rotating_seek_inc = 5;
static int zfs_vdev_mirror_rotating_seek_offset = 1 * 1024 * 1024;
/* Non-rotating media load calculation configuration. */
static int zfs_vdev_mirror_non_rotating_inc = 0;
static int zfs_vdev_mirror_non_rotating_seek_inc = 1;
static inline size_t
vdev_mirror_map_size(int children)
{
return (offsetof(mirror_map_t, mm_child[children]) +
sizeof (int) * children);
}
static inline mirror_map_t *
vdev_mirror_map_alloc(int children, boolean_t replacing, boolean_t root)
{
mirror_map_t *mm;
mm = kmem_zalloc(vdev_mirror_map_size(children), KM_SLEEP);
mm->mm_children = children;
mm->mm_replacing = replacing;
mm->mm_root = root;
mm->mm_preferred = (int *)((uintptr_t)mm +
offsetof(mirror_map_t, mm_child[children]));
return (mm);
}
static void
vdev_mirror_map_free(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
kmem_free(mm, vdev_mirror_map_size(mm->mm_children));
}
static const zio_vsd_ops_t vdev_mirror_vsd_ops = {
.vsd_free = vdev_mirror_map_free,
.vsd_cksum_report = zio_vsd_default_cksum_report
};
static int
vdev_mirror_load(mirror_map_t *mm, vdev_t *vd, uint64_t zio_offset)
{
uint64_t last_offset;
int64_t offset_diff;
int load;
/* All DVAs have equal weight at the root. */
if (mm->mm_root)
return (INT_MAX);
/*
* We don't return INT_MAX if the device is resilvering (i.e.
* vdev_resilver_txg != 0): in testing, overall performance during a
* resilver was slightly worse when the resilvering device was excluded
* from reads than when it was not.
*/
/* Fix zio_offset for leaf vdevs */
if (vd->vdev_ops->vdev_op_leaf)
zio_offset += VDEV_LABEL_START_SIZE;
/* Standard load based on pending queue length. */
load = vdev_queue_length(vd);
last_offset = vdev_queue_last_offset(vd);
if (vd->vdev_nonrot) {
/* Non-rotating media. */
if (last_offset == zio_offset) {
MIRROR_BUMP(vdev_mirror_stat_non_rotating_linear);
return (load + zfs_vdev_mirror_non_rotating_inc);
}
/*
* Apply a seek penalty even for non-rotating devices as
* sequential I/O's can be aggregated into fewer operations on
* the device, thus avoiding unnecessary per-command overhead
* and boosting performance.
*/
MIRROR_BUMP(vdev_mirror_stat_non_rotating_seek);
return (load + zfs_vdev_mirror_non_rotating_seek_inc);
}
/* Rotating media I/O's which directly follow the last I/O. */
if (last_offset == zio_offset) {
MIRROR_BUMP(vdev_mirror_stat_rotating_linear);
return (load + zfs_vdev_mirror_rotating_inc);
}
/*
* Apply half the seek increment to I/O's within seek offset
* of the last I/O issued to this vdev as they should incur less
* of a seek increment.
*/
offset_diff = (int64_t)(last_offset - zio_offset);
if (ABS(offset_diff) < zfs_vdev_mirror_rotating_seek_offset) {
MIRROR_BUMP(vdev_mirror_stat_rotating_offset);
return (load + (zfs_vdev_mirror_rotating_seek_inc / 2));
}
/* Apply the full seek increment to all other I/O's. */
MIRROR_BUMP(vdev_mirror_stat_rotating_seek);
return (load + zfs_vdev_mirror_rotating_seek_inc);
}
/*
* Avoid inlining the function to keep vdev_mirror_io_start(), which
* is this function's only caller, as small as possible on the stack.
*/
noinline static mirror_map_t *
vdev_mirror_map_init(zio_t *zio)
{
mirror_map_t *mm = NULL;
mirror_child_t *mc;
vdev_t *vd = zio->io_vd;
int c;
if (vd == NULL) {
dva_t *dva = zio->io_bp->blk_dva;
spa_t *spa = zio->io_spa;
mm = vdev_mirror_map_alloc(BP_GET_NDVAS(zio->io_bp), B_FALSE,
B_TRUE);
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
mc->mc_vd = vdev_lookup_top(spa, DVA_GET_VDEV(&dva[c]));
mc->mc_offset = DVA_GET_OFFSET(&dva[c]);
}
} else {
mm = vdev_mirror_map_alloc(vd->vdev_children,
(vd->vdev_ops == &vdev_replacing_ops ||
vd->vdev_ops == &vdev_spare_ops), B_FALSE);
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
mc->mc_vd = vd->vdev_child[c];
mc->mc_offset = zio->io_offset;
}
}
zio->io_vsd = mm;
zio->io_vsd_ops = &vdev_mirror_vsd_ops;
return (mm);
}
static int
vdev_mirror_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *ashift)
{
int numerrors = 0;
int lasterror = 0;
if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error) {
lasterror = cvd->vdev_open_error;
numerrors++;
continue;
}
*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
*ashift = MAX(*ashift, cvd->vdev_ashift);
}
if (numerrors == vd->vdev_children) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (lasterror);
}
return (0);
}
static void
vdev_mirror_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_close(vd->vdev_child[c]);
}
static void
vdev_mirror_child_done(zio_t *zio)
{
mirror_child_t *mc = zio->io_private;
mc->mc_error = zio->io_error;
mc->mc_tried = 1;
mc->mc_skipped = 0;
}
static void
vdev_mirror_scrub_done(zio_t *zio)
{
mirror_child_t *mc = zio->io_private;
if (zio->io_error == 0) {
zio_t *pio;
zio_link_t *zl = NULL;
mutex_enter(&zio->io_lock);
while ((pio = zio_walk_parents(zio, &zl)) != NULL) {
mutex_enter(&pio->io_lock);
ASSERT3U(zio->io_size, >=, pio->io_size);
abd_copy(pio->io_abd, zio->io_abd, pio->io_size);
mutex_exit(&pio->io_lock);
}
mutex_exit(&zio->io_lock);
}
abd_free(zio->io_abd);
mc->mc_error = zio->io_error;
mc->mc_tried = 1;
mc->mc_skipped = 0;
}
/*
* Check the other, lower-index DVAs to see if they're on the same
* vdev as the child we picked. If they are, use them since they
* are likely to have been allocated from the primary metaslab in
* use at the time, and hence are more likely to have locality with
* single-copy data.
*/
static int
vdev_mirror_dva_select(zio_t *zio, int p)
{
dva_t *dva = zio->io_bp->blk_dva;
mirror_map_t *mm = zio->io_vsd;
int preferred;
int c;
preferred = mm->mm_preferred[p];
for (p--; p >= 0; p--) {
c = mm->mm_preferred[p];
if (DVA_GET_VDEV(&dva[c]) == DVA_GET_VDEV(&dva[preferred]))
preferred = c;
}
return (preferred);
}
static int
vdev_mirror_preferred_child_randomize(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
int p;
if (mm->mm_root) {
p = spa_get_random(mm->mm_preferred_cnt);
return (vdev_mirror_dva_select(zio, p));
}
/*
* To ensure we don't always favour the first matching vdev,
* which could lead to wear-leveling issues on SSDs, we
* use the I/O offset as a pseudo random seed into the vdevs
* which have the lowest load.
*/
p = (zio->io_offset >> vdev_mirror_shift) % mm->mm_preferred_cnt;
return (mm->mm_preferred[p]);
}
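A minimal sketch of the offset-based pick above (the shift of 21, i.e. 2 MiB granularity, mirrors vdev_mirror_shift; everything else is an assumed input):

#include <stdint.h>

/*
 * Illustrative sketch only: deterministically spread reads across the
 * equally-loaded preferred children using the I/O offset as a seed.
 */
static int
pick_preferred(const int *preferred, int preferred_cnt, uint64_t io_offset)
{
	int p = (int)((io_offset >> 21) % (uint64_t)preferred_cnt);

	return (preferred[p]);
}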
/*
* Try to find a vdev whose DTL doesn't contain the block we want to read
* preferring vdevs based on their determined load.
*
* Try to find a child whose DTL doesn't contain the block we want to read.
* If we can't, try the read on any vdev we haven't already tried.
*/
static int
vdev_mirror_child_select(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
uint64_t txg = zio->io_txg;
int c, lowest_load;
ASSERT(zio->io_bp == NULL || BP_PHYSICAL_BIRTH(zio->io_bp) == txg);
lowest_load = INT_MAX;
mm->mm_preferred_cnt = 0;
for (c = 0; c < mm->mm_children; c++) {
mirror_child_t *mc;
mc = &mm->mm_child[c];
if (mc->mc_tried || mc->mc_skipped)
continue;
if (mc->mc_vd == NULL || !vdev_readable(mc->mc_vd)) {
mc->mc_error = SET_ERROR(ENXIO);
mc->mc_tried = 1; /* don't even try */
mc->mc_skipped = 1;
continue;
}
if (vdev_dtl_contains(mc->mc_vd, DTL_MISSING, txg, 1)) {
mc->mc_error = SET_ERROR(ESTALE);
mc->mc_skipped = 1;
mc->mc_speculative = 1;
continue;
}
mc->mc_load = vdev_mirror_load(mm, mc->mc_vd, mc->mc_offset);
if (mc->mc_load > lowest_load)
continue;
if (mc->mc_load < lowest_load) {
lowest_load = mc->mc_load;
mm->mm_preferred_cnt = 0;
}
mm->mm_preferred[mm->mm_preferred_cnt] = c;
mm->mm_preferred_cnt++;
}
if (mm->mm_preferred_cnt == 1) {
MIRROR_BUMP(vdev_mirror_stat_preferred_found);
return (mm->mm_preferred[0]);
}
if (mm->mm_preferred_cnt > 1) {
MIRROR_BUMP(vdev_mirror_stat_preferred_not_found);
return (vdev_mirror_preferred_child_randomize(zio));
}
/*
* Every device is either missing or has this txg in its DTL.
* Look for any child we haven't already tried before giving up.
*/
for (c = 0; c < mm->mm_children; c++) {
if (!mm->mm_child[c].mc_tried)
return (c);
}
/*
* Every child failed. There's no place left to look.
*/
return (-1);
}
static void
vdev_mirror_io_start(zio_t *zio)
{
mirror_map_t *mm;
mirror_child_t *mc;
int c, children;
mm = vdev_mirror_map_init(zio);
if (zio->io_type == ZIO_TYPE_READ) {
if ((zio->io_flags & ZIO_FLAG_SCRUB) && !mm->mm_replacing) {
/*
* For scrubbing reads we need to allocate a read
* buffer for each child and issue reads to all
* children. If any child succeeds, it will copy its
* data into zio->io_abd in vdev_mirror_scrub_done.
*/
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset,
abd_alloc_sametype(zio->io_abd,
zio->io_size), zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_mirror_scrub_done, mc));
}
zio_execute(zio);
return;
}
/*
* For normal reads just pick one child.
*/
c = vdev_mirror_child_select(zio);
children = (c >= 0);
} else {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
/*
* Writes go to all children.
*/
c = 0;
children = mm->mm_children;
}
while (children--) {
mc = &mm->mm_child[c];
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
zio->io_type, zio->io_priority, 0,
vdev_mirror_child_done, mc));
c++;
}
zio_execute(zio);
}
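/*
 * Return the worst error seen across the mirror children, preferring errors
 * from children that were actually tried (non-speculative) over errors that
 * were recorded speculatively, e.g. for children skipped because of their DTL.
 */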
static int
vdev_mirror_worst_error(mirror_map_t *mm)
{
int error[2] = { 0, 0 };
for (int c = 0; c < mm->mm_children; c++) {
mirror_child_t *mc = &mm->mm_child[c];
int s = mc->mc_speculative;
error[s] = zio_worst_error(error[s], mc->mc_error);
}
return (error[0] ? error[0] : error[1]);
}
static void
vdev_mirror_io_done(zio_t *zio)
{
mirror_map_t *mm = zio->io_vsd;
mirror_child_t *mc;
int c;
int good_copies = 0;
int unexpected_errors = 0;
for (c = 0; c < mm->mm_children; c++) {
mc = &mm->mm_child[c];
if (mc->mc_error) {
if (!mc->mc_skipped)
unexpected_errors++;
} else if (mc->mc_tried) {
good_copies++;
}
}
if (zio->io_type == ZIO_TYPE_WRITE) {
/*
* XXX -- for now, treat partial writes as success.
*
* Now that we support write reallocation, it would be better
* to treat partial failure as real failure unless there are
* no non-degraded top-level vdevs left, and not update DTLs
* if we intend to reallocate.
*/
/* XXPOLICY */
if (good_copies != mm->mm_children) {
/*
* Always require at least one good copy.
*
* For ditto blocks (io_vd == NULL), require
* all copies to be good.
*
* XXX -- for replacing vdevs, there's no great answer.
* If the old device is really dead, we may not even
* be able to access it -- so we only want to
* require good writes to the new device. But if
* the new device turns out to be flaky, we want
* to be able to detach it -- which requires all
* writes to the old device to have succeeded.
*/
if (good_copies == 0 || zio->io_vd == NULL)
zio->io_error = vdev_mirror_worst_error(mm);
}
return;
}
ASSERT(zio->io_type == ZIO_TYPE_READ);
/*
* If we don't have a good copy yet, keep trying other children.
*/
/* XXPOLICY */
if (good_copies == 0 && (c = vdev_mirror_child_select(zio)) != -1) {
ASSERT(c >= 0 && c < mm->mm_children);
mc = &mm->mm_child[c];
zio_vdev_io_redone(zio);
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset, zio->io_abd, zio->io_size,
ZIO_TYPE_READ, zio->io_priority, 0,
vdev_mirror_child_done, mc));
return;
}
/* XXPOLICY */
if (good_copies == 0) {
zio->io_error = vdev_mirror_worst_error(mm);
ASSERT(zio->io_error != 0);
}
if (good_copies && spa_writeable(zio->io_spa) &&
(unexpected_errors ||
(zio->io_flags & ZIO_FLAG_RESILVER) ||
((zio->io_flags & ZIO_FLAG_SCRUB) && mm->mm_replacing))) {
/*
* Use the good data we have in hand to repair damaged children.
*/
for (c = 0; c < mm->mm_children; c++) {
/*
* Don't rewrite known good children.
* Not only is it unnecessary, it could
* actually be harmful: if the system lost
* power while rewriting the only good copy,
* there would be no good copies left!
*/
mc = &mm->mm_child[c];
if (mc->mc_error == 0) {
if (mc->mc_tried)
continue;
if (!(zio->io_flags & ZIO_FLAG_SCRUB) &&
!vdev_dtl_contains(mc->mc_vd, DTL_PARTIAL,
zio->io_txg, 1))
continue;
mc->mc_error = SET_ERROR(ESTALE);
}
zio_nowait(zio_vdev_child_io(zio, zio->io_bp,
mc->mc_vd, mc->mc_offset,
zio->io_abd, zio->io_size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
}
}
}
static void
vdev_mirror_state_change(vdev_t *vd, int faulted, int degraded)
{
if (faulted == vd->vdev_children)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
else if (degraded + faulted != 0)
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
else
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
vdev_ops_t vdev_mirror_ops = {
vdev_mirror_open,
vdev_mirror_close,
vdev_default_asize,
vdev_mirror_io_start,
vdev_mirror_io_done,
vdev_mirror_state_change,
NULL,
NULL,
NULL,
+ NULL,
VDEV_TYPE_MIRROR, /* name of this vdev type */
B_FALSE /* not a leaf vdev */
};
vdev_ops_t vdev_replacing_ops = {
vdev_mirror_open,
vdev_mirror_close,
vdev_default_asize,
vdev_mirror_io_start,
vdev_mirror_io_done,
vdev_mirror_state_change,
NULL,
NULL,
NULL,
+ NULL,
VDEV_TYPE_REPLACING, /* name of this vdev type */
B_FALSE /* not a leaf vdev */
};
vdev_ops_t vdev_spare_ops = {
vdev_mirror_open,
vdev_mirror_close,
vdev_default_asize,
vdev_mirror_io_start,
vdev_mirror_io_done,
vdev_mirror_state_change,
NULL,
NULL,
NULL,
+ NULL,
VDEV_TYPE_SPARE, /* name of this vdev type */
B_FALSE /* not a leaf vdev */
};
#if defined(_KERNEL) && defined(HAVE_SPL)
/* BEGIN CSTYLED */
module_param(zfs_vdev_mirror_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_inc,
"Rotating media load increment for non-seeking I/O's");
module_param(zfs_vdev_mirror_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_inc,
"Rotating media load increment for seeking I/O's");
module_param(zfs_vdev_mirror_rotating_seek_offset, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_rotating_seek_offset,
"Offset in bytes from the last I/O which "
"triggers a reduced rotating media seek increment");
module_param(zfs_vdev_mirror_non_rotating_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_inc,
"Non-rotating media load increment for non-seeking I/O's");
module_param(zfs_vdev_mirror_non_rotating_seek_inc, int, 0644);
MODULE_PARM_DESC(zfs_vdev_mirror_non_rotating_seek_inc,
"Non-rotating media load increment for seeking I/O's");
/* END CSTYLED */
#endif
diff --git a/module/zfs/vdev_missing.c b/module/zfs/vdev_missing.c
index d7d017fb8fbe..b1c039f167f2 100644
--- a/module/zfs/vdev_missing.c
+++ b/module/zfs/vdev_missing.c
@@ -1,108 +1,110 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2014 by Delphix. All rights reserved.
*/
/*
* The 'missing' vdev is a special vdev type used only during import. It
* signifies a placeholder in the root vdev for some vdev that we know is
* missing. We pass it down to the kernel to allow the rest of the
* configuration to be parsed and an attempt made to open all available devices.
* Because its GUID is always 0, we know that the guid sum will mismatch and we
* won't be able to open the pool anyway.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/fs/zfs.h>
#include <sys/zio.h>
/* ARGSUSED */
static int
vdev_missing_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
uint64_t *ashift)
{
/*
* Really this should just fail. But then the root vdev will be in the
* faulted state with VDEV_AUX_NO_REPLICAS, when what we really want is
* VDEV_AUX_BAD_GUID_SUM. So we pretend to succeed, knowing that we
* will fail the GUID sum check before ever trying to open the pool.
*/
*psize = 0;
*max_psize = 0;
*ashift = 0;
return (0);
}
/* ARGSUSED */
static void
vdev_missing_close(vdev_t *vd)
{
}
/* ARGSUSED */
static void
vdev_missing_io_start(zio_t *zio)
{
zio->io_error = SET_ERROR(ENOTSUP);
zio_execute(zio);
}
/* ARGSUSED */
static void
vdev_missing_io_done(zio_t *zio)
{
}
vdev_ops_t vdev_missing_ops = {
vdev_missing_open,
vdev_missing_close,
vdev_default_asize,
vdev_missing_io_start,
vdev_missing_io_done,
NULL,
NULL,
NULL,
NULL,
+ NULL,
VDEV_TYPE_MISSING, /* name of this vdev type */
B_TRUE /* leaf vdev */
};
vdev_ops_t vdev_hole_ops = {
vdev_missing_open,
vdev_missing_close,
vdev_default_asize,
vdev_missing_io_start,
vdev_missing_io_done,
NULL,
NULL,
NULL,
NULL,
+ NULL,
VDEV_TYPE_HOLE, /* name of this vdev type */
B_TRUE /* leaf vdev */
};
diff --git a/module/zfs/vdev_queue.c b/module/zfs/vdev_queue.c
index 5d2c98013364..3ac31a87278a 100644
--- a/module/zfs/vdev_queue.c
+++ b/module/zfs/vdev_queue.c
@@ -1,938 +1,948 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2009 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
* Copyright (c) 2012, 2017 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/vdev_impl.h>
#include <sys/spa_impl.h>
#include <sys/zio.h>
#include <sys/avl.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab_impl.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/kstat.h>
#include <sys/abd.h>
/*
* ZFS I/O Scheduler
* ---------------
*
* ZFS issues I/O operations to leaf vdevs to satisfy and complete zios. The
* I/O scheduler determines when and in what order those operations are
* issued. The I/O scheduler divides operations into five I/O classes
* prioritized in the following order: sync read, sync write, async read,
* async write, and scrub/resilver. Each queue defines the minimum and
* maximum number of concurrent operations that may be issued to the device.
* In addition, the device has an aggregate maximum. Note that the sum of the
* per-queue minimums must not exceed the aggregate maximum. If the
* sum of the per-queue maximums exceeds the aggregate maximum, then the
* number of active i/os may reach zfs_vdev_max_active, in which case no
* further i/os will be issued regardless of whether all per-queue
* minimums have been met.
*
* For many physical devices, throughput increases with the number of
* concurrent operations, but latency typically suffers. Further, physical
* devices typically have a limit at which more concurrent operations have no
* effect on throughput or can actually cause it to decrease.
*
* The scheduler selects the next operation to issue by first looking for an
* I/O class whose minimum has not been satisfied. Once all are satisfied and
* the aggregate maximum has not been hit, the scheduler looks for classes
* whose maximum has not been satisfied. Iteration through the I/O classes is
* done in the order specified above. No further operations are issued if the
* aggregate maximum number of concurrent operations has been hit or if there
* are no operations queued for an I/O class that has not hit its maximum.
* Every time an i/o is queued or an operation completes, the I/O scheduler
* looks for new operations to issue.
*
* All I/O classes have a fixed maximum number of outstanding operations
* except for the async write class. Asynchronous writes represent the data
* that is committed to stable storage during the syncing stage for
* transaction groups (see txg.c). Transaction groups enter the syncing state
* periodically so the number of queued async writes will quickly burst up and
* then bleed down to zero. Rather than servicing them as quickly as possible,
* the I/O scheduler changes the maximum number of active async write i/os
* according to the amount of dirty data in the pool (see dsl_pool.c). Since
* both throughput and latency typically increase with the number of
* concurrent operations issued to physical devices, reducing the burstiness
* in the number of concurrent operations also stabilizes the response time of
* operations from other -- and in particular synchronous -- queues. In broad
* strokes, the I/O scheduler will issue more concurrent operations from the
* async write queue as there's more dirty data in the pool.
*
* Async Writes
*
* The number of concurrent operations issued for the async write I/O class
* follows a piece-wise linear function defined by a few adjustable points.
*
* | o---------| <-- zfs_vdev_async_write_max_active
* ^ | /^ |
* | | / | |
* active | / | |
* I/O | / | |
* count | / | |
* | / | |
* |------------o | | <-- zfs_vdev_async_write_min_active
* 0|____________^______|_________|
* 0% | | 100% of zfs_dirty_data_max
* | |
* | `-- zfs_vdev_async_write_active_max_dirty_percent
* `--------- zfs_vdev_async_write_active_min_dirty_percent
*
* Until the amount of dirty data exceeds a minimum percentage of the dirty
* data allowed in the pool, the I/O scheduler will limit the number of
* concurrent operations to the minimum. As that threshold is crossed, the
* number of concurrent operations issued increases linearly to the maximum at
* the specified maximum percentage of the dirty data allowed in the pool.
*
* Ideally, the amount of dirty data on a busy pool will stay in the sloped
* part of the function between zfs_vdev_async_write_active_min_dirty_percent
* and zfs_vdev_async_write_active_max_dirty_percent. If it exceeds the
* maximum percentage, this indicates that the rate of incoming data is
* greater than the rate that the backend storage can handle. In this case, we
* must further throttle incoming writes (see dmu_tx_delay() for details).
*/
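/*
 * Illustrative example (editor's sketch, not part of this change): with the
 * default tunables below (min_active = 2, max_active = 10, min dirty = 30%,
 * max dirty = 60%) and a hypothetical zfs_dirty_data_max of 4 GiB, the
 * sloped region spans 1.2 GiB to 2.4 GiB of dirty data.  At 1.8 GiB dirty,
 * i.e. halfway through that region, the scheduler allows
 * 2 + (10 - 2) / 2 = 6 concurrent async write i/os per vdev.
 */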
/*
* The maximum number of i/os active to each device. Ideally, this will be >=
* the sum of each queue's max_active. It must be at least the sum of each
* queue's min_active.
*/
uint32_t zfs_vdev_max_active = 1000;
/*
* Per-queue limits on the number of i/os active to each device. If the
* number of active i/os is < zfs_vdev_max_active, then the min_active comes
* into play. We will send min_active from each queue, and then select from
* queues in the order defined by zio_priority_t.
*
* In general, smaller max_active's will lead to lower latency of synchronous
* operations. Larger max_active's may lead to higher overall throughput,
* depending on underlying storage.
*
* The ratio of the queues' max_actives determines the balance of performance
* between reads, writes, and scrubs. E.g., increasing
* zfs_vdev_scrub_max_active will cause the scrub or resilver to complete
* more quickly, but reads and writes to have higher latency and lower
* throughput.
*/
uint32_t zfs_vdev_sync_read_min_active = 10;
uint32_t zfs_vdev_sync_read_max_active = 10;
uint32_t zfs_vdev_sync_write_min_active = 10;
uint32_t zfs_vdev_sync_write_max_active = 10;
uint32_t zfs_vdev_async_read_min_active = 1;
uint32_t zfs_vdev_async_read_max_active = 3;
uint32_t zfs_vdev_async_write_min_active = 2;
uint32_t zfs_vdev_async_write_max_active = 10;
uint32_t zfs_vdev_scrub_min_active = 1;
uint32_t zfs_vdev_scrub_max_active = 2;
+uint32_t zfs_vdev_removal_min_active = 1;
+uint32_t zfs_vdev_removal_max_active = 2;
/*
* When the pool has less than zfs_vdev_async_write_active_min_dirty_percent
* dirty data, use zfs_vdev_async_write_min_active. When it has more than
* zfs_vdev_async_write_active_max_dirty_percent, use
* zfs_vdev_async_write_max_active. The value is linearly interpolated
* between min and max.
*/
int zfs_vdev_async_write_active_min_dirty_percent = 30;
int zfs_vdev_async_write_active_max_dirty_percent = 60;
/*
* To reduce IOPs, we aggregate small adjacent I/Os into one large I/O.
* For read I/Os, we also aggregate across small adjacency gaps; for writes
* we include spans of optional I/Os to aid aggregation at the disk even when
* they aren't able to help us aggregate at this level.
*/
int zfs_vdev_aggregation_limit = 1 << 20;
int zfs_vdev_read_gap_limit = 32 << 10;
int zfs_vdev_write_gap_limit = 4 << 10;
/*
* Define the queue depth percentage for each top-level vdev. This percentage is
* used in conjunction with zfs_vdev_async_max_active to determine how many
* allocations a specific top-level vdev should handle. Once the queue depth
* reaches zfs_vdev_queue_depth_pct * zfs_vdev_async_write_max_active / 100
* then the allocator will stop allocating blocks on that top-level device.
* The default kernel setting is 1000% which will yield 100 allocations per
* device. For userland testing, the default setting is 300% which equates
* to 30 allocations per device.
*/
#ifdef _KERNEL
int zfs_vdev_queue_depth_pct = 1000;
#else
int zfs_vdev_queue_depth_pct = 300;
#endif
int
vdev_queue_offset_compare(const void *x1, const void *x2)
{
const zio_t *z1 = (const zio_t *)x1;
const zio_t *z2 = (const zio_t *)x2;
int cmp = AVL_CMP(z1->io_offset, z2->io_offset);
if (likely(cmp))
return (cmp);
return (AVL_PCMP(z1, z2));
}
static inline avl_tree_t *
vdev_queue_class_tree(vdev_queue_t *vq, zio_priority_t p)
{
return (&vq->vq_class[p].vqc_queued_tree);
}
static inline avl_tree_t *
vdev_queue_type_tree(vdev_queue_t *vq, zio_type_t t)
{
ASSERT(t == ZIO_TYPE_READ || t == ZIO_TYPE_WRITE);
if (t == ZIO_TYPE_READ)
return (&vq->vq_read_offset_tree);
else
return (&vq->vq_write_offset_tree);
}
int
vdev_queue_timestamp_compare(const void *x1, const void *x2)
{
const zio_t *z1 = (const zio_t *)x1;
const zio_t *z2 = (const zio_t *)x2;
int cmp = AVL_CMP(z1->io_timestamp, z2->io_timestamp);
if (likely(cmp))
return (cmp);
return (AVL_PCMP(z1, z2));
}
static int
vdev_queue_class_min_active(zio_priority_t p)
{
switch (p) {
case ZIO_PRIORITY_SYNC_READ:
return (zfs_vdev_sync_read_min_active);
case ZIO_PRIORITY_SYNC_WRITE:
return (zfs_vdev_sync_write_min_active);
case ZIO_PRIORITY_ASYNC_READ:
return (zfs_vdev_async_read_min_active);
case ZIO_PRIORITY_ASYNC_WRITE:
return (zfs_vdev_async_write_min_active);
case ZIO_PRIORITY_SCRUB:
return (zfs_vdev_scrub_min_active);
+ case ZIO_PRIORITY_REMOVAL:
+ return (zfs_vdev_removal_min_active);
default:
panic("invalid priority %u", p);
return (0);
}
}
static int
vdev_queue_max_async_writes(spa_t *spa)
{
int writes;
uint64_t dirty = 0;
dsl_pool_t *dp = spa_get_dsl(spa);
uint64_t min_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_min_dirty_percent / 100;
uint64_t max_bytes = zfs_dirty_data_max *
zfs_vdev_async_write_active_max_dirty_percent / 100;
/*
* Async writes may occur before the assignment of the spa's
* dsl_pool_t if a self-healing zio is issued prior to the
* completion of dmu_objset_open_impl().
*/
if (dp == NULL)
return (zfs_vdev_async_write_max_active);
/*
* Sync tasks correspond to interactive user actions. To reduce the
* execution time of those actions we push data out as fast as possible.
*/
if (spa_has_pending_synctask(spa))
return (zfs_vdev_async_write_max_active);
dirty = dp->dp_dirty_total;
if (dirty < min_bytes)
return (zfs_vdev_async_write_min_active);
if (dirty > max_bytes)
return (zfs_vdev_async_write_max_active);
/*
* linear interpolation:
* slope = (max_writes - min_writes) / (max_bytes - min_bytes)
* move right by min_bytes
* move up by min_writes
*/
writes = (dirty - min_bytes) *
(zfs_vdev_async_write_max_active -
zfs_vdev_async_write_min_active) /
(max_bytes - min_bytes) +
zfs_vdev_async_write_min_active;
ASSERT3U(writes, >=, zfs_vdev_async_write_min_active);
ASSERT3U(writes, <=, zfs_vdev_async_write_max_active);
return (writes);
}
static int
vdev_queue_class_max_active(spa_t *spa, zio_priority_t p)
{
switch (p) {
case ZIO_PRIORITY_SYNC_READ:
return (zfs_vdev_sync_read_max_active);
case ZIO_PRIORITY_SYNC_WRITE:
return (zfs_vdev_sync_write_max_active);
case ZIO_PRIORITY_ASYNC_READ:
return (zfs_vdev_async_read_max_active);
case ZIO_PRIORITY_ASYNC_WRITE:
return (vdev_queue_max_async_writes(spa));
case ZIO_PRIORITY_SCRUB:
return (zfs_vdev_scrub_max_active);
+ case ZIO_PRIORITY_REMOVAL:
+ return (zfs_vdev_removal_max_active);
default:
panic("invalid priority %u", p);
return (0);
}
}
/*
* Return the i/o class to issue from, or ZIO_PRIORITY_NUM_QUEUEABLE if
* there is no eligible class.
*/
static zio_priority_t
vdev_queue_class_to_issue(vdev_queue_t *vq)
{
spa_t *spa = vq->vq_vdev->vdev_spa;
zio_priority_t p;
if (avl_numnodes(&vq->vq_active_tree) >= zfs_vdev_max_active)
return (ZIO_PRIORITY_NUM_QUEUEABLE);
/* find a queue that has not reached its minimum # outstanding i/os */
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
vq->vq_class[p].vqc_active <
vdev_queue_class_min_active(p))
return (p);
}
/*
* If we haven't found a queue, look for one that hasn't reached its
* maximum # outstanding i/os.
*/
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
if (avl_numnodes(vdev_queue_class_tree(vq, p)) > 0 &&
vq->vq_class[p].vqc_active <
vdev_queue_class_max_active(spa, p))
return (p);
}
/* No eligible queued i/os */
return (ZIO_PRIORITY_NUM_QUEUEABLE);
}
void
vdev_queue_init(vdev_t *vd)
{
vdev_queue_t *vq = &vd->vdev_queue;
zio_priority_t p;
mutex_init(&vq->vq_lock, NULL, MUTEX_DEFAULT, NULL);
vq->vq_vdev = vd;
taskq_init_ent(&vd->vdev_queue.vq_io_search.io_tqent);
avl_create(&vq->vq_active_tree, vdev_queue_offset_compare,
sizeof (zio_t), offsetof(struct zio, io_queue_node));
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_READ),
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
avl_create(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE),
vdev_queue_offset_compare, sizeof (zio_t),
offsetof(struct zio, io_offset_node));
for (p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++) {
int (*compfn) (const void *, const void *);
/*
* The synchronous i/o queues are dispatched in FIFO rather
* than LBA order. This provides more consistent latency for
* these i/os.
*/
if (p == ZIO_PRIORITY_SYNC_READ || p == ZIO_PRIORITY_SYNC_WRITE)
compfn = vdev_queue_timestamp_compare;
else
compfn = vdev_queue_offset_compare;
avl_create(vdev_queue_class_tree(vq, p), compfn,
sizeof (zio_t), offsetof(struct zio, io_queue_node));
}
vq->vq_last_offset = 0;
}
void
vdev_queue_fini(vdev_t *vd)
{
vdev_queue_t *vq = &vd->vdev_queue;
for (zio_priority_t p = 0; p < ZIO_PRIORITY_NUM_QUEUEABLE; p++)
avl_destroy(vdev_queue_class_tree(vq, p));
avl_destroy(&vq->vq_active_tree);
avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_READ));
avl_destroy(vdev_queue_type_tree(vq, ZIO_TYPE_WRITE));
mutex_destroy(&vq->vq_lock);
}
static void
vdev_queue_io_add(vdev_queue_t *vq, zio_t *zio)
{
spa_t *spa = zio->io_spa;
spa_stats_history_t *ssh = &spa->spa_stats.io_history;
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
avl_add(vdev_queue_type_tree(vq, zio->io_type), zio);
if (ssh->kstat != NULL) {
mutex_enter(&ssh->lock);
kstat_waitq_enter(ssh->kstat->ks_data);
mutex_exit(&ssh->lock);
}
}
static void
vdev_queue_io_remove(vdev_queue_t *vq, zio_t *zio)
{
spa_t *spa = zio->io_spa;
spa_stats_history_t *ssh = &spa->spa_stats.io_history;
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
avl_remove(vdev_queue_type_tree(vq, zio->io_type), zio);
if (ssh->kstat != NULL) {
mutex_enter(&ssh->lock);
kstat_waitq_exit(ssh->kstat->ks_data);
mutex_exit(&ssh->lock);
}
}
static void
vdev_queue_pending_add(vdev_queue_t *vq, zio_t *zio)
{
spa_t *spa = zio->io_spa;
spa_stats_history_t *ssh = &spa->spa_stats.io_history;
ASSERT(MUTEX_HELD(&vq->vq_lock));
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
vq->vq_class[zio->io_priority].vqc_active++;
avl_add(&vq->vq_active_tree, zio);
if (ssh->kstat != NULL) {
mutex_enter(&ssh->lock);
kstat_runq_enter(ssh->kstat->ks_data);
mutex_exit(&ssh->lock);
}
}
static void
vdev_queue_pending_remove(vdev_queue_t *vq, zio_t *zio)
{
spa_t *spa = zio->io_spa;
spa_stats_history_t *ssh = &spa->spa_stats.io_history;
ASSERT(MUTEX_HELD(&vq->vq_lock));
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
vq->vq_class[zio->io_priority].vqc_active--;
avl_remove(&vq->vq_active_tree, zio);
if (ssh->kstat != NULL) {
kstat_io_t *ksio = ssh->kstat->ks_data;
mutex_enter(&ssh->lock);
kstat_runq_exit(ksio);
if (zio->io_type == ZIO_TYPE_READ) {
ksio->reads++;
ksio->nread += zio->io_size;
} else if (zio->io_type == ZIO_TYPE_WRITE) {
ksio->writes++;
ksio->nwritten += zio->io_size;
}
mutex_exit(&ssh->lock);
}
}
static void
vdev_queue_agg_io_done(zio_t *aio)
{
if (aio->io_type == ZIO_TYPE_READ) {
zio_t *pio;
zio_link_t *zl = NULL;
while ((pio = zio_walk_parents(aio, &zl)) != NULL) {
abd_copy_off(pio->io_abd, aio->io_abd,
0, pio->io_offset - aio->io_offset, pio->io_size);
}
}
abd_free(aio->io_abd);
}
/*
* Compute the range spanned by two i/os, which is the endpoint of the last
* (lio->io_offset + lio->io_size) minus start of the first (fio->io_offset).
* Conveniently, the gap between fio and lio is given by -IO_SPAN(lio, fio);
* thus fio and lio are adjacent if and only if IO_SPAN(lio, fio) == 0.
*/
#define IO_SPAN(fio, lio) ((lio)->io_offset + (lio)->io_size - (fio)->io_offset)
#define IO_GAP(fio, lio) (-IO_SPAN(lio, fio))
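/*
 * Worked example (editor's sketch with hypothetical offsets): for an 8K fio
 * at offset 0x10000 and an 8K lio at offset 0x14000,
 * IO_SPAN(fio, lio) = 0x14000 + 0x2000 - 0x10000 = 0x6000 (24K) and
 * IO_GAP(fio, lio) = 0x14000 - (0x10000 + 0x2000) = 0x2000 (8K).  Such a
 * pair is close enough to aggregate for reads, where gaps up to
 * zfs_vdev_read_gap_limit (32K by default) are spanned, but not for writes,
 * which only span gaps via the optional-I/O stretch logic below.
 */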
static zio_t *
vdev_queue_aggregate(vdev_queue_t *vq, zio_t *zio)
{
zio_t *first, *last, *aio, *dio, *mandatory, *nio;
zio_link_t *zl = NULL;
uint64_t maxgap = 0;
uint64_t size;
uint64_t limit;
int maxblocksize;
boolean_t stretch = B_FALSE;
avl_tree_t *t = vdev_queue_type_tree(vq, zio->io_type);
enum zio_flag flags = zio->io_flags & ZIO_FLAG_AGG_INHERIT;
abd_t *abd;
maxblocksize = spa_maxblocksize(vq->vq_vdev->vdev_spa);
limit = MAX(MIN(zfs_vdev_aggregation_limit, maxblocksize), 0);
if (zio->io_flags & ZIO_FLAG_DONT_AGGREGATE || limit == 0)
return (NULL);
first = last = zio;
if (zio->io_type == ZIO_TYPE_READ)
maxgap = zfs_vdev_read_gap_limit;
/*
* We can aggregate I/Os that are sufficiently adjacent and of
* the same flavor, as expressed by the AGG_INHERIT flags.
* The latter requirement is necessary so that certain
* attributes of the I/O, such as whether it's a normal I/O
* or a scrub/resilver, can be preserved in the aggregate.
* We can include optional I/Os, but don't allow them
* to begin a range as they add no benefit in that situation.
*/
/*
* We keep track of the last non-optional I/O.
*/
mandatory = (first->io_flags & ZIO_FLAG_OPTIONAL) ? NULL : first;
/*
* Walk backwards through sufficiently contiguous I/Os
* recording the last non-optional I/O.
*/
while ((dio = AVL_PREV(t, first)) != NULL &&
(dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
IO_SPAN(dio, last) <= limit &&
- IO_GAP(dio, first) <= maxgap) {
+ IO_GAP(dio, first) <= maxgap &&
+ dio->io_type == zio->io_type) {
first = dio;
if (mandatory == NULL && !(first->io_flags & ZIO_FLAG_OPTIONAL))
mandatory = first;
}
/*
* Skip any initial optional I/Os.
*/
while ((first->io_flags & ZIO_FLAG_OPTIONAL) && first != last) {
first = AVL_NEXT(t, first);
ASSERT(first != NULL);
}
/*
* Walk forward through sufficiently contiguous I/Os.
* The aggregation limit does not apply to optional i/os, so that
* we can issue contiguous writes even if they are larger than the
* aggregation limit.
*/
while ((dio = AVL_NEXT(t, last)) != NULL &&
(dio->io_flags & ZIO_FLAG_AGG_INHERIT) == flags &&
(IO_SPAN(first, dio) <= limit ||
(dio->io_flags & ZIO_FLAG_OPTIONAL)) &&
IO_SPAN(first, dio) <= maxblocksize &&
- IO_GAP(last, dio) <= maxgap) {
+ IO_GAP(last, dio) <= maxgap &&
+ dio->io_type == zio->io_type) {
last = dio;
if (!(last->io_flags & ZIO_FLAG_OPTIONAL))
mandatory = last;
}
/*
* Now that we've established the range of the I/O aggregation
* we must decide what to do with trailing optional I/Os.
* For reads, there's nothing to do. While we are unable to
* aggregate further, it's possible that a trailing optional
* I/O would allow the underlying device to aggregate with
* subsequent I/Os. We must therefore determine if the next
* non-optional I/O is close enough to make aggregation
* worthwhile.
*/
if (zio->io_type == ZIO_TYPE_WRITE && mandatory != NULL) {
zio_t *nio = last;
while ((dio = AVL_NEXT(t, nio)) != NULL &&
IO_GAP(nio, dio) == 0 &&
IO_GAP(mandatory, dio) <= zfs_vdev_write_gap_limit) {
nio = dio;
if (!(nio->io_flags & ZIO_FLAG_OPTIONAL)) {
stretch = B_TRUE;
break;
}
}
}
if (stretch) {
/*
* We are going to include an optional io in our aggregated
* span, thus closing the write gap. Only mandatory i/os can
* start aggregated spans, so make sure that the next i/o
* after our span is mandatory.
*/
dio = AVL_NEXT(t, last);
dio->io_flags &= ~ZIO_FLAG_OPTIONAL;
} else {
/* do not include the optional i/o */
while (last != mandatory && last != first) {
ASSERT(last->io_flags & ZIO_FLAG_OPTIONAL);
last = AVL_PREV(t, last);
ASSERT(last != NULL);
}
}
if (first == last)
return (NULL);
size = IO_SPAN(first, last);
ASSERT3U(size, <=, maxblocksize);
abd = abd_alloc_for_io(size, B_TRUE);
if (abd == NULL)
return (NULL);
aio = zio_vdev_delegated_io(first->io_vd, first->io_offset,
abd, size, first->io_type, zio->io_priority,
flags | ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE,
vdev_queue_agg_io_done, NULL);
aio->io_timestamp = first->io_timestamp;
nio = first;
do {
dio = nio;
nio = AVL_NEXT(t, dio);
ASSERT3U(dio->io_type, ==, aio->io_type);
if (dio->io_flags & ZIO_FLAG_NODATA) {
ASSERT3U(dio->io_type, ==, ZIO_TYPE_WRITE);
abd_zero_off(aio->io_abd,
dio->io_offset - aio->io_offset, dio->io_size);
} else if (dio->io_type == ZIO_TYPE_WRITE) {
abd_copy_off(aio->io_abd, dio->io_abd,
dio->io_offset - aio->io_offset, 0, dio->io_size);
}
zio_add_child(dio, aio);
vdev_queue_io_remove(vq, dio);
} while (dio != last);
/*
* We need to drop the vdev queue's lock to avoid a deadlock that we
* could encounter since this I/O will complete immediately.
*/
mutex_exit(&vq->vq_lock);
while ((dio = zio_walk_parents(aio, &zl)) != NULL) {
zio_vdev_io_bypass(dio);
zio_execute(dio);
}
mutex_enter(&vq->vq_lock);
return (aio);
}
static zio_t *
vdev_queue_io_to_issue(vdev_queue_t *vq)
{
zio_t *zio, *aio;
zio_priority_t p;
avl_index_t idx;
avl_tree_t *tree;
again:
ASSERT(MUTEX_HELD(&vq->vq_lock));
p = vdev_queue_class_to_issue(vq);
if (p == ZIO_PRIORITY_NUM_QUEUEABLE) {
/* No eligible queued i/os */
return (NULL);
}
/*
* For LBA-ordered queues (async / scrub), issue the i/o which follows
* the most recently issued i/o in LBA (offset) order.
*
* For FIFO queues (sync), issue the i/o with the lowest timestamp.
*/
tree = vdev_queue_class_tree(vq, p);
vq->vq_io_search.io_timestamp = 0;
vq->vq_io_search.io_offset = vq->vq_last_offset - 1;
VERIFY3P(avl_find(tree, &vq->vq_io_search, &idx), ==, NULL);
zio = avl_nearest(tree, idx, AVL_AFTER);
if (zio == NULL)
zio = avl_first(tree);
ASSERT3U(zio->io_priority, ==, p);
aio = vdev_queue_aggregate(vq, zio);
if (aio != NULL)
zio = aio;
else
vdev_queue_io_remove(vq, zio);
/*
* If the I/O is or was optional and therefore has no data, we need to
* simply discard it. We need to drop the vdev queue's lock to avoid a
* deadlock that we could encounter since this I/O will complete
* immediately.
*/
if (zio->io_flags & ZIO_FLAG_NODATA) {
mutex_exit(&vq->vq_lock);
zio_vdev_io_bypass(zio);
zio_execute(zio);
mutex_enter(&vq->vq_lock);
goto again;
}
vdev_queue_pending_add(vq, zio);
vq->vq_last_offset = zio->io_offset + zio->io_size;
return (zio);
}
zio_t *
vdev_queue_io(zio_t *zio)
{
vdev_queue_t *vq = &zio->io_vd->vdev_queue;
zio_t *nio;
if (zio->io_flags & ZIO_FLAG_DONT_QUEUE)
return (zio);
/*
* Child i/os inherit their parent's priority, which might
* not match the child's i/o type. Fix it up here.
*/
if (zio->io_type == ZIO_TYPE_READ) {
if (zio->io_priority != ZIO_PRIORITY_SYNC_READ &&
zio->io_priority != ZIO_PRIORITY_ASYNC_READ &&
- zio->io_priority != ZIO_PRIORITY_SCRUB)
+ zio->io_priority != ZIO_PRIORITY_SCRUB &&
+ zio->io_priority != ZIO_PRIORITY_REMOVAL)
zio->io_priority = ZIO_PRIORITY_ASYNC_READ;
} else {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
if (zio->io_priority != ZIO_PRIORITY_SYNC_WRITE &&
- zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE)
+ zio->io_priority != ZIO_PRIORITY_ASYNC_WRITE &&
+ zio->io_priority != ZIO_PRIORITY_REMOVAL)
zio->io_priority = ZIO_PRIORITY_ASYNC_WRITE;
}
zio->io_flags |= ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_QUEUE;
mutex_enter(&vq->vq_lock);
zio->io_timestamp = gethrtime();
vdev_queue_io_add(vq, zio);
nio = vdev_queue_io_to_issue(vq);
mutex_exit(&vq->vq_lock);
if (nio == NULL)
return (NULL);
if (nio->io_done == vdev_queue_agg_io_done) {
zio_nowait(nio);
return (NULL);
}
return (nio);
}
void
vdev_queue_io_done(zio_t *zio)
{
vdev_queue_t *vq = &zio->io_vd->vdev_queue;
zio_t *nio;
mutex_enter(&vq->vq_lock);
vdev_queue_pending_remove(vq, zio);
zio->io_delta = gethrtime() - zio->io_timestamp;
vq->vq_io_complete_ts = gethrtime();
vq->vq_io_delta_ts = vq->vq_io_complete_ts - zio->io_timestamp;
while ((nio = vdev_queue_io_to_issue(vq)) != NULL) {
mutex_exit(&vq->vq_lock);
if (nio->io_done == vdev_queue_agg_io_done) {
zio_nowait(nio);
} else {
zio_vdev_io_reissue(nio);
zio_execute(nio);
}
mutex_enter(&vq->vq_lock);
}
mutex_exit(&vq->vq_lock);
}
void
vdev_queue_change_io_priority(zio_t *zio, zio_priority_t priority)
{
vdev_queue_t *vq = &zio->io_vd->vdev_queue;
avl_tree_t *tree;
ASSERT3U(zio->io_priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
if (zio->io_type == ZIO_TYPE_READ) {
if (priority != ZIO_PRIORITY_SYNC_READ &&
priority != ZIO_PRIORITY_ASYNC_READ &&
priority != ZIO_PRIORITY_SCRUB)
priority = ZIO_PRIORITY_ASYNC_READ;
} else {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
if (priority != ZIO_PRIORITY_SYNC_WRITE &&
priority != ZIO_PRIORITY_ASYNC_WRITE)
priority = ZIO_PRIORITY_ASYNC_WRITE;
}
mutex_enter(&vq->vq_lock);
/*
* If the zio is in none of the queues we can simply change
* the priority. If the zio is waiting to be submitted we must
* remove it from the queue and re-insert it with the new priority.
* Otherwise, the zio is currently active and we cannot change its
* priority.
*/
tree = vdev_queue_class_tree(vq, zio->io_priority);
if (avl_find(tree, zio, NULL) == zio) {
avl_remove(vdev_queue_class_tree(vq, zio->io_priority), zio);
zio->io_priority = priority;
avl_add(vdev_queue_class_tree(vq, zio->io_priority), zio);
} else if (avl_find(&vq->vq_active_tree, zio, NULL) != zio) {
zio->io_priority = priority;
}
mutex_exit(&vq->vq_lock);
}
/*
* As these two methods are only used for load calculations we're not
* concerned if we get an incorrect value on 32-bit platforms due to lack of
* vq_lock mutex use here; instead we prefer to keep it lock free for
* performance.
*/
int
vdev_queue_length(vdev_t *vd)
{
return (avl_numnodes(&vd->vdev_queue.vq_active_tree));
}
uint64_t
vdev_queue_last_offset(vdev_t *vd)
{
return (vd->vdev_queue.vq_last_offset);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
module_param(zfs_vdev_aggregation_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_aggregation_limit, "Max vdev I/O aggregation size");
module_param(zfs_vdev_read_gap_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_read_gap_limit, "Aggregate read I/O over gap");
module_param(zfs_vdev_write_gap_limit, int, 0644);
MODULE_PARM_DESC(zfs_vdev_write_gap_limit, "Aggregate write I/O over gap");
module_param(zfs_vdev_max_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_max_active, "Maximum number of active I/Os per vdev");
module_param(zfs_vdev_async_write_active_max_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_vdev_async_write_active_max_dirty_percent,
"Async write concurrency max threshold");
module_param(zfs_vdev_async_write_active_min_dirty_percent, int, 0644);
MODULE_PARM_DESC(zfs_vdev_async_write_active_min_dirty_percent,
"Async write concurrency min threshold");
module_param(zfs_vdev_async_read_max_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_async_read_max_active,
"Max active async read I/Os per vdev");
module_param(zfs_vdev_async_read_min_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_async_read_min_active,
"Min active async read I/Os per vdev");
module_param(zfs_vdev_async_write_max_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_async_write_max_active,
"Max active async write I/Os per vdev");
module_param(zfs_vdev_async_write_min_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_async_write_min_active,
"Min active async write I/Os per vdev");
module_param(zfs_vdev_scrub_max_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_scrub_max_active, "Max active scrub I/Os per vdev");
module_param(zfs_vdev_scrub_min_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_scrub_min_active, "Min active scrub I/Os per vdev");
module_param(zfs_vdev_sync_read_max_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_sync_read_max_active,
"Max active sync read I/Os per vdev");
module_param(zfs_vdev_sync_read_min_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_sync_read_min_active,
"Min active sync read I/Os per vdev");
module_param(zfs_vdev_sync_write_max_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_sync_write_max_active,
"Max active sync write I/Os per vdev");
module_param(zfs_vdev_sync_write_min_active, int, 0644);
MODULE_PARM_DESC(zfs_vdev_sync_write_min_active,
"Min active sync write I/Os per vdev");
module_param(zfs_vdev_queue_depth_pct, int, 0644);
MODULE_PARM_DESC(zfs_vdev_queue_depth_pct,
"Queue depth percentage for each top-level vdev");
#endif
diff --git a/module/zfs/vdev_raidz.c b/module/zfs/vdev_raidz.c
index ef81af6f7716..a21baf9c264b 100644
--- a/module/zfs/vdev_raidz.c
+++ b/module/zfs/vdev_raidz.c
@@ -1,2338 +1,2339 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2014 by Delphix. All rights reserved.
* Copyright (c) 2016 Gvozden Nešković. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/zio_checksum.h>
#include <sys/abd.h>
#include <sys/fs/zfs.h>
#include <sys/fm/fs/zfs.h>
#include <sys/vdev_raidz.h>
#include <sys/vdev_raidz_impl.h>
/*
* Virtual device vector for RAID-Z.
*
* This vdev supports single, double, and triple parity. For single parity,
* we use a simple XOR of all the data columns. For double or triple parity,
* we use a special case of Reed-Solomon coding. This extends the
* technique described in "The mathematics of RAID-6" by H. Peter Anvin by
* drawing on the system described in "A Tutorial on Reed-Solomon Coding for
* Fault-Tolerance in RAID-like Systems" by James S. Plank on which the
* former is also based. The latter is designed to provide higher performance
* for writes.
*
* Note that the Plank paper claimed to support arbitrary N+M, but was then
* amended six years later identifying a critical flaw that invalidates its
* claims. Nevertheless, the technique can be adapted to work for up to
* triple parity. For additional parity, the amendment "Note: Correction to
* the 1997 Tutorial on Reed-Solomon Coding" by James S. Plank and Ying Ding
* is viable, but the additional complexity means that write performance will
* suffer.
*
* All of the methods above operate on a Galois field, defined over the
* integers mod 2^N. In our case we choose N=8 for GF(2^8) so that all elements
* can be expressed with a single byte. Briefly, the operations on the
* field are defined as follows:
*
* o addition (+) is represented by a bitwise XOR
* o subtraction (-) is therefore identical to addition: A + B = A - B
* o multiplication of A by 2 is defined by the following bitwise expression:
*
* (A * 2)_7 = A_6
* (A * 2)_6 = A_5
* (A * 2)_5 = A_4
* (A * 2)_4 = A_3 + A_7
* (A * 2)_3 = A_2 + A_7
* (A * 2)_2 = A_1 + A_7
* (A * 2)_1 = A_0
* (A * 2)_0 = A_7
*
* In C, multiplying by 2 is therefore ((a << 1) ^ ((a & 0x80) ? 0x1d : 0)).
* As an aside, this multiplication is derived from the error correcting
* primitive polynomial x^8 + x^4 + x^3 + x^2 + 1.
*
* Observe that any number in the field (except for 0) can be expressed as a
* power of 2 -- a generator for the field. We store a table of the powers of
* 2 and logs base 2 for quick look ups, and exploit the fact that A * B can
* be rewritten as 2^(log_2(A) + log_2(B)) (where '+' is normal addition rather
* than field addition). The inverse of a field element A (A^-1) is therefore
* A ^ (255 - 1) = A^254.
*
* The up-to-three parity columns, P, Q, R over several data columns,
* D_0, ... D_n-1, can be expressed by field operations:
*
* P = D_0 + D_1 + ... + D_n-2 + D_n-1
* Q = 2^n-1 * D_0 + 2^n-2 * D_1 + ... + 2^1 * D_n-2 + 2^0 * D_n-1
* = ((...((D_0) * 2 + D_1) * 2 + ...) * 2 + D_n-2) * 2 + D_n-1
* R = 4^n-1 * D_0 + 4^n-2 * D_1 + ... + 4^1 * D_n-2 + 4^0 * D_n-1
* = ((...((D_0) * 4 + D_1) * 4 + ...) * 4 + D_n-2) * 4 + D_n-1
*
* We chose 1, 2, and 4 as our generators because 1 corresponds to the trivial
* XOR operation, and 2 and 4 can be computed quickly and generate linearly-
* independent coefficients. (There are no additional coefficients that have
* this property which is why the uncorrected Plank method breaks down.)
*
* See the reconstruction code below for how P, Q and R can be used individually
* or in concert to recover missing data columns.
*/
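/*
 * Worked example (editor's sketch): multiplying by 2 in GF(2^8) using the
 * expression above, 0x53 * 2 = 0xa6 (the high bit is clear, so this is a
 * plain shift), while 0xca * 2 = 0x89 (the high bit is set, so the shifted
 * value 0x94 is reduced by XOR with 0x1d).  Applying the P and Q formulas
 * to the three data bytes D_0 = 0x01, D_1 = 0x02, D_2 = 0x03 gives
 * P = 0x01 + 0x02 + 0x03 = 0x00 and
 * Q = ((0x01 * 2) + 0x02) * 2 + 0x03 = 0x03.
 */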
#define VDEV_RAIDZ_P 0
#define VDEV_RAIDZ_Q 1
#define VDEV_RAIDZ_R 2
#define VDEV_RAIDZ_MUL_2(x) (((x) << 1) ^ (((x) & 0x80) ? 0x1d : 0))
#define VDEV_RAIDZ_MUL_4(x) (VDEV_RAIDZ_MUL_2(VDEV_RAIDZ_MUL_2(x)))
/*
* We provide a mechanism to perform the field multiplication operation on a
* 64-bit value all at once rather than a byte at a time. This works by
* creating a mask from the top bit in each byte and using that to
* conditionally apply the XOR of 0x1d.
*/
#define VDEV_RAIDZ_64MUL_2(x, mask) \
{ \
(mask) = (x) & 0x8080808080808080ULL; \
(mask) = ((mask) << 1) - ((mask) >> 7); \
(x) = (((x) << 1) & 0xfefefefefefefefeULL) ^ \
((mask) & 0x1d1d1d1d1d1d1d1dULL); \
}
#define VDEV_RAIDZ_64MUL_4(x, mask) \
{ \
VDEV_RAIDZ_64MUL_2((x), mask); \
VDEV_RAIDZ_64MUL_2((x), mask); \
}
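/*
 * Editor's note (illustrative): VDEV_RAIDZ_64MUL_2 performs eight byte-wide
 * field multiplications per operation.  For example, applying it to
 * x = 0x00000000000000caULL yields 0x0000000000000089ULL, matching the
 * byte-wise result above: the mask isolates the bytes whose high bit is set
 * and conditionally applies the 0x1d reduction to just those bytes.
 */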
void
vdev_raidz_map_free(raidz_map_t *rm)
{
int c;
for (c = 0; c < rm->rm_firstdatacol; c++) {
abd_free(rm->rm_col[c].rc_abd);
if (rm->rm_col[c].rc_gdata != NULL)
abd_free(rm->rm_col[c].rc_gdata);
}
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++)
abd_put(rm->rm_col[c].rc_abd);
if (rm->rm_abd_copy != NULL)
abd_free(rm->rm_abd_copy);
kmem_free(rm, offsetof(raidz_map_t, rm_col[rm->rm_scols]));
}
static void
vdev_raidz_map_free_vsd(zio_t *zio)
{
raidz_map_t *rm = zio->io_vsd;
ASSERT0(rm->rm_freed);
rm->rm_freed = 1;
if (rm->rm_reports == 0)
vdev_raidz_map_free(rm);
}
/*ARGSUSED*/
static void
vdev_raidz_cksum_free(void *arg, size_t ignored)
{
raidz_map_t *rm = arg;
ASSERT3U(rm->rm_reports, >, 0);
if (--rm->rm_reports == 0 && rm->rm_freed != 0)
vdev_raidz_map_free(rm);
}
static void
vdev_raidz_cksum_finish(zio_cksum_report_t *zcr, const abd_t *good_data)
{
raidz_map_t *rm = zcr->zcr_cbdata;
const size_t c = zcr->zcr_cbinfo;
size_t x, offset;
const abd_t *good = NULL;
const abd_t *bad = rm->rm_col[c].rc_abd;
if (good_data == NULL) {
zfs_ereport_finish_checksum(zcr, NULL, NULL, B_FALSE);
return;
}
if (c < rm->rm_firstdatacol) {
/*
* The first time through, calculate the parity blocks for
* the good data (this relies on the fact that the good
* data never changes for a given logical ZIO)
*/
if (rm->rm_col[0].rc_gdata == NULL) {
abd_t *bad_parity[VDEV_RAIDZ_MAXPARITY];
/*
* Set up the rm_col[]s to generate the parity for
* good_data, first saving the parity bufs and
* replacing them with buffers to hold the result.
*/
for (x = 0; x < rm->rm_firstdatacol; x++) {
bad_parity[x] = rm->rm_col[x].rc_abd;
rm->rm_col[x].rc_abd =
rm->rm_col[x].rc_gdata =
abd_alloc_sametype(rm->rm_col[x].rc_abd,
rm->rm_col[x].rc_size);
}
/* fill in the data columns from good_data */
offset = 0;
for (; x < rm->rm_cols; x++) {
abd_put(rm->rm_col[x].rc_abd);
rm->rm_col[x].rc_abd =
abd_get_offset_size((abd_t *)good_data,
offset, rm->rm_col[x].rc_size);
offset += rm->rm_col[x].rc_size;
}
/*
* Construct the parity from the good data.
*/
vdev_raidz_generate_parity(rm);
/* restore everything back to its original state */
for (x = 0; x < rm->rm_firstdatacol; x++)
rm->rm_col[x].rc_abd = bad_parity[x];
offset = 0;
for (x = rm->rm_firstdatacol; x < rm->rm_cols; x++) {
abd_put(rm->rm_col[x].rc_abd);
rm->rm_col[x].rc_abd = abd_get_offset_size(
rm->rm_abd_copy, offset,
rm->rm_col[x].rc_size);
offset += rm->rm_col[x].rc_size;
}
}
ASSERT3P(rm->rm_col[c].rc_gdata, !=, NULL);
good = abd_get_offset_size(rm->rm_col[c].rc_gdata, 0,
rm->rm_col[c].rc_size);
} else {
/* adjust good_data to point at the start of our column */
offset = 0;
for (x = rm->rm_firstdatacol; x < c; x++)
offset += rm->rm_col[x].rc_size;
good = abd_get_offset_size((abd_t *)good_data, offset,
rm->rm_col[c].rc_size);
}
/* we drop the ereport if it ends up that the data was good */
zfs_ereport_finish_checksum(zcr, good, bad, B_TRUE);
abd_put((abd_t *)good);
}
/*
* Invoked indirectly by zfs_ereport_start_checksum(), called
* below when our read operation fails completely. The main point
* is to keep a copy of everything we read from disk, so that at
* vdev_raidz_cksum_finish() time we can compare it with the good data.
*/
static void
vdev_raidz_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *arg)
{
size_t c = (size_t)(uintptr_t)arg;
size_t offset;
raidz_map_t *rm = zio->io_vsd;
size_t size;
/* set up the report and bump the refcount */
zcr->zcr_cbdata = rm;
zcr->zcr_cbinfo = c;
zcr->zcr_finish = vdev_raidz_cksum_finish;
zcr->zcr_free = vdev_raidz_cksum_free;
rm->rm_reports++;
ASSERT3U(rm->rm_reports, >, 0);
if (rm->rm_abd_copy != NULL)
return;
/*
* It's the first time we're called for this raidz_map_t, so we need
* to copy the data aside; there's no guarantee that our zio's buffer
* won't be re-used for something else.
*
* Our parity data is already in separate buffers, so there's no need
* to copy them.
*/
size = 0;
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++)
size += rm->rm_col[c].rc_size;
rm->rm_abd_copy = abd_alloc_for_io(size, B_FALSE);
for (offset = 0, c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
raidz_col_t *col = &rm->rm_col[c];
abd_t *tmp = abd_get_offset_size(rm->rm_abd_copy, offset,
col->rc_size);
abd_copy(tmp, col->rc_abd, col->rc_size);
abd_put(col->rc_abd);
col->rc_abd = tmp;
offset += col->rc_size;
}
ASSERT3U(offset, ==, size);
}
static const zio_vsd_ops_t vdev_raidz_vsd_ops = {
.vsd_free = vdev_raidz_map_free_vsd,
.vsd_cksum_report = vdev_raidz_cksum_report
};
/*
* Divides the IO evenly across all child vdevs; usually, dcols is
* the number of children in the target vdev.
*
* Avoid inlining the function to keep vdev_raidz_io_start(), which
* is this function's only caller, as small as possible on the stack.
*/
noinline raidz_map_t *
vdev_raidz_map_alloc(zio_t *zio, uint64_t ashift, uint64_t dcols,
uint64_t nparity)
{
raidz_map_t *rm;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = zio->io_offset >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = zio->io_size >> ashift;
/* The first column for this stripe. */
uint64_t f = b % dcols;
/* The starting byte offset on each child vdev. */
uint64_t o = (b / dcols) << ashift;
uint64_t q, r, c, bc, col, acols, scols, coff, devidx, asize, tot;
uint64_t off = 0;
/*
* "Quotient": The number of data sectors for this stripe on all but
* the "big column" child vdevs that also contain "remainder" data.
*/
q = s / (dcols - nparity);
/*
* "Remainder": The number of partial stripe data sectors in this I/O.
* This will add a sector to some, but not all, child vdevs.
*/
r = s - q * (dcols - nparity);
/* The number of "big columns" - those which contain remainder data. */
bc = (r == 0 ? 0 : r + nparity);
/*
* The total number of data and parity sectors associated with
* this I/O.
*/
tot = s + nparity * (q + (r == 0 ? 0 : 1));
/* acols: The columns that will be accessed. */
/* scols: The columns that will be accessed or skipped. */
if (q == 0) {
/* Our I/O request doesn't span all child vdevs. */
acols = bc;
scols = MIN(dcols, roundup(bc, nparity + 1));
} else {
acols = dcols;
scols = dcols;
}
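/*
 * Worked example (editor's sketch with hypothetical numbers): for a
 * 5-wide raidz1 (dcols = 5, nparity = 1) and an I/O of s = 7 sectors,
 * q = 7 / 4 = 1, r = 3, bc = 4 and tot = 9.  Since q != 0, acols and
 * scols are both 5; the four "big columns" each get q + 1 = 2 sectors
 * and the last column gets q = 1, for 9 sectors in all.  The row is then
 * padded to a multiple of nparity + 1 = 2 sectors, so rm_asize covers
 * 10 sectors and rm_nskip = 1 skip sector.
 */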
ASSERT3U(acols, <=, scols);
rm = kmem_alloc(offsetof(raidz_map_t, rm_col[scols]), KM_SLEEP);
rm->rm_cols = acols;
rm->rm_scols = scols;
rm->rm_bigcols = bc;
rm->rm_skipstart = bc;
rm->rm_missingdata = 0;
rm->rm_missingparity = 0;
rm->rm_firstdatacol = nparity;
rm->rm_abd_copy = NULL;
rm->rm_reports = 0;
rm->rm_freed = 0;
rm->rm_ecksuminjected = 0;
asize = 0;
for (c = 0; c < scols; c++) {
col = f + c;
coff = o;
if (col >= dcols) {
col -= dcols;
coff += 1ULL << ashift;
}
rm->rm_col[c].rc_devidx = col;
rm->rm_col[c].rc_offset = coff;
rm->rm_col[c].rc_abd = NULL;
rm->rm_col[c].rc_gdata = NULL;
rm->rm_col[c].rc_error = 0;
rm->rm_col[c].rc_tried = 0;
rm->rm_col[c].rc_skipped = 0;
if (c >= acols)
rm->rm_col[c].rc_size = 0;
else if (c < bc)
rm->rm_col[c].rc_size = (q + 1) << ashift;
else
rm->rm_col[c].rc_size = q << ashift;
asize += rm->rm_col[c].rc_size;
}
ASSERT3U(asize, ==, tot << ashift);
rm->rm_asize = roundup(asize, (nparity + 1) << ashift);
rm->rm_nskip = roundup(tot, nparity + 1) - tot;
ASSERT3U(rm->rm_asize - asize, ==, rm->rm_nskip << ashift);
ASSERT3U(rm->rm_nskip, <=, nparity);
for (c = 0; c < rm->rm_firstdatacol; c++)
rm->rm_col[c].rc_abd =
abd_alloc_linear(rm->rm_col[c].rc_size, B_FALSE);
rm->rm_col[c].rc_abd = abd_get_offset_size(zio->io_abd, 0,
rm->rm_col[c].rc_size);
off = rm->rm_col[c].rc_size;
for (c = c + 1; c < acols; c++) {
rm->rm_col[c].rc_abd = abd_get_offset_size(zio->io_abd, off,
rm->rm_col[c].rc_size);
off += rm->rm_col[c].rc_size;
}
/*
* If all data stored spans all columns, there's a danger that parity
* will always be on the same device and, since parity isn't read
* during normal operation, that that device's I/O bandwidth won't be
* used effectively. We therefore switch the parity every 1MB.
*
* ... at least that was, ostensibly, the theory. As a practical
* matter unless we juggle the parity between all devices evenly, we
* won't see any benefit. Further, occasional writes that aren't a
* multiple of the LCM of the number of children and the minimum
* stripe width are sufficient to avoid pessimal behavior.
* Unfortunately, this decision created an implicit on-disk format
* requirement that we need to support for all eternity, but only
* for single-parity RAID-Z.
*
* If we intend to skip a sector in the zeroth column for padding
* we must make sure to note this swap. We will never intend to
* skip the first column since at least one data and one parity
* column must appear in each row.
*/
ASSERT(rm->rm_cols >= 2);
ASSERT(rm->rm_col[0].rc_size == rm->rm_col[1].rc_size);
if (rm->rm_firstdatacol == 1 && (zio->io_offset & (1ULL << 20))) {
devidx = rm->rm_col[0].rc_devidx;
o = rm->rm_col[0].rc_offset;
rm->rm_col[0].rc_devidx = rm->rm_col[1].rc_devidx;
rm->rm_col[0].rc_offset = rm->rm_col[1].rc_offset;
rm->rm_col[1].rc_devidx = devidx;
rm->rm_col[1].rc_offset = o;
if (rm->rm_skipstart == 0)
rm->rm_skipstart = 1;
}
zio->io_vsd = rm;
zio->io_vsd_ops = &vdev_raidz_vsd_ops;
/* init RAIDZ parity ops */
rm->rm_ops = vdev_raidz_math_get_ops();
return (rm);
}
struct pqr_struct {
uint64_t *p;
uint64_t *q;
uint64_t *r;
};
static int
vdev_raidz_p_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && !pqr->q && !pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++)
*pqr->p ^= *src;
return (0);
}
static int
vdev_raidz_pq_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && !pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
}
return (0);
}
static int
vdev_raidz_pqr_func(void *buf, size_t size, void *private)
{
struct pqr_struct *pqr = private;
const uint64_t *src = buf;
uint64_t mask;
int i, cnt = size / sizeof (src[0]);
ASSERT(pqr->p && pqr->q && pqr->r);
for (i = 0; i < cnt; i++, src++, pqr->p++, pqr->q++, pqr->r++) {
*pqr->p ^= *src;
VDEV_RAIDZ_64MUL_2(*pqr->q, mask);
*pqr->q ^= *src;
VDEV_RAIDZ_64MUL_4(*pqr->r, mask);
*pqr->r ^= *src;
}
return (0);
}
static void
vdev_raidz_generate_parity_p(raidz_map_t *rm)
{
uint64_t *p;
int c;
abd_t *src;
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
src = rm->rm_col[c].rc_abd;
p = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
if (c == rm->rm_firstdatacol) {
abd_copy_to_buf(p, src, rm->rm_col[c].rc_size);
} else {
struct pqr_struct pqr = { p, NULL, NULL };
(void) abd_iterate_func(src, 0, rm->rm_col[c].rc_size,
vdev_raidz_p_func, &pqr);
}
}
}
static void
vdev_raidz_generate_parity_pq(raidz_map_t *rm)
{
uint64_t *p, *q, pcnt, ccnt, mask, i;
int c;
abd_t *src;
pcnt = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
rm->rm_col[VDEV_RAIDZ_Q].rc_size);
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
src = rm->rm_col[c].rc_abd;
p = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
q = abd_to_buf(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);
ccnt = rm->rm_col[c].rc_size / sizeof (p[0]);
if (c == rm->rm_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rm->rm_col[c].rc_size);
(void) memcpy(q, p, rm->rm_col[c].rc_size);
for (i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, NULL };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rm->rm_col[c].rc_size,
vdev_raidz_pq_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
for (i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
}
}
}
}
static void
vdev_raidz_generate_parity_pqr(raidz_map_t *rm)
{
uint64_t *p, *q, *r, pcnt, ccnt, mask, i;
int c;
abd_t *src;
pcnt = rm->rm_col[VDEV_RAIDZ_P].rc_size / sizeof (p[0]);
ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
rm->rm_col[VDEV_RAIDZ_Q].rc_size);
ASSERT(rm->rm_col[VDEV_RAIDZ_P].rc_size ==
rm->rm_col[VDEV_RAIDZ_R].rc_size);
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
src = rm->rm_col[c].rc_abd;
p = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
q = abd_to_buf(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);
r = abd_to_buf(rm->rm_col[VDEV_RAIDZ_R].rc_abd);
ccnt = rm->rm_col[c].rc_size / sizeof (p[0]);
if (c == rm->rm_firstdatacol) {
ASSERT(ccnt == pcnt || ccnt == 0);
abd_copy_to_buf(p, src, rm->rm_col[c].rc_size);
(void) memcpy(q, p, rm->rm_col[c].rc_size);
(void) memcpy(r, p, rm->rm_col[c].rc_size);
for (i = ccnt; i < pcnt; i++) {
p[i] = 0;
q[i] = 0;
r[i] = 0;
}
} else {
struct pqr_struct pqr = { p, q, r };
ASSERT(ccnt <= pcnt);
(void) abd_iterate_func(src, 0, rm->rm_col[c].rc_size,
vdev_raidz_pqr_func, &pqr);
/*
* Treat short columns as though they are full of 0s.
* Note that there's therefore nothing needed for P.
*/
for (i = ccnt; i < pcnt; i++) {
VDEV_RAIDZ_64MUL_2(q[i], mask);
VDEV_RAIDZ_64MUL_4(r[i], mask);
}
}
}
}
/*
* Generate RAID parity in the first virtual columns according to the number of
* parity columns available.
*/
void
vdev_raidz_generate_parity(raidz_map_t *rm)
{
/* Generate using the new math implementation */
if (vdev_raidz_math_generate(rm) != RAIDZ_ORIGINAL_IMPL)
return;
switch (rm->rm_firstdatacol) {
case 1:
vdev_raidz_generate_parity_p(rm);
break;
case 2:
vdev_raidz_generate_parity_pq(rm);
break;
case 3:
vdev_raidz_generate_parity_pqr(rm);
break;
default:
cmn_err(CE_PANIC, "invalid RAID-Z configuration");
}
}
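/*
 * Editor's note (illustrative summary, not part of the original change):
 * writing the data columns as D_0 .. D_n-1, with D_0 the column at
 * rm_firstdatacol, and using GF(2^8) arithmetic where "+" is XOR, the
 * loops above compute
 *
 *	P = D_0 + D_1 + ... + D_n-1
 *	Q = 2^(n-1) * D_0 + 2^(n-2) * D_1 + ... + 2^0 * D_n-1
 *	R = 4^(n-1) * D_0 + 4^(n-2) * D_1 + ... + 4^0 * D_n-1
 *
 * which matches the leading rows of the Vandermonde matrix described in
 * the reconstruction comment further below.
 */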
/* ARGSUSED */
static int
vdev_raidz_reconst_p_func(void *dbuf, void *sbuf, size_t size, void *private)
{
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
int cnt = size / sizeof (src[0]);
for (int i = 0; i < cnt; i++) {
dst[i] ^= src[i];
}
return (0);
}
/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_func(void *dbuf, void *sbuf, size_t size,
void *private)
{
uint64_t *dst = dbuf;
uint64_t *src = sbuf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, src++) {
VDEV_RAIDZ_64MUL_2(*dst, mask);
*dst ^= *src;
}
return (0);
}
/* ARGSUSED */
static int
vdev_raidz_reconst_q_pre_tail_func(void *buf, size_t size, void *private)
{
uint64_t *dst = buf;
uint64_t mask;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++) {
/* same operation as vdev_raidz_reconst_q_pre_func() on dst */
VDEV_RAIDZ_64MUL_2(*dst, mask);
}
return (0);
}
struct reconst_q_struct {
uint64_t *q;
int exp;
};
static int
vdev_raidz_reconst_q_post_func(void *buf, size_t size, void *private)
{
struct reconst_q_struct *rq = private;
uint64_t *dst = buf;
int cnt = size / sizeof (dst[0]);
for (int i = 0; i < cnt; i++, dst++, rq->q++) {
int j;
uint8_t *b;
*dst ^= *rq->q;
for (j = 0, b = (uint8_t *)dst; j < 8; j++, b++) {
*b = vdev_raidz_exp2(*b, rq->exp);
}
}
return (0);
}
struct reconst_pq_struct {
uint8_t *p;
uint8_t *q;
uint8_t *pxy;
uint8_t *qxy;
int aexp;
int bexp;
};
static int
vdev_raidz_reconst_pq_func(void *xbuf, void *ybuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
uint8_t *yd = ybuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++, yd++) {
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
*yd = *rpq->p ^ *rpq->pxy ^ *xd;
}
return (0);
}
static int
vdev_raidz_reconst_pq_tail_func(void *xbuf, size_t size, void *private)
{
struct reconst_pq_struct *rpq = private;
uint8_t *xd = xbuf;
for (int i = 0; i < size;
i++, rpq->p++, rpq->q++, rpq->pxy++, rpq->qxy++, xd++) {
/* same operation as vdev_raidz_reconst_pq_func() on xd */
*xd = vdev_raidz_exp2(*rpq->p ^ *rpq->pxy, rpq->aexp) ^
vdev_raidz_exp2(*rpq->q ^ *rpq->qxy, rpq->bexp);
}
return (0);
}
static int
vdev_raidz_reconstruct_p(raidz_map_t *rm, int *tgts, int ntgts)
{
int x = tgts[0];
int c;
abd_t *dst, *src;
ASSERT(ntgts == 1);
ASSERT(x >= rm->rm_firstdatacol);
ASSERT(x < rm->rm_cols);
ASSERT(rm->rm_col[x].rc_size <= rm->rm_col[VDEV_RAIDZ_P].rc_size);
ASSERT(rm->rm_col[x].rc_size > 0);
src = rm->rm_col[VDEV_RAIDZ_P].rc_abd;
dst = rm->rm_col[x].rc_abd;
abd_copy_from_buf(dst, abd_to_buf(src), rm->rm_col[x].rc_size);
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
uint64_t size = MIN(rm->rm_col[x].rc_size,
rm->rm_col[c].rc_size);
src = rm->rm_col[c].rc_abd;
dst = rm->rm_col[x].rc_abd;
if (c == x)
continue;
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_p_func, NULL);
}
return (1 << VDEV_RAIDZ_P);
}
static int
vdev_raidz_reconstruct_q(raidz_map_t *rm, int *tgts, int ntgts)
{
int x = tgts[0];
int c, exp;
abd_t *dst, *src;
ASSERT(ntgts == 1);
ASSERT(rm->rm_col[x].rc_size <= rm->rm_col[VDEV_RAIDZ_Q].rc_size);
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
uint64_t size = (c == x) ? 0 : MIN(rm->rm_col[x].rc_size,
rm->rm_col[c].rc_size);
src = rm->rm_col[c].rc_abd;
dst = rm->rm_col[x].rc_abd;
if (c == rm->rm_firstdatacol) {
abd_copy(dst, src, size);
if (rm->rm_col[x].rc_size > size)
abd_zero_off(dst, size,
rm->rm_col[x].rc_size - size);
} else {
ASSERT3U(size, <=, rm->rm_col[x].rc_size);
(void) abd_iterate_func2(dst, src, 0, 0, size,
vdev_raidz_reconst_q_pre_func, NULL);
(void) abd_iterate_func(dst,
size, rm->rm_col[x].rc_size - size,
vdev_raidz_reconst_q_pre_tail_func, NULL);
}
}
src = rm->rm_col[VDEV_RAIDZ_Q].rc_abd;
dst = rm->rm_col[x].rc_abd;
exp = 255 - (rm->rm_cols - 1 - x);
struct reconst_q_struct rq = { abd_to_buf(src), exp };
(void) abd_iterate_func(dst, 0, rm->rm_col[x].rc_size,
vdev_raidz_reconst_q_post_func, &rq);
return (1 << VDEV_RAIDZ_Q);
}
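/*
 * Editor's note, not part of the original change: the exponent above works
 * because the multiplicative group of GF(2^8) has order 255, so 2^255 == 1
 * and hence 2^-k == 2^(255 - k).  The accumulation loop leaves
 * dst == Q + 2^(rm_cols - 1 - x) * D_x, so adding Q and then multiplying by
 * 2^(255 - (rm_cols - 1 - x)) -- done byte by byte in
 * vdev_raidz_reconst_q_post_func() via vdev_raidz_exp2() -- recovers D_x.
 */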
static int
vdev_raidz_reconstruct_pq(raidz_map_t *rm, int *tgts, int ntgts)
{
uint8_t *p, *q, *pxy, *qxy, tmp, a, b, aexp, bexp;
abd_t *pdata, *qdata;
uint64_t xsize, ysize;
int x = tgts[0];
int y = tgts[1];
abd_t *xd, *yd;
ASSERT(ntgts == 2);
ASSERT(x < y);
ASSERT(x >= rm->rm_firstdatacol);
ASSERT(y < rm->rm_cols);
ASSERT(rm->rm_col[x].rc_size >= rm->rm_col[y].rc_size);
/*
* Move the parity data aside -- we're going to compute parity as
* though columns x and y were full of zeros -- Pxy and Qxy. We want to
* reuse the parity generation mechanism without trashing the actual
* parity so we make those columns appear to be full of zeros by
* setting their lengths to zero.
*/
pdata = rm->rm_col[VDEV_RAIDZ_P].rc_abd;
qdata = rm->rm_col[VDEV_RAIDZ_Q].rc_abd;
xsize = rm->rm_col[x].rc_size;
ysize = rm->rm_col[y].rc_size;
rm->rm_col[VDEV_RAIDZ_P].rc_abd =
abd_alloc_linear(rm->rm_col[VDEV_RAIDZ_P].rc_size, B_TRUE);
rm->rm_col[VDEV_RAIDZ_Q].rc_abd =
abd_alloc_linear(rm->rm_col[VDEV_RAIDZ_Q].rc_size, B_TRUE);
rm->rm_col[x].rc_size = 0;
rm->rm_col[y].rc_size = 0;
vdev_raidz_generate_parity_pq(rm);
rm->rm_col[x].rc_size = xsize;
rm->rm_col[y].rc_size = ysize;
p = abd_to_buf(pdata);
q = abd_to_buf(qdata);
pxy = abd_to_buf(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
qxy = abd_to_buf(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);
xd = rm->rm_col[x].rc_abd;
yd = rm->rm_col[y].rc_abd;
/*
* We now have:
* Pxy = P + D_x + D_y
* Qxy = Q + 2^(ndevs - 1 - x) * D_x + 2^(ndevs - 1 - y) * D_y
*
* We can then solve for D_x:
* D_x = A * (P + Pxy) + B * (Q + Qxy)
* where
* A = 2^(x - y) * (2^(x - y) + 1)^-1
 * B = 2^-(ndevs - 1 - x) * (2^(x - y) + 1)^-1
*
* With D_x in hand, we can easily solve for D_y:
* D_y = P + Pxy + D_x
*/
a = vdev_raidz_pow2[255 + x - y];
b = vdev_raidz_pow2[255 - (rm->rm_cols - 1 - x)];
tmp = 255 - vdev_raidz_log2[a ^ 1];
aexp = vdev_raidz_log2[vdev_raidz_exp2(a, tmp)];
bexp = vdev_raidz_log2[vdev_raidz_exp2(b, tmp)];
ASSERT3U(xsize, >=, ysize);
struct reconst_pq_struct rpq = { p, q, pxy, qxy, aexp, bexp };
(void) abd_iterate_func2(xd, yd, 0, 0, ysize,
vdev_raidz_reconst_pq_func, &rpq);
(void) abd_iterate_func(xd, ysize, xsize - ysize,
vdev_raidz_reconst_pq_tail_func, &rpq);
abd_free(rm->rm_col[VDEV_RAIDZ_P].rc_abd);
abd_free(rm->rm_col[VDEV_RAIDZ_Q].rc_abd);
/*
* Restore the saved parity data.
*/
rm->rm_col[VDEV_RAIDZ_P].rc_abd = pdata;
rm->rm_col[VDEV_RAIDZ_Q].rc_abd = qdata;
return ((1 << VDEV_RAIDZ_P) | (1 << VDEV_RAIDZ_Q));
}
/* BEGIN CSTYLED */
/*
* In the general case of reconstruction, we must solve the system of linear
 * equations defined by the coefficients used to generate parity as well as
* the contents of the data and parity disks. This can be expressed with
* vectors for the original data (D) and the actual data (d) and parity (p)
* and a matrix composed of the identity matrix (I) and a dispersal matrix (V):
*
* __ __ __ __
* | | __ __ | p_0 |
* | V | | D_0 | | p_m-1 |
* | | x | : | = | d_0 |
* | I | | D_n-1 | | : |
* | | ~~ ~~ | d_n-1 |
* ~~ ~~ ~~ ~~
*
 * I is simply a square identity matrix of size n, and V is a Vandermonde
 * matrix defined by the coefficients we chose for the various parity columns
 * (1, 2, 4). Note that these values were chosen for simplicity, for speedy
 * computation, and for linear separability.
*
* __ __ __ __
* | 1 .. 1 1 1 | | p_0 |
* | 2^n-1 .. 4 2 1 | __ __ | : |
* | 4^n-1 .. 16 4 1 | | D_0 | | p_m-1 |
* | 1 .. 0 0 0 | | D_1 | | d_0 |
* | 0 .. 0 0 0 | x | D_2 | = | d_1 |
* | : : : : | | : | | d_2 |
* | 0 .. 1 0 0 | | D_n-1 | | : |
* | 0 .. 0 1 0 | ~~ ~~ | : |
* | 0 .. 0 0 1 | | d_n-1 |
* ~~ ~~ ~~ ~~
*
* Note that I, V, d, and p are known. To compute D, we must invert the
* matrix and use the known data and parity values to reconstruct the unknown
* data values. We begin by removing the rows in V|I and d|p that correspond
* to failed or missing columns; we then make V|I square (n x n) and d|p
* sized n by removing rows corresponding to unused parity from the bottom up
* to generate (V|I)' and (d|p)'. We can then generate the inverse of (V|I)'
* using Gauss-Jordan elimination. In the example below we use m=3 parity
* columns, n=8 data columns, with errors in d_1, d_2, and p_1:
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 128 64 32 16 8 4 2 1 | <-----+-+-- missing disks
* | 19 205 116 29 64 16 4 1 | / /
* | 1 0 0 0 0 0 0 0 | / /
* | 0 1 0 0 0 0 0 0 | <--' /
* (V|I) = | 0 0 1 0 0 0 0 0 | <---'
* | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 1 1 1 1 1 1 1 |
* | 128 64 32 16 8 4 2 1 |
* | 19 205 116 29 64 16 4 1 |
* | 1 0 0 0 0 0 0 0 |
* | 0 1 0 0 0 0 0 0 |
* (V|I)' = | 0 0 1 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* Here we employ Gauss-Jordan elimination to find the inverse of (V|I)'. We
* have carefully chosen the seed values 1, 2, and 4 to ensure that this
* matrix is not singular.
* __ __
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 1 1 1 1 1 1 1 1 1 0 0 0 0 0 0 0 |
* | 19 205 116 29 64 16 4 1 0 1 0 0 0 0 0 0 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 205 116 0 0 0 0 0 0 1 19 29 64 16 4 1 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 185 0 0 0 0 0 205 1 222 208 141 221 201 204 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 1 0 0 0 0 0 1 0 1 1 1 1 1 1 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 1 0 0 0 0 0 0 0 0 0 1 0 0 0 0 0 |
* | 0 1 0 0 0 0 0 0 167 100 5 41 159 169 217 208 |
* | 0 0 1 0 0 0 0 0 166 100 4 40 158 168 216 209 |
* | 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 0 0 0 0 0 0 0 1 |
* ~~ ~~
* __ __
* | 0 0 1 0 0 0 0 0 |
* | 167 100 5 41 159 169 217 208 |
* | 166 100 4 40 158 168 216 209 |
* (V|I)'^-1 = | 0 0 0 1 0 0 0 0 |
* | 0 0 0 0 1 0 0 0 |
* | 0 0 0 0 0 1 0 0 |
* | 0 0 0 0 0 0 1 0 |
* | 0 0 0 0 0 0 0 1 |
* ~~ ~~
*
* We can then simply compute D = (V|I)'^-1 x (d|p)' to discover the values
* of the missing data.
*
* As is apparent from the example above, the only non-trivial rows in the
* inverse matrix correspond to the data disks that we're trying to
* reconstruct. Indeed, those are the only rows we need as the others would
* only be useful for reconstructing data known or assumed to be valid. For
* that reason, we only build the coefficients in the rows that correspond to
* targeted columns.
*/
/* END CSTYLED */
static void
vdev_raidz_matrix_init(raidz_map_t *rm, int n, int nmap, int *map,
uint8_t **rows)
{
int i, j;
int pow;
ASSERT(n == rm->rm_cols - rm->rm_firstdatacol);
/*
* Fill in the missing rows of interest.
*/
for (i = 0; i < nmap; i++) {
ASSERT3S(0, <=, map[i]);
ASSERT3S(map[i], <=, 2);
pow = map[i] * n;
if (pow > 255)
pow -= 255;
ASSERT(pow <= 255);
for (j = 0; j < n; j++) {
pow -= map[i];
if (pow < 0)
pow += 255;
rows[i][j] = vdev_raidz_pow2[pow];
}
}
}
static void
vdev_raidz_matrix_invert(raidz_map_t *rm, int n, int nmissing, int *missing,
uint8_t **rows, uint8_t **invrows, const uint8_t *used)
{
int i, j, ii, jj;
uint8_t log;
/*
* Assert that the first nmissing entries from the array of used
* columns correspond to parity columns and that subsequent entries
* correspond to data columns.
*/
for (i = 0; i < nmissing; i++) {
ASSERT3S(used[i], <, rm->rm_firstdatacol);
}
for (; i < n; i++) {
ASSERT3S(used[i], >=, rm->rm_firstdatacol);
}
/*
* First initialize the storage where we'll compute the inverse rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
invrows[i][j] = (i == j) ? 1 : 0;
}
}
/*
* Subtract all trivial rows from the rows of consequence.
*/
for (i = 0; i < nmissing; i++) {
for (j = nmissing; j < n; j++) {
ASSERT3U(used[j], >=, rm->rm_firstdatacol);
jj = used[j] - rm->rm_firstdatacol;
ASSERT3S(jj, <, n);
invrows[i][j] = rows[i][jj];
rows[i][jj] = 0;
}
}
/*
* For each of the rows of interest, we must normalize it and subtract
* a multiple of it from the other rows.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < missing[i]; j++) {
ASSERT0(rows[i][j]);
}
ASSERT3U(rows[i][missing[i]], !=, 0);
/*
* Compute the inverse of the first element and multiply each
* element in the row by that value.
*/
log = 255 - vdev_raidz_log2[rows[i][missing[i]]];
for (j = 0; j < n; j++) {
rows[i][j] = vdev_raidz_exp2(rows[i][j], log);
invrows[i][j] = vdev_raidz_exp2(invrows[i][j], log);
}
for (ii = 0; ii < nmissing; ii++) {
if (i == ii)
continue;
ASSERT3U(rows[ii][missing[i]], !=, 0);
log = vdev_raidz_log2[rows[ii][missing[i]]];
for (j = 0; j < n; j++) {
rows[ii][j] ^=
vdev_raidz_exp2(rows[i][j], log);
invrows[ii][j] ^=
vdev_raidz_exp2(invrows[i][j], log);
}
}
}
/*
 * Verify that the data left in the rows properly forms part of
 * an identity matrix.
*/
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
if (j == missing[i]) {
ASSERT3U(rows[i][j], ==, 1);
} else {
ASSERT0(rows[i][j]);
}
}
}
}
static void
vdev_raidz_matrix_reconstruct(raidz_map_t *rm, int n, int nmissing,
int *missing, uint8_t **invrows, const uint8_t *used)
{
int i, j, x, cc, c;
uint8_t *src;
uint64_t ccount;
uint8_t *dst[VDEV_RAIDZ_MAXPARITY] = { NULL };
uint64_t dcount[VDEV_RAIDZ_MAXPARITY] = { 0 };
uint8_t log = 0;
uint8_t val;
int ll;
uint8_t *invlog[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
psize = sizeof (invlog[0][0]) * n * nmissing;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing; i++) {
invlog[i] = pp;
pp += n;
}
for (i = 0; i < nmissing; i++) {
for (j = 0; j < n; j++) {
ASSERT3U(invrows[i][j], !=, 0);
invlog[i][j] = vdev_raidz_log2[invrows[i][j]];
}
}
for (i = 0; i < n; i++) {
c = used[i];
ASSERT3U(c, <, rm->rm_cols);
src = abd_to_buf(rm->rm_col[c].rc_abd);
ccount = rm->rm_col[c].rc_size;
for (j = 0; j < nmissing; j++) {
cc = missing[j] + rm->rm_firstdatacol;
ASSERT3U(cc, >=, rm->rm_firstdatacol);
ASSERT3U(cc, <, rm->rm_cols);
ASSERT3U(cc, !=, c);
dst[j] = abd_to_buf(rm->rm_col[cc].rc_abd);
dcount[j] = rm->rm_col[cc].rc_size;
}
ASSERT(ccount >= rm->rm_col[missing[0]].rc_size || i > 0);
for (x = 0; x < ccount; x++, src++) {
if (*src != 0)
log = vdev_raidz_log2[*src];
for (cc = 0; cc < nmissing; cc++) {
if (x >= dcount[cc])
continue;
if (*src == 0) {
val = 0;
} else {
if ((ll = log + invlog[cc][i]) >= 255)
ll -= 255;
val = vdev_raidz_pow2[ll];
}
if (i == 0)
dst[cc][x] = val;
else
dst[cc][x] ^= val;
}
}
}
kmem_free(p, psize);
}
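/*
 * Editor's illustrative sketch, not part of the original change: the inner
 * loop above multiplies each source byte by an inverse-matrix coefficient
 * using the log/exp-table technique shown below.  vdev_raidz_log2[] and
 * vdev_raidz_pow2[] are the tables already used throughout this file.
 */
static inline uint8_t
vdev_raidz_gf_mul_example(uint8_t a, uint8_t b)
{
	int l;

	if (a == 0 || b == 0)
		return (0);
	/* Multiplication is addition of discrete logs, taken modulo 255. */
	l = vdev_raidz_log2[a] + vdev_raidz_log2[b];
	if (l >= 255)
		l -= 255;
	return (vdev_raidz_pow2[l]);
}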
static int
vdev_raidz_reconstruct_general(raidz_map_t *rm, int *tgts, int ntgts)
{
int n, i, c, t, tt;
int nmissing_rows;
int missing_rows[VDEV_RAIDZ_MAXPARITY];
int parity_map[VDEV_RAIDZ_MAXPARITY];
uint8_t *p, *pp;
size_t psize;
uint8_t *rows[VDEV_RAIDZ_MAXPARITY];
uint8_t *invrows[VDEV_RAIDZ_MAXPARITY];
uint8_t *used;
abd_t **bufs = NULL;
int code = 0;
/*
* Matrix reconstruction can't use scatter ABDs yet, so we allocate
* temporary linear ABDs.
*/
if (!abd_is_linear(rm->rm_col[rm->rm_firstdatacol].rc_abd)) {
bufs = kmem_alloc(rm->rm_cols * sizeof (abd_t *), KM_PUSHPAGE);
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
raidz_col_t *col = &rm->rm_col[c];
bufs[c] = col->rc_abd;
col->rc_abd = abd_alloc_linear(col->rc_size, B_TRUE);
abd_copy(col->rc_abd, bufs[c], col->rc_size);
}
}
n = rm->rm_cols - rm->rm_firstdatacol;
/*
* Figure out which data columns are missing.
*/
nmissing_rows = 0;
for (t = 0; t < ntgts; t++) {
if (tgts[t] >= rm->rm_firstdatacol) {
missing_rows[nmissing_rows++] =
tgts[t] - rm->rm_firstdatacol;
}
}
/*
* Figure out which parity columns to use to help generate the missing
* data columns.
*/
for (tt = 0, c = 0, i = 0; i < nmissing_rows; c++) {
ASSERT(tt < ntgts);
ASSERT(c < rm->rm_firstdatacol);
/*
* Skip any targeted parity columns.
*/
if (c == tgts[tt]) {
tt++;
continue;
}
code |= 1 << c;
parity_map[i] = c;
i++;
}
ASSERT(code != 0);
ASSERT3U(code, <, 1 << VDEV_RAIDZ_MAXPARITY);
psize = (sizeof (rows[0][0]) + sizeof (invrows[0][0])) *
nmissing_rows * n + sizeof (used[0]) * n;
p = kmem_alloc(psize, KM_SLEEP);
for (pp = p, i = 0; i < nmissing_rows; i++) {
rows[i] = pp;
pp += n;
invrows[i] = pp;
pp += n;
}
used = pp;
for (i = 0; i < nmissing_rows; i++) {
used[i] = parity_map[i];
}
for (tt = 0, c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
if (tt < nmissing_rows &&
c == missing_rows[tt] + rm->rm_firstdatacol) {
tt++;
continue;
}
ASSERT3S(i, <, n);
used[i] = c;
i++;
}
/*
* Initialize the interesting rows of the matrix.
*/
vdev_raidz_matrix_init(rm, n, nmissing_rows, parity_map, rows);
/*
* Invert the matrix.
*/
vdev_raidz_matrix_invert(rm, n, nmissing_rows, missing_rows, rows,
invrows, used);
/*
* Reconstruct the missing data using the generated matrix.
*/
vdev_raidz_matrix_reconstruct(rm, n, nmissing_rows, missing_rows,
invrows, used);
kmem_free(p, psize);
/*
* copy back from temporary linear abds and free them
*/
if (bufs) {
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
raidz_col_t *col = &rm->rm_col[c];
abd_copy(bufs[c], col->rc_abd, col->rc_size);
abd_free(col->rc_abd);
col->rc_abd = bufs[c];
}
kmem_free(bufs, rm->rm_cols * sizeof (abd_t *));
}
return (code);
}
int
vdev_raidz_reconstruct(raidz_map_t *rm, const int *t, int nt)
{
int tgts[VDEV_RAIDZ_MAXPARITY], *dt;
int ntgts;
int i, c, ret;
int code;
int nbadparity, nbaddata;
int parity_valid[VDEV_RAIDZ_MAXPARITY];
/*
* The tgts list must already be sorted.
*/
for (i = 1; i < nt; i++) {
ASSERT(t[i] > t[i - 1]);
}
nbadparity = rm->rm_firstdatacol;
nbaddata = rm->rm_cols - nbadparity;
ntgts = 0;
for (i = 0, c = 0; c < rm->rm_cols; c++) {
if (c < rm->rm_firstdatacol)
parity_valid[c] = B_FALSE;
if (i < nt && c == t[i]) {
tgts[ntgts++] = c;
i++;
} else if (rm->rm_col[c].rc_error != 0) {
tgts[ntgts++] = c;
} else if (c >= rm->rm_firstdatacol) {
nbaddata--;
} else {
parity_valid[c] = B_TRUE;
nbadparity--;
}
}
ASSERT(ntgts >= nt);
ASSERT(nbaddata >= 0);
ASSERT(nbaddata + nbadparity == ntgts);
dt = &tgts[nbadparity];
/* Reconstruct using the new math implementation */
ret = vdev_raidz_math_reconstruct(rm, parity_valid, dt, nbaddata);
if (ret != RAIDZ_ORIGINAL_IMPL)
return (ret);
/*
* See if we can use any of our optimized reconstruction routines.
*/
switch (nbaddata) {
case 1:
if (parity_valid[VDEV_RAIDZ_P])
return (vdev_raidz_reconstruct_p(rm, dt, 1));
ASSERT(rm->rm_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_Q])
return (vdev_raidz_reconstruct_q(rm, dt, 1));
ASSERT(rm->rm_firstdatacol > 2);
break;
case 2:
ASSERT(rm->rm_firstdatacol > 1);
if (parity_valid[VDEV_RAIDZ_P] &&
parity_valid[VDEV_RAIDZ_Q])
return (vdev_raidz_reconstruct_pq(rm, dt, 2));
ASSERT(rm->rm_firstdatacol > 2);
break;
}
code = vdev_raidz_reconstruct_general(rm, tgts, ntgts);
ASSERT(code < (1 << VDEV_RAIDZ_MAXPARITY));
ASSERT(code > 0);
return (code);
}
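/*
 * Editor's illustrative sketch, not part of the original change: the value
 * returned above is a bitmask of the parity columns that were used, e.g.
 * (1 << VDEV_RAIDZ_P) | (1 << VDEV_RAIDZ_Q) for a P+Q reconstruction, which
 * is how vdev_raidz_io_done() later decides whether any parity still needs
 * to be verified.  A caller could test for a specific column like this:
 */
static inline boolean_t
vdev_raidz_code_used_parity_example(int code, int parity_col)
{
	return ((code & (1 << parity_col)) ? B_TRUE : B_FALSE);
}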
static int
vdev_raidz_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *ashift)
{
vdev_t *cvd;
uint64_t nparity = vd->vdev_nparity;
int c;
int lasterror = 0;
int numerrors = 0;
ASSERT(nparity > 0);
if (nparity > VDEV_RAIDZ_MAXPARITY ||
vd->vdev_children < nparity + 1) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
for (c = 0; c < vd->vdev_children; c++) {
cvd = vd->vdev_child[c];
if (cvd->vdev_open_error != 0) {
lasterror = cvd->vdev_open_error;
numerrors++;
continue;
}
*asize = MIN(*asize - 1, cvd->vdev_asize - 1) + 1;
*max_asize = MIN(*max_asize - 1, cvd->vdev_max_asize - 1) + 1;
*ashift = MAX(*ashift, cvd->vdev_ashift);
}
*asize *= vd->vdev_children;
*max_asize *= vd->vdev_children;
if (numerrors > nparity) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (lasterror);
}
return (0);
}
static void
vdev_raidz_close(vdev_t *vd)
{
int c;
for (c = 0; c < vd->vdev_children; c++)
vdev_close(vd->vdev_child[c]);
}
static uint64_t
vdev_raidz_asize(vdev_t *vd, uint64_t psize)
{
uint64_t asize;
uint64_t ashift = vd->vdev_top->vdev_ashift;
uint64_t cols = vd->vdev_children;
uint64_t nparity = vd->vdev_nparity;
asize = ((psize - 1) >> ashift) + 1;
asize += nparity * ((asize + cols - nparity - 1) / (cols - nparity));
asize = roundup(asize, nparity + 1) << ashift;
return (asize);
}
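/*
 * Editor's note (worked example, not part of the original change): for a
 * 6-wide raidz2 (cols = 6, nparity = 2) with ashift = 12 and psize = 128K,
 * asize starts at 32 data sectors, gains nparity * ceil(32 / 4) = 16 parity
 * sectors, and roundup(48, 3) leaves it at 48 sectors, i.e. 192K of
 * allocated space for 128K of logical data.
 */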
static void
vdev_raidz_child_done(zio_t *zio)
{
raidz_col_t *rc = zio->io_private;
rc->rc_error = zio->io_error;
rc->rc_tried = 1;
rc->rc_skipped = 0;
}
/*
* Start an IO operation on a RAIDZ VDev
*
* Outline:
* - For write operations:
* 1. Generate the parity data
* 2. Create child zio write operations to each column's vdev, for both
* data and parity.
* 3. If the column skips any sectors for padding, create optional dummy
* write zio children for those areas to improve aggregation continuity.
* - For read operations:
* 1. Create child zio read operations to each data column's vdev to read
* the range of data required for zio.
* 2. If this is a scrub or resilver operation, or if any of the data
* vdevs have had errors, then create zio read operations to the parity
* columns' VDevs as well.
*/
static void
vdev_raidz_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_t *tvd = vd->vdev_top;
vdev_t *cvd;
raidz_map_t *rm;
raidz_col_t *rc;
int c, i;
rm = vdev_raidz_map_alloc(zio, tvd->vdev_ashift, vd->vdev_children,
vd->vdev_nparity);
ASSERT3U(rm->rm_asize, ==, vdev_psize_to_asize(vd, zio->io_size));
if (zio->io_type == ZIO_TYPE_WRITE) {
vdev_raidz_generate_parity(rm);
for (c = 0; c < rm->rm_cols; c++) {
rc = &rm->rm_col[c];
cvd = vd->vdev_child[rc->rc_devidx];
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
}
/*
* Generate optional I/Os for any skipped sectors to improve
* aggregation contiguity.
*/
for (c = rm->rm_skipstart, i = 0; i < rm->rm_nskip; c++, i++) {
ASSERT(c <= rm->rm_scols);
if (c == rm->rm_scols)
c = 0;
rc = &rm->rm_col[c];
cvd = vd->vdev_child[rc->rc_devidx];
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset + rc->rc_size, NULL,
1 << tvd->vdev_ashift,
zio->io_type, zio->io_priority,
ZIO_FLAG_NODATA | ZIO_FLAG_OPTIONAL, NULL, NULL));
}
zio_execute(zio);
return;
}
ASSERT(zio->io_type == ZIO_TYPE_READ);
/*
* Iterate over the columns in reverse order so that we hit the parity
* last -- any errors along the way will force us to read the parity.
*/
for (c = rm->rm_cols - 1; c >= 0; c--) {
rc = &rm->rm_col[c];
cvd = vd->vdev_child[rc->rc_devidx];
if (!vdev_readable(cvd)) {
if (c >= rm->rm_firstdatacol)
rm->rm_missingdata++;
else
rm->rm_missingparity++;
rc->rc_error = SET_ERROR(ENXIO);
rc->rc_tried = 1; /* don't even try */
rc->rc_skipped = 1;
continue;
}
if (vdev_dtl_contains(cvd, DTL_MISSING, zio->io_txg, 1)) {
if (c >= rm->rm_firstdatacol)
rm->rm_missingdata++;
else
rm->rm_missingparity++;
rc->rc_error = SET_ERROR(ESTALE);
rc->rc_skipped = 1;
continue;
}
if (c >= rm->rm_firstdatacol || rm->rm_missingdata > 0 ||
(zio->io_flags & (ZIO_FLAG_SCRUB | ZIO_FLAG_RESILVER))) {
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
}
}
zio_execute(zio);
}
/*
* Report a checksum error for a child of a RAID-Z device.
*/
static void
raidz_checksum_error(zio_t *zio, raidz_col_t *rc, abd_t *bad_data)
{
vdev_t *vd = zio->io_vd->vdev_child[rc->rc_devidx];
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
zio_bad_cksum_t zbc;
raidz_map_t *rm = zio->io_vsd;
mutex_enter(&vd->vdev_stat_lock);
vd->vdev_stat.vs_checksum_errors++;
mutex_exit(&vd->vdev_stat_lock);
zbc.zbc_has_cksum = 0;
zbc.zbc_injected = rm->rm_ecksuminjected;
zfs_ereport_post_checksum(zio->io_spa, vd,
&zio->io_bookmark, zio, rc->rc_offset, rc->rc_size,
rc->rc_abd, bad_data, &zbc);
}
}
/*
* We keep track of whether or not there were any injected errors, so that
* any ereports we generate can note it.
*/
static int
raidz_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t zbc;
raidz_map_t *rm = zio->io_vsd;
bzero(&zbc, sizeof (zio_bad_cksum_t));
int ret = zio_checksum_error(zio, &zbc);
if (ret != 0 && zbc.zbc_injected != 0)
rm->rm_ecksuminjected = 1;
return (ret);
}
/*
* Generate the parity from the data columns. If we tried and were able to
* read the parity without error, verify that the generated parity matches the
* data we read. If it doesn't, we fire off a checksum error. Return the
 * number of such failures.
*/
static int
raidz_parity_verify(zio_t *zio, raidz_map_t *rm)
{
abd_t *orig[VDEV_RAIDZ_MAXPARITY];
int c, ret = 0;
raidz_col_t *rc;
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum = (bp == NULL ? zio->io_prop.zp_checksum :
(BP_IS_GANG(bp) ? ZIO_CHECKSUM_GANG_HEADER : BP_GET_CHECKSUM(bp)));
if (checksum == ZIO_CHECKSUM_NOPARITY)
return (ret);
for (c = 0; c < rm->rm_firstdatacol; c++) {
rc = &rm->rm_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
orig[c] = abd_alloc_sametype(rc->rc_abd, rc->rc_size);
abd_copy(orig[c], rc->rc_abd, rc->rc_size);
}
vdev_raidz_generate_parity(rm);
for (c = 0; c < rm->rm_firstdatacol; c++) {
rc = &rm->rm_col[c];
if (!rc->rc_tried || rc->rc_error != 0)
continue;
if (abd_cmp(orig[c], rc->rc_abd) != 0) {
raidz_checksum_error(zio, rc, orig[c]);
rc->rc_error = SET_ERROR(ECKSUM);
ret++;
}
abd_free(orig[c]);
}
return (ret);
}
static int
vdev_raidz_worst_error(raidz_map_t *rm)
{
int error = 0;
for (int c = 0; c < rm->rm_cols; c++)
error = zio_worst_error(error, rm->rm_col[c].rc_error);
return (error);
}
/*
* Iterate over all combinations of bad data and attempt a reconstruction.
* Note that the algorithm below is non-optimal because it doesn't take into
* account how reconstruction is actually performed. For example, with
* triple-parity RAID-Z the reconstruction procedure is the same if column 4
* is targeted as invalid as if columns 1 and 4 are targeted since in both
* cases we'd only use parity information in column 0.
*/
static int
vdev_raidz_combrec(zio_t *zio, int total_errors, int data_errors)
{
raidz_map_t *rm = zio->io_vsd;
raidz_col_t *rc;
abd_t *orig[VDEV_RAIDZ_MAXPARITY];
int tstore[VDEV_RAIDZ_MAXPARITY + 2];
int *tgts = &tstore[1];
int curr, next, i, c, n;
int code, ret = 0;
ASSERT(total_errors < rm->rm_firstdatacol);
/*
* This simplifies one edge condition.
*/
tgts[-1] = -1;
for (n = 1; n <= rm->rm_firstdatacol - total_errors; n++) {
/*
* Initialize the targets array by finding the first n columns
* that contain no error.
*
* If there were no data errors, we need to ensure that we're
* always explicitly attempting to reconstruct at least one
* data column. To do this, we simply push the highest target
* up into the data columns.
*/
for (c = 0, i = 0; i < n; i++) {
if (i == n - 1 && data_errors == 0 &&
c < rm->rm_firstdatacol) {
c = rm->rm_firstdatacol;
}
while (rm->rm_col[c].rc_error != 0) {
c++;
ASSERT3S(c, <, rm->rm_cols);
}
tgts[i] = c++;
}
/*
* Setting tgts[n] simplifies the other edge condition.
*/
tgts[n] = rm->rm_cols;
/*
* These buffers were allocated in previous iterations.
*/
for (i = 0; i < n - 1; i++) {
ASSERT(orig[i] != NULL);
}
orig[n - 1] = abd_alloc_sametype(rm->rm_col[0].rc_abd,
rm->rm_col[0].rc_size);
curr = 0;
next = tgts[curr];
while (curr != n) {
tgts[curr] = next;
curr = 0;
/*
* Save off the original data that we're going to
* attempt to reconstruct.
*/
for (i = 0; i < n; i++) {
ASSERT(orig[i] != NULL);
c = tgts[i];
ASSERT3S(c, >=, 0);
ASSERT3S(c, <, rm->rm_cols);
rc = &rm->rm_col[c];
abd_copy(orig[i], rc->rc_abd, rc->rc_size);
}
/*
* Attempt a reconstruction and exit the outer loop on
* success.
*/
code = vdev_raidz_reconstruct(rm, tgts, n);
if (raidz_checksum_verify(zio) == 0) {
for (i = 0; i < n; i++) {
c = tgts[i];
rc = &rm->rm_col[c];
ASSERT(rc->rc_error == 0);
if (rc->rc_tried)
raidz_checksum_error(zio, rc,
orig[i]);
rc->rc_error = SET_ERROR(ECKSUM);
}
ret = code;
goto done;
}
/*
* Restore the original data.
*/
for (i = 0; i < n; i++) {
c = tgts[i];
rc = &rm->rm_col[c];
abd_copy(rc->rc_abd, orig[i], rc->rc_size);
}
do {
/*
* Find the next valid column after the curr
 * position.
*/
for (next = tgts[curr] + 1;
next < rm->rm_cols &&
rm->rm_col[next].rc_error != 0; next++)
continue;
ASSERT(next <= tgts[curr + 1]);
/*
* If that spot is available, we're done here.
*/
if (next != tgts[curr + 1])
break;
/*
* Otherwise, find the next valid column after
* the previous position.
*/
for (c = tgts[curr - 1] + 1;
rm->rm_col[c].rc_error != 0; c++)
continue;
tgts[curr] = c;
curr++;
} while (curr != n);
}
}
n--;
done:
for (i = 0; i < n; i++)
abd_free(orig[i]);
return (ret);
}
/*
* Complete an IO operation on a RAIDZ VDev
*
* Outline:
* - For write operations:
* 1. Check for errors on the child IOs.
* 2. Return, setting an error code if too few child VDevs were written
* to reconstruct the data later. Note that partial writes are
* considered successful if they can be reconstructed at all.
* - For read operations:
* 1. Check for errors on the child IOs.
* 2. If data errors occurred:
* a. Try to reassemble the data from the parity available.
* b. If we haven't yet read the parity drives, read them now.
* c. If all parity drives have been read but the data still doesn't
* reassemble with a correct checksum, then try combinatorial
* reconstruction.
* d. If that doesn't work, return an error.
* 3. If there were unexpected errors or this is a resilver operation,
* rewrite the vdevs that had errors.
*/
static void
vdev_raidz_io_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_t *cvd;
raidz_map_t *rm = zio->io_vsd;
raidz_col_t *rc = NULL;
int unexpected_errors = 0;
int parity_errors = 0;
int parity_untried = 0;
int data_errors = 0;
int total_errors = 0;
int n, c;
int tgts[VDEV_RAIDZ_MAXPARITY];
int code;
ASSERT(zio->io_bp != NULL); /* XXX need to add code to enforce this */
ASSERT(rm->rm_missingparity <= rm->rm_firstdatacol);
ASSERT(rm->rm_missingdata <= rm->rm_cols - rm->rm_firstdatacol);
for (c = 0; c < rm->rm_cols; c++) {
rc = &rm->rm_col[c];
if (rc->rc_error) {
ASSERT(rc->rc_error != ECKSUM); /* child has no bp */
if (c < rm->rm_firstdatacol)
parity_errors++;
else
data_errors++;
if (!rc->rc_skipped)
unexpected_errors++;
total_errors++;
} else if (c < rm->rm_firstdatacol && !rc->rc_tried) {
parity_untried++;
}
}
if (zio->io_type == ZIO_TYPE_WRITE) {
/*
* XXX -- for now, treat partial writes as a success.
* (If we couldn't write enough columns to reconstruct
* the data, the I/O failed. Otherwise, good enough.)
*
* Now that we support write reallocation, it would be better
* to treat partial failure as real failure unless there are
* no non-degraded top-level vdevs left, and not update DTLs
* if we intend to reallocate.
*/
/* XXPOLICY */
if (total_errors > rm->rm_firstdatacol)
zio->io_error = vdev_raidz_worst_error(rm);
return;
}
ASSERT(zio->io_type == ZIO_TYPE_READ);
/*
* There are three potential phases for a read:
* 1. produce valid data from the columns read
* 2. read all disks and try again
* 3. perform combinatorial reconstruction
*
* Each phase is progressively both more expensive and less likely to
* occur. If we encounter more errors than we can repair or all phases
* fail, we have no choice but to return an error.
*/
/*
* If the number of errors we saw was correctable -- less than or equal
* to the number of parity disks read -- attempt to produce data that
* has a valid checksum. Naturally, this case applies in the absence of
* any errors.
*/
if (total_errors <= rm->rm_firstdatacol - parity_untried) {
if (data_errors == 0) {
if (raidz_checksum_verify(zio) == 0) {
/*
* If we read parity information (unnecessarily
* as it happens since no reconstruction was
* needed) regenerate and verify the parity.
* We also regenerate parity when resilvering
* so we can write it out to the failed device
* later.
*/
if (parity_errors + parity_untried <
rm->rm_firstdatacol ||
(zio->io_flags & ZIO_FLAG_RESILVER)) {
n = raidz_parity_verify(zio, rm);
unexpected_errors += n;
ASSERT(parity_errors + n <=
rm->rm_firstdatacol);
}
goto done;
}
} else {
/*
* We either attempt to read all the parity columns or
* none of them. If we didn't try to read parity, we
* wouldn't be here in the correctable case. There must
* also have been fewer parity errors than parity
* columns or, again, we wouldn't be in this code path.
*/
ASSERT(parity_untried == 0);
ASSERT(parity_errors < rm->rm_firstdatacol);
/*
* Identify the data columns that reported an error.
*/
n = 0;
for (c = rm->rm_firstdatacol; c < rm->rm_cols; c++) {
rc = &rm->rm_col[c];
if (rc->rc_error != 0) {
ASSERT(n < VDEV_RAIDZ_MAXPARITY);
tgts[n++] = c;
}
}
ASSERT(rm->rm_firstdatacol >= n);
code = vdev_raidz_reconstruct(rm, tgts, n);
if (raidz_checksum_verify(zio) == 0) {
/*
* If we read more parity disks than were used
* for reconstruction, confirm that the other
* parity disks produced correct data. This
* routine is suboptimal in that it regenerates
* the parity that we already used in addition
* to the parity that we're attempting to
* verify, but this should be a relatively
* uncommon case, and can be optimized if it
* becomes a problem. Note that we regenerate
* parity when resilvering so we can write it
* out to failed devices later.
*/
if (parity_errors < rm->rm_firstdatacol - n ||
(zio->io_flags & ZIO_FLAG_RESILVER)) {
n = raidz_parity_verify(zio, rm);
unexpected_errors += n;
ASSERT(parity_errors + n <=
rm->rm_firstdatacol);
}
goto done;
}
}
}
/*
* This isn't a typical situation -- either we got a read error or
* a child silently returned bad data. Read every block so we can
* try again with as much data and parity as we can track down. If
* we've already been through once before, all children will be marked
* as tried so we'll proceed to combinatorial reconstruction.
*/
unexpected_errors = 1;
rm->rm_missingdata = 0;
rm->rm_missingparity = 0;
for (c = 0; c < rm->rm_cols; c++) {
if (rm->rm_col[c].rc_tried)
continue;
zio_vdev_io_redone(zio);
do {
rc = &rm->rm_col[c];
if (rc->rc_tried)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL,
vd->vdev_child[rc->rc_devidx],
rc->rc_offset, rc->rc_abd, rc->rc_size,
zio->io_type, zio->io_priority, 0,
vdev_raidz_child_done, rc));
} while (++c < rm->rm_cols);
return;
}
/*
* At this point we've attempted to reconstruct the data given the
* errors we detected, and we've attempted to read all columns. There
* must, therefore, be one or more additional problems -- silent errors
* resulting in invalid data rather than explicit I/O errors resulting
* in absent data. We check if there is enough additional data to
* possibly reconstruct the data and then perform combinatorial
* reconstruction over all possible combinations. If that fails,
* we're cooked.
*/
if (total_errors > rm->rm_firstdatacol) {
zio->io_error = vdev_raidz_worst_error(rm);
} else if (total_errors < rm->rm_firstdatacol &&
(code = vdev_raidz_combrec(zio, total_errors, data_errors)) != 0) {
/*
* If we didn't use all the available parity for the
* combinatorial reconstruction, verify that the remaining
* parity is correct.
*/
if (code != (1 << rm->rm_firstdatacol) - 1)
(void) raidz_parity_verify(zio, rm);
} else {
/*
* We're here because either:
*
 * total_errors == rm_firstdatacol, or
* vdev_raidz_combrec() failed
*
* In either case, there is enough bad data to prevent
* reconstruction.
*
 * Start checksum ereports for all children that haven't
 * failed, provided the I/O wasn't speculative.
*/
zio->io_error = SET_ERROR(ECKSUM);
if (!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
for (c = 0; c < rm->rm_cols; c++) {
rc = &rm->rm_col[c];
if (rc->rc_error == 0) {
zio_bad_cksum_t zbc;
zbc.zbc_has_cksum = 0;
zbc.zbc_injected =
rm->rm_ecksuminjected;
zfs_ereport_start_checksum(
zio->io_spa,
vd->vdev_child[rc->rc_devidx],
&zio->io_bookmark, zio,
rc->rc_offset, rc->rc_size,
(void *)(uintptr_t)c, &zbc);
}
}
}
}
done:
zio_checksum_verified(zio);
if (zio->io_error == 0 && spa_writeable(zio->io_spa) &&
(unexpected_errors || (zio->io_flags & ZIO_FLAG_RESILVER))) {
/*
* Use the good data we have in hand to repair damaged children.
*/
for (c = 0; c < rm->rm_cols; c++) {
rc = &rm->rm_col[c];
cvd = vd->vdev_child[rc->rc_devidx];
if (rc->rc_error == 0)
continue;
zio_nowait(zio_vdev_child_io(zio, NULL, cvd,
rc->rc_offset, rc->rc_abd, rc->rc_size,
ZIO_TYPE_WRITE, ZIO_PRIORITY_ASYNC_WRITE,
ZIO_FLAG_IO_REPAIR | (unexpected_errors ?
ZIO_FLAG_SELF_HEAL : 0), NULL, NULL));
}
}
}
static void
vdev_raidz_state_change(vdev_t *vd, int faulted, int degraded)
{
if (faulted > vd->vdev_nparity)
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
else if (degraded + faulted != 0)
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
else
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
/*
* Determine if any portion of the provided block resides on a child vdev
* with a dirty DTL and therefore needs to be resilvered. The function
 * assumes that at least one DTL is dirty, which implies that full stripe
 * width blocks must be resilvered.
*/
static boolean_t
vdev_raidz_need_resilver(vdev_t *vd, uint64_t offset, size_t psize)
{
uint64_t dcols = vd->vdev_children;
uint64_t nparity = vd->vdev_nparity;
uint64_t ashift = vd->vdev_top->vdev_ashift;
/* The starting RAIDZ (parent) vdev sector of the block. */
uint64_t b = offset >> ashift;
/* The zio's size in units of the vdev's minimum sector size. */
uint64_t s = ((psize - 1) >> ashift) + 1;
/* The first column for this stripe. */
uint64_t f = b % dcols;
if (s + nparity >= dcols)
return (B_TRUE);
for (uint64_t c = 0; c < s + nparity; c++) {
uint64_t devidx = (f + c) % dcols;
vdev_t *cvd = vd->vdev_child[devidx];
/*
* dsl_scan_need_resilver() already checked vd with
 * vdev_dtl_contains(), so here we just check cvd with
 * vdev_dtl_empty(), which is cheaper and a good approximation.
*/
if (!vdev_dtl_empty(cvd, DTL_PARTIAL))
return (B_TRUE);
}
return (B_FALSE);
}
vdev_ops_t vdev_raidz_ops = {
vdev_raidz_open,
vdev_raidz_close,
vdev_raidz_asize,
vdev_raidz_io_start,
vdev_raidz_io_done,
vdev_raidz_state_change,
vdev_raidz_need_resilver,
NULL,
NULL,
+ NULL,
VDEV_TYPE_RAIDZ, /* name of this vdev type */
B_FALSE /* not a leaf vdev */
};
diff --git a/module/zfs/vdev_removal.c b/module/zfs/vdev_removal.c
new file mode 100644
index 000000000000..6e81bf014894
--- /dev/null
+++ b/module/zfs/vdev_removal.c
@@ -0,0 +1,1925 @@
+/*
+ * CDDL HEADER START
+ *
+ * The contents of this file are subject to the terms of the
+ * Common Development and Distribution License (the "License").
+ * You may not use this file except in compliance with the License.
+ *
+ * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
+ * or http://www.opensolaris.org/os/licensing.
+ * See the License for the specific language governing permissions
+ * and limitations under the License.
+ *
+ * When distributing Covered Code, include this CDDL HEADER in each
+ * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
+ * If applicable, add the following below this CDDL HEADER, with the
+ * fields enclosed by brackets "[]" replaced with your own identifying
+ * information: Portions Copyright [yyyy] [name of copyright owner]
+ *
+ * CDDL HEADER END
+ */
+
+/*
+ * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
+ * Copyright (c) 2011, 2017 by Delphix. All rights reserved.
+ */
+
+#include <sys/zfs_context.h>
+#include <sys/spa_impl.h>
+#include <sys/dmu.h>
+#include <sys/dmu_tx.h>
+#include <sys/zap.h>
+#include <sys/vdev_impl.h>
+#include <sys/metaslab.h>
+#include <sys/metaslab_impl.h>
+#include <sys/uberblock_impl.h>
+#include <sys/txg.h>
+#include <sys/avl.h>
+#include <sys/bpobj.h>
+#include <sys/dsl_pool.h>
+#include <sys/dsl_synctask.h>
+#include <sys/dsl_dir.h>
+#include <sys/arc.h>
+#include <sys/zfeature.h>
+#include <sys/vdev_indirect_births.h>
+#include <sys/vdev_indirect_mapping.h>
+#include <sys/abd.h>
+#include <sys/trace_vdev.h>
+
+/*
+ * This file contains the necessary logic to remove vdevs from a
+ * storage pool. Currently, the only devices that can be removed
+ * are log, cache, and spare devices; and top level vdevs from a pool
+ * w/o raidz or mirrors. (Note that members of a mirror can be removed
+ * by the detach operation.)
+ *
+ * Log vdevs are removed by evacuating them and then turning the vdev
+ * into a hole vdev while holding spa config locks.
+ *
+ * Top level vdevs are removed and converted into an indirect vdev via
+ * a multi-step process:
+ *
+ * - Disable allocations from this device (spa_vdev_remove_top).
+ *
+ * - From a new thread (spa_vdev_remove_thread), copy data from
+ * the removing vdev to a different vdev. The copy happens in open
+ * context (spa_vdev_copy_impl) and issues a sync task
+ * (vdev_mapping_sync) so the sync thread can update the partial
+ * indirect mappings in core and on disk.
+ *
+ * - If a free happens during a removal, it is freed from the
+ * removing vdev, and if it has already been copied, from the new
+ * location as well (free_from_removing_vdev).
+ *
+ * - After the removal is completed, the copy thread converts the vdev
+ * into an indirect vdev (vdev_remove_complete) before instructing
+ * the sync thread to destroy the space maps and finish the removal
+ * (spa_finish_removal).
+ */
+
+typedef struct vdev_copy_arg {
+ metaslab_t *vca_msp;
+ uint64_t vca_outstanding_bytes;
+ kcondvar_t vca_cv;
+ kmutex_t vca_lock;
+} vdev_copy_arg_t;
+
+typedef struct vdev_copy_seg_arg {
+ vdev_copy_arg_t *vcsa_copy_arg;
+ uint64_t vcsa_txg;
+ dva_t *vcsa_dest_dva;
+ blkptr_t *vcsa_dest_bp;
+} vdev_copy_seg_arg_t;
+
+/*
+ * The maximum amount of data we're allowed to copy from a device
+ * at a time when removing it.
+ */
+int zfs_remove_max_copy_bytes = 8 * 1024 * 1024;
+
+/*
+ * The largest contiguous segment that we will attempt to allocate when
+ * removing a device. This can be no larger than SPA_MAXBLOCKSIZE. If
+ * there is a performance problem with attempting to allocate large blocks,
+ * consider decreasing this.
+ */
+int zfs_remove_max_segment = SPA_MAXBLOCKSIZE;
+
+#define VDEV_REMOVAL_ZAP_OBJS "lzap"
+
+static void spa_vdev_remove_thread(void *arg);
+
+static void
+spa_sync_removing_state(spa_t *spa, dmu_tx_t *tx)
+{
+ VERIFY0(zap_update(spa->spa_dsl_pool->dp_meta_objset,
+ DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_REMOVING, sizeof (uint64_t),
+ sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
+ &spa->spa_removing_phys, tx));
+}
+
+static nvlist_t *
+spa_nvlist_lookup_by_guid(nvlist_t **nvpp, int count, uint64_t target_guid)
+{
+ for (int i = 0; i < count; i++) {
+ uint64_t guid =
+ fnvlist_lookup_uint64(nvpp[i], ZPOOL_CONFIG_GUID);
+
+ if (guid == target_guid)
+ return (nvpp[i]);
+ }
+
+ return (NULL);
+}
+
+static void
+spa_vdev_remove_aux(nvlist_t *config, char *name, nvlist_t **dev, int count,
+ nvlist_t *dev_to_remove)
+{
+ nvlist_t **newdev = NULL;
+
+ if (count > 1)
+ newdev = kmem_alloc((count - 1) * sizeof (void *), KM_SLEEP);
+
+ for (int i = 0, j = 0; i < count; i++) {
+ if (dev[i] == dev_to_remove)
+ continue;
+ VERIFY(nvlist_dup(dev[i], &newdev[j++], KM_SLEEP) == 0);
+ }
+
+ VERIFY(nvlist_remove(config, name, DATA_TYPE_NVLIST_ARRAY) == 0);
+ VERIFY(nvlist_add_nvlist_array(config, name, newdev, count - 1) == 0);
+
+ for (int i = 0; i < count - 1; i++)
+ nvlist_free(newdev[i]);
+
+ if (count > 1)
+ kmem_free(newdev, (count - 1) * sizeof (void *));
+}
+
+static spa_vdev_removal_t *
+spa_vdev_removal_create(vdev_t *vd)
+{
+ spa_vdev_removal_t *svr = kmem_zalloc(sizeof (*svr), KM_SLEEP);
+ mutex_init(&svr->svr_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&svr->svr_cv, NULL, CV_DEFAULT, NULL);
+ svr->svr_allocd_segs = range_tree_create(NULL, NULL);
+ svr->svr_vdev = vd;
+
+ for (int i = 0; i < TXG_SIZE; i++) {
+ svr->svr_frees[i] = range_tree_create(NULL, NULL);
+ list_create(&svr->svr_new_segments[i],
+ sizeof (vdev_indirect_mapping_entry_t),
+ offsetof(vdev_indirect_mapping_entry_t, vime_node));
+ }
+
+ return (svr);
+}
+
+void
+spa_vdev_removal_destroy(spa_vdev_removal_t *svr)
+{
+ for (int i = 0; i < TXG_SIZE; i++) {
+ ASSERT0(svr->svr_bytes_done[i]);
+ ASSERT0(svr->svr_max_offset_to_sync[i]);
+ range_tree_destroy(svr->svr_frees[i]);
+ list_destroy(&svr->svr_new_segments[i]);
+ }
+
+ range_tree_destroy(svr->svr_allocd_segs);
+ mutex_destroy(&svr->svr_lock);
+ cv_destroy(&svr->svr_cv);
+ kmem_free(svr, sizeof (*svr));
+}
+
+/*
+ * This is called as a synctask in the txg in which we will mark this vdev
+ * as removing (in the config stored in the MOS).
+ *
+ * It begins the evacuation of a toplevel vdev by:
+ * - initializing the spa_removing_phys which tracks this removal
+ * - computing the amount of space to remove for accounting purposes
+ * - dirtying all dbufs in the spa_config_object
+ * - creating the spa_vdev_removal
+ * - starting the spa_vdev_remove_thread
+ */
+static void
+vdev_remove_initiate_sync(void *arg, dmu_tx_t *tx)
+{
+ vdev_t *vd = arg;
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+ spa_t *spa = vd->vdev_spa;
+ objset_t *mos = spa->spa_dsl_pool->dp_meta_objset;
+ spa_vdev_removal_t *svr = NULL;
+ ASSERTV(uint64_t txg = dmu_tx_get_txg(tx));
+
+ ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
+ svr = spa_vdev_removal_create(vd);
+
+ ASSERT(vd->vdev_removing);
+ ASSERT3P(vd->vdev_indirect_mapping, ==, NULL);
+
+ spa_feature_incr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
+ if (spa_feature_is_enabled(spa, SPA_FEATURE_OBSOLETE_COUNTS)) {
+ /*
+ * By activating the OBSOLETE_COUNTS feature, we prevent
+ * the pool from being downgraded and ensure that the
+ * refcounts are precise.
+ */
+ spa_feature_incr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+ uint64_t one = 1;
+ VERIFY0(zap_add(spa->spa_meta_objset, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, sizeof (one), 1,
+ &one, tx));
+ ASSERT3U(vdev_obsolete_counts_are_precise(vd), !=, 0);
+ }
+
+ vic->vic_mapping_object = vdev_indirect_mapping_alloc(mos, tx);
+ vd->vdev_indirect_mapping =
+ vdev_indirect_mapping_open(mos, vic->vic_mapping_object);
+ vic->vic_births_object = vdev_indirect_births_alloc(mos, tx);
+ vd->vdev_indirect_births =
+ vdev_indirect_births_open(mos, vic->vic_births_object);
+ spa->spa_removing_phys.sr_removing_vdev = vd->vdev_id;
+ spa->spa_removing_phys.sr_start_time = gethrestime_sec();
+ spa->spa_removing_phys.sr_end_time = 0;
+ spa->spa_removing_phys.sr_state = DSS_SCANNING;
+ spa->spa_removing_phys.sr_to_copy = 0;
+ spa->spa_removing_phys.sr_copied = 0;
+
+ /*
+ * Note: We can't use vdev_stat's vs_alloc for sr_to_copy, because
+ * there may be space in the defer tree, which is free, but still
+ * counted in vs_alloc.
+ */
+ for (uint64_t i = 0; i < vd->vdev_ms_count; i++) {
+ metaslab_t *ms = vd->vdev_ms[i];
+ if (ms->ms_sm == NULL)
+ continue;
+
+ /*
+ * Sync tasks happen before metaslab_sync(), therefore
+ * smp_alloc and sm_alloc must be the same.
+ */
+ ASSERT3U(space_map_allocated(ms->ms_sm), ==,
+ ms->ms_sm->sm_phys->smp_alloc);
+
+ spa->spa_removing_phys.sr_to_copy +=
+ space_map_allocated(ms->ms_sm);
+
+ /*
+ * Space which we are freeing this txg does not need to
+ * be copied.
+ */
+ spa->spa_removing_phys.sr_to_copy -=
+ range_tree_space(ms->ms_freeingtree);
+
+ ASSERT0(range_tree_space(ms->ms_freedtree));
+ for (int t = 0; t < TXG_SIZE; t++)
+ ASSERT0(range_tree_space(ms->ms_alloctree[t]));
+ }
+
+ /*
+ * Sync tasks are called before metaslab_sync(), so there should
+ * be no already-synced metaslabs in the TXG_CLEAN list.
+ */
+ ASSERT3P(txg_list_head(&vd->vdev_ms_list, TXG_CLEAN(txg)), ==, NULL);
+
+ spa_sync_removing_state(spa, tx);
+
+ /*
+ * All blocks that we need to read the most recent mapping must be
+ * stored on concrete vdevs. Therefore, we must dirty anything that
+ * is read before spa_remove_init(). Specifically, the
+ * spa_config_object. (Note that although we already modified the
+ * spa_config_object in spa_sync_removing_state, that may not have
+ * modified all blocks of the object.)
+ */
+ dmu_object_info_t doi;
+ VERIFY0(dmu_object_info(mos, DMU_POOL_DIRECTORY_OBJECT, &doi));
+ for (uint64_t offset = 0; offset < doi.doi_max_offset; ) {
+ dmu_buf_t *dbuf;
+ VERIFY0(dmu_buf_hold(mos, DMU_POOL_DIRECTORY_OBJECT,
+ offset, FTAG, &dbuf, 0));
+ dmu_buf_will_dirty(dbuf, tx);
+ offset += dbuf->db_size;
+ dmu_buf_rele(dbuf, FTAG);
+ }
+
+ /*
+ * Now that we've allocated the im_object, dirty the vdev to ensure
+ * that the object gets written to the config on disk.
+ */
+ vdev_config_dirty(vd);
+
+ zfs_dbgmsg("starting removal thread for vdev %llu (%p) in txg %llu "
+ "im_obj=%llu", vd->vdev_id, vd, dmu_tx_get_txg(tx),
+ vic->vic_mapping_object);
+
+ spa_history_log_internal(spa, "vdev remove started", tx,
+ "%s vdev %llu %s", spa_name(spa), vd->vdev_id,
+ (vd->vdev_path != NULL) ? vd->vdev_path : "-");
+ /*
+ * Setting spa_vdev_removal causes subsequent frees to call
+ * free_from_removing_vdev(). Note that we don't need any locking
+ * because we are the sync thread, and metaslab_free_impl() is only
+ * called from syncing context (potentially from a zio taskq thread,
+ * but in any case only when there are outstanding free i/os, which
+ * there are not).
+ */
+ ASSERT3P(spa->spa_vdev_removal, ==, NULL);
+ spa->spa_vdev_removal = svr;
+ svr->svr_thread = thread_create(NULL, 0,
+ spa_vdev_remove_thread, vd, 0, &p0, TS_RUN, minclsyspri);
+}
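+
+/*
+ * Editor's illustrative sketch, not part of the original change: the
+ * sr_to_copy / sr_copied accounting initialized above can be used to report
+ * removal progress.  A minimal example, reading only fields referenced in
+ * this file:
+ */
+static uint64_t
+spa_removal_percent_done_example(spa_t *spa)
+{
+	uint64_t to_copy = spa->spa_removing_phys.sr_to_copy;
+
+	if (spa->spa_removing_phys.sr_state != DSS_SCANNING || to_copy == 0)
+		return (0);
+	return (spa->spa_removing_phys.sr_copied * 100 / to_copy);
+}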
+
+/*
+ * When we are opening a pool, we must read the mapping for each
+ * indirect vdev in order from most recently removed to least
+ * recently removed. We do this because the blocks for the mapping
+ * of older indirect vdevs may be stored on more recently removed vdevs.
+ * In order to read each indirect mapping object, we must have
+ * initialized all more recently removed vdevs.
+ */
+int
+spa_remove_init(spa_t *spa)
+{
+ int error;
+
+ error = zap_lookup(spa->spa_dsl_pool->dp_meta_objset,
+ DMU_POOL_DIRECTORY_OBJECT,
+ DMU_POOL_REMOVING, sizeof (uint64_t),
+ sizeof (spa->spa_removing_phys) / sizeof (uint64_t),
+ &spa->spa_removing_phys);
+
+ if (error == ENOENT) {
+ spa->spa_removing_phys.sr_state = DSS_NONE;
+ spa->spa_removing_phys.sr_removing_vdev = -1;
+ spa->spa_removing_phys.sr_prev_indirect_vdev = -1;
+ return (0);
+ } else if (error != 0) {
+ return (error);
+ }
+
+ if (spa->spa_removing_phys.sr_state == DSS_SCANNING) {
+ /*
+ * We are currently removing a vdev. Create and
+ * initialize a spa_vdev_removal_t from the bonus
+ * buffer of the removing vdev's vdev_im_object, and
+ * initialize its partial mapping.
+ */
+ spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
+ vdev_t *vd = vdev_lookup_top(spa,
+ spa->spa_removing_phys.sr_removing_vdev);
+ spa_config_exit(spa, SCL_STATE, FTAG);
+
+ if (vd == NULL)
+ return (EINVAL);
+
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+
+ ASSERT(vdev_is_concrete(vd));
+ spa_vdev_removal_t *svr = spa_vdev_removal_create(vd);
+ ASSERT(svr->svr_vdev->vdev_removing);
+
+ vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
+ spa->spa_meta_objset, vic->vic_mapping_object);
+ vd->vdev_indirect_births = vdev_indirect_births_open(
+ spa->spa_meta_objset, vic->vic_births_object);
+
+ spa->spa_vdev_removal = svr;
+ }
+
+ spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
+ uint64_t indirect_vdev_id =
+ spa->spa_removing_phys.sr_prev_indirect_vdev;
+ while (indirect_vdev_id != UINT64_MAX) {
+ vdev_t *vd = vdev_lookup_top(spa, indirect_vdev_id);
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+ vd->vdev_indirect_mapping = vdev_indirect_mapping_open(
+ spa->spa_meta_objset, vic->vic_mapping_object);
+ vd->vdev_indirect_births = vdev_indirect_births_open(
+ spa->spa_meta_objset, vic->vic_births_object);
+
+ indirect_vdev_id = vic->vic_prev_indirect_vdev;
+ }
+ spa_config_exit(spa, SCL_STATE, FTAG);
+
+ /*
+ * Now that we've loaded all the indirect mappings, we can allow
+ * reads from other blocks (e.g. via predictive prefetch).
+ */
+ spa->spa_indirect_vdevs_loaded = B_TRUE;
+ return (0);
+}
+
+void
+spa_restart_removal(spa_t *spa)
+{
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+
+ if (svr == NULL)
+ return;
+
+ /*
+ * In general when this function is called there is no
+ * removal thread running. The only scenario where this
+ * is not true is during spa_import() where this function
+ * is called twice [once from spa_import_impl() and
+ * spa_async_resume()]. Thus, in the scenario where we
+ * import a pool that has an ongoing removal we don't
+ * want to spawn a second thread.
+ */
+ if (svr->svr_thread != NULL)
+ return;
+
+ if (!spa_writeable(spa))
+ return;
+
+ vdev_t *vd = svr->svr_vdev;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+
+ ASSERT3P(vd, !=, NULL);
+ ASSERT(vd->vdev_removing);
+
+ zfs_dbgmsg("restarting removal of %llu at count=%llu",
+ vd->vdev_id, vdev_indirect_mapping_num_entries(vim));
+ svr->svr_thread = thread_create(NULL, 0, spa_vdev_remove_thread, vd,
+ 0, &p0, TS_RUN, minclsyspri);
+}
+
+/*
+ * Process freeing from a device which is in the middle of being removed.
+ * We must handle this carefully so that we attempt to copy freed data,
+ * and we correctly free already-copied data.
+ */
+void
+free_from_removing_vdev(vdev_t *vd, uint64_t offset, uint64_t size,
+ uint64_t txg)
+{
+ spa_t *spa = vd->vdev_spa;
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ uint64_t max_offset_yet = 0;
+
+ ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
+ ASSERT3U(vd->vdev_indirect_config.vic_mapping_object, ==,
+ vdev_indirect_mapping_object(vim));
+ ASSERT3P(vd, ==, svr->svr_vdev);
+ ASSERT3U(spa_syncing_txg(spa), ==, txg);
+
+ mutex_enter(&svr->svr_lock);
+
+ /*
+ * Remove the segment from the removing vdev's spacemap. This
+ * ensures that we will not attempt to copy this space (if the
+ * removal thread has not yet visited it), and also ensures
+ * that we know what is actually allocated on the new vdevs
+ * (needed if we cancel the removal).
+ *
+ * Note: we must do the metaslab_free_concrete() with the svr_lock
+ * held, so that the remove_thread can not load this metaslab and then
+ * visit this offset between the time that we metaslab_free_concrete()
+ * and when we check to see if it has been visited.
+ */
+ metaslab_free_concrete(vd, offset, size, txg);
+
+ uint64_t synced_size = 0;
+ uint64_t synced_offset = 0;
+ uint64_t max_offset_synced = vdev_indirect_mapping_max_offset(vim);
+ if (offset < max_offset_synced) {
+ /*
+ * The mapping for this offset is already on disk.
+ * Free from the new location.
+ *
+ * Note that we use svr_max_synced_offset because it is
+ * updated atomically with respect to the in-core mapping.
+ * By contrast, vim_max_offset is not.
+ *
+ * This block may be split between a synced entry and an
+ * in-flight or unvisited entry. Only process the synced
+ * portion of it here.
+ */
+ synced_size = MIN(size, max_offset_synced - offset);
+ synced_offset = offset;
+
+ ASSERT3U(max_offset_yet, <=, max_offset_synced);
+ max_offset_yet = max_offset_synced;
+
+ DTRACE_PROBE3(remove__free__synced,
+ spa_t *, spa,
+ uint64_t, offset,
+ uint64_t, synced_size);
+
+ size -= synced_size;
+ offset += synced_size;
+ }
+
+ /*
+ * Look at all in-flight txgs starting from the currently syncing one
+ * and see if a section of this free is being copied. By starting from
+ * this txg and iterating forward, we might find that this region
+ * was copied in two different txgs and handle it appropriately.
+ */
+ for (int i = 0; i < TXG_CONCURRENT_STATES; i++) {
+ int txgoff = (txg + i) & TXG_MASK;
+ if (size > 0 && offset < svr->svr_max_offset_to_sync[txgoff]) {
+ /*
+ * The mapping for this offset is in flight, and
+ * will be synced in txg+i.
+ */
+ uint64_t inflight_size = MIN(size,
+ svr->svr_max_offset_to_sync[txgoff] - offset);
+
+ DTRACE_PROBE4(remove__free__inflight,
+ spa_t *, spa,
+ uint64_t, offset,
+ uint64_t, inflight_size,
+ uint64_t, txg + i);
+
+ /*
+ * We copy data in order of increasing offset.
+ * Therefore the max_offset_to_sync[] must increase
+ * (or be zero, indicating that nothing is being
+ * copied in that txg).
+ */
+ if (svr->svr_max_offset_to_sync[txgoff] != 0) {
+ ASSERT3U(svr->svr_max_offset_to_sync[txgoff],
+ >=, max_offset_yet);
+ max_offset_yet =
+ svr->svr_max_offset_to_sync[txgoff];
+ }
+
+ /*
+ * We've already committed to copying this segment:
+ * we have allocated space elsewhere in the pool for
+ * it and have an IO outstanding to copy the data. We
+ * cannot free the space before the copy has
+ * completed, or else the copy IO might overwrite any
+ * new data. To free that space, we record the
+ * segment in the appropriate svr_frees tree and free
+ * the mapped space later, in the txg where we have
+ * completed the copy and synced the mapping (see
+ * vdev_mapping_sync).
+ */
+ range_tree_add(svr->svr_frees[txgoff],
+ offset, inflight_size);
+ size -= inflight_size;
+ offset += inflight_size;
+
+ /*
+ * This space is already accounted for as being
+ * done, because it is being copied in txg+i.
+ * However, if i!=0, then it is being copied in
+ * a future txg. If we crash after this txg
+ * syncs but before txg+i syncs, then the space
+ * will be free. Therefore we must account
+ * for the space being done in *this* txg
+ * (when it is freed) rather than the future txg
+ * (when it will be copied).
+ */
+ ASSERT3U(svr->svr_bytes_done[txgoff], >=,
+ inflight_size);
+ svr->svr_bytes_done[txgoff] -= inflight_size;
+ svr->svr_bytes_done[txg & TXG_MASK] += inflight_size;
+ }
+ }
+ ASSERT0(svr->svr_max_offset_to_sync[TXG_CLEAN(txg) & TXG_MASK]);
+
+ if (size > 0) {
+ /*
+ * The copy thread has not yet visited this offset. Ensure
+ * that it doesn't.
+ */
+
+ DTRACE_PROBE3(remove__free__unvisited,
+ spa_t *, spa,
+ uint64_t, offset,
+ uint64_t, size);
+
+ if (svr->svr_allocd_segs != NULL)
+ range_tree_clear(svr->svr_allocd_segs, offset, size);
+
+ /*
+ * Since we now do not need to copy this data, for
+ * accounting purposes we have done our job and can count
+ * it as completed.
+ */
+ svr->svr_bytes_done[txg & TXG_MASK] += size;
+ }
+ mutex_exit(&svr->svr_lock);
+
+ /*
+ * Now that we have dropped svr_lock, process the synced portion
+ * of this free.
+ */
+ if (synced_size > 0) {
+ vdev_indirect_mark_obsolete(vd, synced_offset, synced_size,
+ txg);
+ /*
+ * Note: this can only be called from syncing context,
+ * and the vdev_indirect_mapping is only changed from the
+ * sync thread, so we don't need svr_lock while doing
+ * metaslab_free_impl_cb.
+ */
+ vdev_indirect_ops.vdev_op_remap(vd, synced_offset, synced_size,
+ metaslab_free_impl_cb, &txg);
+ }
+}
+
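+/*
+ * Illustrative example of the three-way split above (hypothetical
+ * numbers): suppose the synced mapping covers offsets [0, 100M), the
+ * currently-syncing txg has copied, but not yet synced, mappings up to
+ * 120M, and we are asked to free [90M, 130M).  The first 10M is the
+ * "synced" portion and is freed from its new location through the
+ * mapping; the next 20M is in flight, so it is recorded in svr_frees
+ * and freed once vdev_mapping_sync() has written that txg's mapping
+ * entries; the final 10M has not been visited by the copy thread, so
+ * it is simply cleared from svr_allocd_segs and counted as done.
+ */
+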
+/*
+ * Stop an active removal and update the spa_removing phys.
+ */
+static void
+spa_finish_removal(spa_t *spa, dsl_scan_state_t state, dmu_tx_t *tx)
+{
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+ ASSERT3U(dmu_tx_get_txg(tx), ==, spa_syncing_txg(spa));
+
+ /* Ensure the removal thread has completed before we free the svr. */
+ spa_vdev_remove_suspend(spa);
+
+ ASSERT(state == DSS_FINISHED || state == DSS_CANCELED);
+
+ if (state == DSS_FINISHED) {
+ spa_removing_phys_t *srp = &spa->spa_removing_phys;
+ vdev_t *vd = svr->svr_vdev;
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+
+ if (srp->sr_prev_indirect_vdev != UINT64_MAX) {
+ vdev_t *pvd;
+ pvd = vdev_lookup_top(spa,
+ srp->sr_prev_indirect_vdev);
+ ASSERT3P(pvd->vdev_ops, ==, &vdev_indirect_ops);
+ }
+
+ vic->vic_prev_indirect_vdev = srp->sr_prev_indirect_vdev;
+ srp->sr_prev_indirect_vdev = vd->vdev_id;
+ }
+ spa->spa_removing_phys.sr_state = state;
+ spa->spa_removing_phys.sr_end_time = gethrestime_sec();
+
+ spa->spa_vdev_removal = NULL;
+ spa_vdev_removal_destroy(svr);
+
+ spa_sync_removing_state(spa, tx);
+
+ vdev_config_dirty(spa->spa_root_vdev);
+}
+
+static void
+free_mapped_segment_cb(void *arg, uint64_t offset, uint64_t size)
+{
+ vdev_t *vd = arg;
+ vdev_indirect_mark_obsolete(vd, offset, size,
+ vd->vdev_spa->spa_syncing_txg);
+ vdev_indirect_ops.vdev_op_remap(vd, offset, size,
+ metaslab_free_impl_cb, &vd->vdev_spa->spa_syncing_txg);
+}
+
+/*
+ * On behalf of the removal thread, syncs an incremental bit more of
+ * the indirect mapping to disk and updates the in-memory mapping.
+ * Called as a sync task in every txg that the removal thread makes progress.
+ */
+static void
+vdev_mapping_sync(void *arg, dmu_tx_t *tx)
+{
+ spa_vdev_removal_t *svr = arg;
+ spa_t *spa = dmu_tx_pool(tx)->dp_spa;
+ vdev_t *vd = svr->svr_vdev;
+ ASSERTV(vdev_indirect_config_t *vic = &vd->vdev_indirect_config);
+ uint64_t txg = dmu_tx_get_txg(tx);
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+
+ ASSERT(vic->vic_mapping_object != 0);
+ ASSERT3U(txg, ==, spa_syncing_txg(spa));
+
+ vdev_indirect_mapping_add_entries(vim,
+ &svr->svr_new_segments[txg & TXG_MASK], tx);
+ vdev_indirect_births_add_entry(vd->vdev_indirect_births,
+ vdev_indirect_mapping_max_offset(vim), dmu_tx_get_txg(tx), tx);
+
+ /*
+ * Free the copied data for anything that was freed while the
+ * mapping entries were in flight.
+ */
+ mutex_enter(&svr->svr_lock);
+ range_tree_vacate(svr->svr_frees[txg & TXG_MASK],
+ free_mapped_segment_cb, vd);
+ ASSERT3U(svr->svr_max_offset_to_sync[txg & TXG_MASK], >=,
+ vdev_indirect_mapping_max_offset(vim));
+ svr->svr_max_offset_to_sync[txg & TXG_MASK] = 0;
+ mutex_exit(&svr->svr_lock);
+
+ spa_sync_removing_state(spa, tx);
+}
+
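+/*
+ * Note on the per-txg arrays used above and in free_from_removing_vdev():
+ * svr_new_segments, svr_frees, svr_max_offset_to_sync, and svr_bytes_done
+ * are indexed by (txg & TXG_MASK).  With TXG_SIZE of 4, txg 100 maps to
+ * slot 0, txg 101 to slot 1, and so on; each slot is reused every four
+ * txgs, so its contents must be consumed (as vdev_mapping_sync() does for
+ * svr_frees and svr_max_offset_to_sync) before that txg number recurs.
+ */
+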
+static void
+spa_vdev_copy_segment_write_done(zio_t *zio)
+{
+ vdev_copy_seg_arg_t *vcsa = zio->io_private;
+ vdev_copy_arg_t *vca = vcsa->vcsa_copy_arg;
+ spa_config_exit(zio->io_spa, SCL_STATE, FTAG);
+ abd_free(zio->io_abd);
+
+ mutex_enter(&vca->vca_lock);
+ vca->vca_outstanding_bytes -= zio->io_size;
+ cv_signal(&vca->vca_cv);
+ mutex_exit(&vca->vca_lock);
+
+ ASSERT0(zio->io_error);
+ kmem_free(vcsa->vcsa_dest_bp, sizeof (blkptr_t));
+ kmem_free(vcsa, sizeof (vdev_copy_seg_arg_t));
+}
+
+static void
+spa_vdev_copy_segment_read_done(zio_t *zio)
+{
+ vdev_copy_seg_arg_t *vcsa = zio->io_private;
+ dva_t *dest_dva = vcsa->vcsa_dest_dva;
+ uint64_t txg = vcsa->vcsa_txg;
+ spa_t *spa = zio->io_spa;
+ ASSERTV(vdev_t *dest_vd = vdev_lookup_top(spa, DVA_GET_VDEV(dest_dva)));
+ blkptr_t *bp = NULL;
+ dva_t *dva = NULL;
+ uint64_t size = zio->io_size;
+
+ ASSERT3P(dest_vd, !=, NULL);
+ ASSERT0(zio->io_error);
+
+ vcsa->vcsa_dest_bp = kmem_alloc(sizeof (blkptr_t), KM_SLEEP);
+ bp = vcsa->vcsa_dest_bp;
+ dva = bp->blk_dva;
+
+ BP_ZERO(bp);
+
+ /* initialize with dest_dva */
+ bcopy(dest_dva, dva, sizeof (dva_t));
+ BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
+
+ BP_SET_LSIZE(bp, size);
+ BP_SET_PSIZE(bp, size);
+ BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
+ BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
+ BP_SET_TYPE(bp, DMU_OT_NONE);
+ BP_SET_LEVEL(bp, 0);
+ BP_SET_DEDUP(bp, 0);
+ BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
+
+ zio_nowait(zio_rewrite(spa->spa_txg_zio[txg & TXG_MASK], spa,
+ txg, bp, zio->io_abd, size,
+ spa_vdev_copy_segment_write_done, vcsa,
+ ZIO_PRIORITY_REMOVAL, 0, NULL));
+}
+
+static int
+spa_vdev_copy_segment(vdev_t *vd, uint64_t start, uint64_t size, uint64_t txg,
+ vdev_copy_arg_t *vca, zio_alloc_list_t *zal)
+{
+ metaslab_group_t *mg = vd->vdev_mg;
+ spa_t *spa = vd->vdev_spa;
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+ vdev_indirect_mapping_entry_t *entry;
+ vdev_copy_seg_arg_t *private;
+ dva_t dst = {{ 0 }};
+ blkptr_t blk, *bp = &blk;
+ dva_t *dva = bp->blk_dva;
+
+ ASSERT3U(size, <=, SPA_MAXBLOCKSIZE);
+
+ int error = metaslab_alloc_dva(spa, mg->mg_class, size,
+ &dst, 0, NULL, txg, 0, zal);
+ if (error != 0)
+ return (error);
+
+ /*
+	 * We can't have any padding of the allocated size; otherwise we
+	 * would misunderstand both what is actually allocated and the
+	 * size of the mapping.
+ * The caller ensures this will be true by passing in a size that is
+ * aligned to the worst (highest) ashift in the pool.
+ */
+ ASSERT3U(DVA_GET_ASIZE(&dst), ==, size);
+
+ mutex_enter(&vca->vca_lock);
+ vca->vca_outstanding_bytes += size;
+ mutex_exit(&vca->vca_lock);
+
+ entry = kmem_zalloc(sizeof (vdev_indirect_mapping_entry_t), KM_SLEEP);
+ DVA_MAPPING_SET_SRC_OFFSET(&entry->vime_mapping, start);
+ entry->vime_mapping.vimep_dst = dst;
+
+ private = kmem_alloc(sizeof (vdev_copy_seg_arg_t), KM_SLEEP);
+ private->vcsa_dest_dva = &entry->vime_mapping.vimep_dst;
+ private->vcsa_txg = txg;
+ private->vcsa_copy_arg = vca;
+
+ /*
+ * This lock is eventually released by the donefunc for the
+ * zio_write_phys that finishes copying the data.
+ */
+ spa_config_enter(spa, SCL_STATE, FTAG, RW_READER);
+
+ /*
+ * Do logical I/O, letting the redundancy vdevs (like mirror)
+ * handle their own I/O instead of duplicating that code here.
+ */
+ BP_ZERO(bp);
+
+ DVA_SET_VDEV(&dva[0], vd->vdev_id);
+ DVA_SET_OFFSET(&dva[0], start);
+ DVA_SET_GANG(&dva[0], 0);
+ DVA_SET_ASIZE(&dva[0], vdev_psize_to_asize(vd, size));
+
+ BP_SET_BIRTH(bp, TXG_INITIAL, TXG_INITIAL);
+
+ BP_SET_LSIZE(bp, size);
+ BP_SET_PSIZE(bp, size);
+ BP_SET_COMPRESS(bp, ZIO_COMPRESS_OFF);
+ BP_SET_CHECKSUM(bp, ZIO_CHECKSUM_OFF);
+ BP_SET_TYPE(bp, DMU_OT_NONE);
+ BP_SET_LEVEL(bp, 0);
+ BP_SET_DEDUP(bp, 0);
+ BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
+
+ zio_nowait(zio_read(spa->spa_txg_zio[txg & TXG_MASK], spa,
+ bp, abd_alloc_for_io(size, B_FALSE), size,
+ spa_vdev_copy_segment_read_done, private,
+ ZIO_PRIORITY_REMOVAL, 0, NULL));
+
+ list_insert_tail(&svr->svr_new_segments[txg & TXG_MASK], entry);
+ ASSERT3U(start + size, <=, vd->vdev_ms_count << vd->vdev_ms_shift);
+ vdev_dirty(vd, 0, NULL, txg);
+
+ return (0);
+}
+
+/*
+ * Complete the removal of a toplevel vdev. This is called as a
+ * synctask in the same txg that we will sync out the new config (to the
+ * MOS object) which indicates that this vdev is indirect.
+ */
+static void
+vdev_remove_complete_sync(void *arg, dmu_tx_t *tx)
+{
+ spa_vdev_removal_t *svr = arg;
+ vdev_t *vd = svr->svr_vdev;
+ spa_t *spa = vd->vdev_spa;
+
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+
+ for (int i = 0; i < TXG_SIZE; i++) {
+ ASSERT0(svr->svr_bytes_done[i]);
+ }
+
+ ASSERT3U(spa->spa_removing_phys.sr_copied, ==,
+ spa->spa_removing_phys.sr_to_copy);
+
+ vdev_destroy_spacemaps(vd, tx);
+
+ /* destroy leaf zaps, if any */
+ ASSERT3P(svr->svr_zaplist, !=, NULL);
+ for (nvpair_t *pair = nvlist_next_nvpair(svr->svr_zaplist, NULL);
+ pair != NULL;
+ pair = nvlist_next_nvpair(svr->svr_zaplist, pair)) {
+ vdev_destroy_unlink_zap(vd, fnvpair_value_uint64(pair), tx);
+ }
+ fnvlist_free(svr->svr_zaplist);
+
+ spa_finish_removal(dmu_tx_pool(tx)->dp_spa, DSS_FINISHED, tx);
+ /* vd->vdev_path is not available here */
+ spa_history_log_internal(spa, "vdev remove completed", tx,
+ "%s vdev %llu", spa_name(spa), vd->vdev_id);
+}
+
+static void
+vdev_indirect_state_transfer(vdev_t *ivd, vdev_t *vd)
+{
+ ivd->vdev_indirect_config = vd->vdev_indirect_config;
+
+ ASSERT3P(ivd->vdev_indirect_mapping, ==, NULL);
+ ASSERT(vd->vdev_indirect_mapping != NULL);
+ ivd->vdev_indirect_mapping = vd->vdev_indirect_mapping;
+ vd->vdev_indirect_mapping = NULL;
+
+ ASSERT3P(ivd->vdev_indirect_births, ==, NULL);
+ ASSERT(vd->vdev_indirect_births != NULL);
+ ivd->vdev_indirect_births = vd->vdev_indirect_births;
+ vd->vdev_indirect_births = NULL;
+
+ ASSERT0(range_tree_space(vd->vdev_obsolete_segments));
+ ASSERT0(range_tree_space(ivd->vdev_obsolete_segments));
+
+ if (vd->vdev_obsolete_sm != NULL) {
+ ASSERT3U(ivd->vdev_asize, ==, vd->vdev_asize);
+
+ /*
+ * We cannot use space_map_{open,close} because we hold all
+ * the config locks as writer.
+ */
+ ASSERT3P(ivd->vdev_obsolete_sm, ==, NULL);
+ ivd->vdev_obsolete_sm = vd->vdev_obsolete_sm;
+ vd->vdev_obsolete_sm = NULL;
+ }
+}
+
+static void
+vdev_remove_enlist_zaps(vdev_t *vd, nvlist_t *zlist)
+{
+ ASSERT3P(zlist, !=, NULL);
+ ASSERT3P(vd->vdev_ops, !=, &vdev_raidz_ops);
+
+ if (vd->vdev_leaf_zap != 0) {
+ char zkey[32];
+ (void) snprintf(zkey, sizeof (zkey), "%s-%llu",
+ VDEV_REMOVAL_ZAP_OBJS, (u_longlong_t)vd->vdev_leaf_zap);
+ fnvlist_add_uint64(zlist, zkey, vd->vdev_leaf_zap);
+ }
+
+ for (uint64_t id = 0; id < vd->vdev_children; id++) {
+ vdev_remove_enlist_zaps(vd->vdev_child[id], zlist);
+ }
+}
+
+static void
+vdev_remove_replace_with_indirect(vdev_t *vd, uint64_t txg)
+{
+ vdev_t *ivd;
+ dmu_tx_t *tx;
+ spa_t *spa = vd->vdev_spa;
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+
+ /*
+ * First, build a list of leaf zaps to be destroyed.
+ * This is passed to the sync context thread,
+ * which does the actual unlinking.
+ */
+ svr->svr_zaplist = fnvlist_alloc();
+ vdev_remove_enlist_zaps(vd, svr->svr_zaplist);
+
+ ivd = vdev_add_parent(vd, &vdev_indirect_ops);
+
+ vd->vdev_leaf_zap = 0;
+
+ vdev_remove_child(ivd, vd);
+ vdev_compact_children(ivd);
+
+ vdev_indirect_state_transfer(ivd, vd);
+
+ svr->svr_vdev = ivd;
+
+ ASSERT(!ivd->vdev_removing);
+ ASSERT(!list_link_active(&vd->vdev_state_dirty_node));
+
+ tx = dmu_tx_create_assigned(spa->spa_dsl_pool, txg);
+ dsl_sync_task_nowait(spa->spa_dsl_pool, vdev_remove_complete_sync, svr,
+ 0, ZFS_SPACE_CHECK_NONE, tx);
+ dmu_tx_commit(tx);
+
+ /*
+ * Indicate that this thread has exited.
+ * After this, we can not use svr.
+ */
+ mutex_enter(&svr->svr_lock);
+ svr->svr_thread = NULL;
+ cv_broadcast(&svr->svr_cv);
+ mutex_exit(&svr->svr_lock);
+}
+
+/*
+ * Complete the removal of a top-level vdev. This is called in open
+ * context by the removal thread after we have copied all of the
+ * vdev's data.
+ */
+static void
+vdev_remove_complete(vdev_t *vd)
+{
+ spa_t *spa = vd->vdev_spa;
+ uint64_t txg;
+
+ /*
+ * Wait for any deferred frees to be synced before we call
+ * vdev_metaslab_fini()
+ */
+ txg_wait_synced(spa->spa_dsl_pool, 0);
+
+ txg = spa_vdev_enter(spa);
+ zfs_dbgmsg("finishing device removal for vdev %llu in txg %llu",
+ vd->vdev_id, txg);
+
+ /*
+ * Discard allocation state.
+ */
+ if (vd->vdev_mg != NULL) {
+ vdev_metaslab_fini(vd);
+ metaslab_group_destroy(vd->vdev_mg);
+ vd->vdev_mg = NULL;
+ }
+ ASSERT0(vd->vdev_stat.vs_space);
+ ASSERT0(vd->vdev_stat.vs_dspace);
+
+ vdev_remove_replace_with_indirect(vd, txg);
+
+ /*
+ * We now release the locks, allowing spa_sync to run and finish the
+ * removal via vdev_remove_complete_sync in syncing context.
+ */
+ (void) spa_vdev_exit(spa, NULL, txg, 0);
+
+ /*
+ * Top ZAP should have been transferred to the indirect vdev in
+ * vdev_remove_replace_with_indirect.
+ */
+ ASSERT0(vd->vdev_top_zap);
+
+ /*
+ * Leaf ZAP should have been moved in vdev_remove_replace_with_indirect.
+ */
+ ASSERT0(vd->vdev_leaf_zap);
+
+ txg = spa_vdev_enter(spa);
+ (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
+ /*
+ * Request to update the config and the config cachefile.
+ */
+ vdev_config_dirty(spa->spa_root_vdev);
+ (void) spa_vdev_exit(spa, vd, txg, 0);
+}
+
+/*
+ * Evacuates a segment of size at most max_alloc from the vdev
+ * via repeated calls to spa_vdev_copy_segment. If an allocation
+ * fails, the pool is probably too fragmented to handle such a
+ * large size, so decrease max_alloc so that the caller will not try
+ * this size again this txg.
+ */
+static void
+spa_vdev_copy_impl(spa_vdev_removal_t *svr, vdev_copy_arg_t *vca,
+ uint64_t *max_alloc, dmu_tx_t *tx)
+{
+ uint64_t txg = dmu_tx_get_txg(tx);
+ spa_t *spa = dmu_tx_pool(tx)->dp_spa;
+
+ mutex_enter(&svr->svr_lock);
+
+ range_seg_t *rs = avl_first(&svr->svr_allocd_segs->rt_root);
+ if (rs == NULL) {
+ mutex_exit(&svr->svr_lock);
+ return;
+ }
+ uint64_t offset = rs->rs_start;
+ uint64_t length = MIN(rs->rs_end - rs->rs_start, *max_alloc);
+
+ range_tree_remove(svr->svr_allocd_segs, offset, length);
+
+ if (svr->svr_max_offset_to_sync[txg & TXG_MASK] == 0) {
+ dsl_sync_task_nowait(dmu_tx_pool(tx), vdev_mapping_sync,
+ svr, 0, ZFS_SPACE_CHECK_NONE, tx);
+ }
+
+ svr->svr_max_offset_to_sync[txg & TXG_MASK] = offset + length;
+
+ /*
+ * Note: this is the amount of *allocated* space
+ * that we are taking care of each txg.
+ */
+ svr->svr_bytes_done[txg & TXG_MASK] += length;
+
+ mutex_exit(&svr->svr_lock);
+
+ zio_alloc_list_t zal;
+ metaslab_trace_init(&zal);
+ uint64_t thismax = *max_alloc;
+ while (length > 0) {
+ uint64_t mylen = MIN(length, thismax);
+
+ int error = spa_vdev_copy_segment(svr->svr_vdev,
+ offset, mylen, txg, vca, &zal);
+
+ if (error == ENOSPC) {
+ /*
+ * Cut our segment in half, and don't try this
+ * segment size again this txg. Note that the
+ * allocation size must be aligned to the highest
+ * ashift in the pool, so that the allocation will
+ * not be padded out to a multiple of the ashift,
+ * which could cause us to think that this mapping
+ * is larger than we intended.
+ */
+ ASSERT3U(spa->spa_max_ashift, >=, SPA_MINBLOCKSHIFT);
+ ASSERT3U(spa->spa_max_ashift, ==, spa->spa_min_ashift);
+ thismax = P2ROUNDUP(mylen / 2,
+ 1 << spa->spa_max_ashift);
+ ASSERT3U(thismax, <, mylen);
+ /*
+ * The minimum-size allocation can not fail.
+ */
+ ASSERT3U(mylen, >, 1 << spa->spa_max_ashift);
+ *max_alloc = mylen - (1 << spa->spa_max_ashift);
+ } else {
+ ASSERT0(error);
+ length -= mylen;
+ offset += mylen;
+
+ /*
+ * We've performed an allocation, so reset the
+ * alloc trace list.
+ */
+ metaslab_trace_fini(&zal);
+ metaslab_trace_init(&zal);
+ }
+ }
+ metaslab_trace_fini(&zal);
+}
+
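+/*
+ * Example of the ENOSPC back-off above, with hypothetical values: if
+ * spa_max_ashift is 12 (4K blocks) and a 1M segment fails to allocate,
+ * thismax becomes P2ROUNDUP(512K, 4K) = 512K and the copy is retried;
+ * if that also fails, we try 256K, and so on.  *max_alloc is lowered
+ * to 1M - 4K so that no later copy in this txg attempts a segment as
+ * large as the one that just failed.
+ */
+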
+/*
+ * The removal thread operates in open context. It iterates over all
+ * allocated space in the vdev, by loading each metaslab's spacemap.
+ * For each contiguous segment of allocated space (capping the segment
+ * size at SPA_MAXBLOCKSIZE), we:
+ * - Allocate space for it on another vdev.
+ * - Create a new mapping from the old location to the new location
+ * (as a record in svr_new_segments).
+ * - Initiate a physical read zio to get the data off the removing disk.
+ * - In the read zio's done callback, initiate a physical write zio to
+ * write it to the new vdev.
+ * Note that all of this will take effect when a particular TXG syncs.
+ * The sync thread ensures that all the phys reads and writes for the syncing
+ * TXG have completed (see spa_txg_zio) and writes the new mappings to disk
+ * (see vdev_mapping_sync()).
+ */
+static void
+spa_vdev_remove_thread(void *arg)
+{
+ vdev_t *vd = arg;
+ spa_t *spa = vd->vdev_spa;
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+ vdev_copy_arg_t vca;
+ uint64_t max_alloc = zfs_remove_max_segment;
+ uint64_t last_txg = 0;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ uint64_t start_offset = vdev_indirect_mapping_max_offset(vim);
+
+ ASSERT3P(vd->vdev_ops, !=, &vdev_indirect_ops);
+ ASSERT(vdev_is_concrete(vd));
+ ASSERT(vd->vdev_removing);
+ ASSERT(vd->vdev_indirect_config.vic_mapping_object != 0);
+ ASSERT3P(svr->svr_vdev, ==, vd);
+ ASSERT(vim != NULL);
+
+ mutex_init(&vca.vca_lock, NULL, MUTEX_DEFAULT, NULL);
+ cv_init(&vca.vca_cv, NULL, CV_DEFAULT, NULL);
+ vca.vca_outstanding_bytes = 0;
+
+ mutex_enter(&svr->svr_lock);
+
+ /*
+ * Start from vim_max_offset so we pick up where we left off
+ * if we are restarting the removal after opening the pool.
+ */
+ uint64_t msi;
+ for (msi = start_offset >> vd->vdev_ms_shift;
+ msi < vd->vdev_ms_count && !svr->svr_thread_exit; msi++) {
+ metaslab_t *msp = vd->vdev_ms[msi];
+ ASSERT3U(msi, <=, vd->vdev_ms_count);
+
+ ASSERT0(range_tree_space(svr->svr_allocd_segs));
+
+ mutex_enter(&msp->ms_sync_lock);
+ mutex_enter(&msp->ms_lock);
+
+ /*
+ * Assert nothing in flight -- ms_*tree is empty.
+ */
+ for (int i = 0; i < TXG_SIZE; i++) {
+ ASSERT0(range_tree_space(msp->ms_alloctree[i]));
+ }
+
+ /*
+ * If the metaslab has ever been allocated from (ms_sm!=NULL),
+ * read the allocated segments from the space map object
+ * into svr_allocd_segs. Since we do this while holding
+ * svr_lock and ms_sync_lock, concurrent frees (which
+ * would have modified the space map) will wait for us
+ * to finish loading the spacemap, and then take the
+ * appropriate action (see free_from_removing_vdev()).
+ */
+ if (msp->ms_sm != NULL) {
+ space_map_t *sm = NULL;
+
+ /*
+ * We have to open a new space map here, because
+ * ms_sm's sm_length and sm_alloc may not reflect
+ * what's in the object contents, if we are in between
+ * metaslab_sync() and metaslab_sync_done().
+ */
+ VERIFY0(space_map_open(&sm,
+ spa->spa_dsl_pool->dp_meta_objset,
+ msp->ms_sm->sm_object, msp->ms_sm->sm_start,
+ msp->ms_sm->sm_size, msp->ms_sm->sm_shift));
+ space_map_update(sm);
+ VERIFY0(space_map_load(sm, svr->svr_allocd_segs,
+ SM_ALLOC));
+ space_map_close(sm);
+
+ range_tree_walk(msp->ms_freeingtree,
+ range_tree_remove, svr->svr_allocd_segs);
+
+ /*
+ * When we are resuming from a paused removal (i.e.
+ * when importing a pool with a removal in progress),
+ * discard any state that we have already processed.
+ */
+ range_tree_clear(svr->svr_allocd_segs, 0, start_offset);
+ }
+ mutex_exit(&msp->ms_lock);
+ mutex_exit(&msp->ms_sync_lock);
+
+ vca.vca_msp = msp;
+ zfs_dbgmsg("copying %llu segments for metaslab %llu",
+ avl_numnodes(&svr->svr_allocd_segs->rt_root),
+ msp->ms_id);
+
+ while (!svr->svr_thread_exit &&
+ range_tree_space(svr->svr_allocd_segs) != 0) {
+
+ mutex_exit(&svr->svr_lock);
+
+ mutex_enter(&vca.vca_lock);
+ while (vca.vca_outstanding_bytes >
+ zfs_remove_max_copy_bytes) {
+ cv_wait(&vca.vca_cv, &vca.vca_lock);
+ }
+ mutex_exit(&vca.vca_lock);
+
+ dmu_tx_t *tx =
+ dmu_tx_create_dd(spa_get_dsl(spa)->dp_mos_dir);
+ dmu_tx_hold_space(tx, SPA_MAXBLOCKSIZE);
+ VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
+ uint64_t txg = dmu_tx_get_txg(tx);
+
+ if (txg != last_txg)
+ max_alloc = zfs_remove_max_segment;
+ last_txg = txg;
+
+ spa_vdev_copy_impl(svr, &vca, &max_alloc, tx);
+
+ dmu_tx_commit(tx);
+ mutex_enter(&svr->svr_lock);
+ }
+ }
+
+ mutex_exit(&svr->svr_lock);
+ /*
+ * Wait for all copies to finish before cleaning up the vca.
+ */
+ txg_wait_synced(spa->spa_dsl_pool, 0);
+ ASSERT0(vca.vca_outstanding_bytes);
+
+ mutex_destroy(&vca.vca_lock);
+ cv_destroy(&vca.vca_cv);
+
+ if (svr->svr_thread_exit) {
+ mutex_enter(&svr->svr_lock);
+ range_tree_vacate(svr->svr_allocd_segs, NULL, NULL);
+ svr->svr_thread = NULL;
+ cv_broadcast(&svr->svr_cv);
+ mutex_exit(&svr->svr_lock);
+ } else {
+ ASSERT0(range_tree_space(svr->svr_allocd_segs));
+ vdev_remove_complete(vd);
+ }
+}
+
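+/*
+ * Resume example for the loop above (hypothetical sizes): if the vdev's
+ * metaslabs are 512M (vdev_ms_shift of 29) and the mapping already on
+ * disk covers the first 3G of the device, then start_offset is 3G, the
+ * thread resumes at metaslab index 3G >> 29 = 6, and range_tree_clear()
+ * discards the already-mapped [0, 3G) range from svr_allocd_segs so that
+ * space is not copied a second time.
+ */
+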
+void
+spa_vdev_remove_suspend(spa_t *spa)
+{
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+
+ if (svr == NULL)
+ return;
+
+ mutex_enter(&svr->svr_lock);
+ svr->svr_thread_exit = B_TRUE;
+ while (svr->svr_thread != NULL)
+ cv_wait(&svr->svr_cv, &svr->svr_lock);
+ svr->svr_thread_exit = B_FALSE;
+ mutex_exit(&svr->svr_lock);
+}
+
+/* ARGSUSED */
+static int
+spa_vdev_remove_cancel_check(void *arg, dmu_tx_t *tx)
+{
+ spa_t *spa = dmu_tx_pool(tx)->dp_spa;
+
+ if (spa->spa_vdev_removal == NULL)
+ return (ENOTACTIVE);
+ return (0);
+}
+
+/*
+ * Cancel a removal by freeing all entries from the partial mapping
+ * and marking the vdev as no longer removing.
+ */
+/* ARGSUSED */
+static void
+spa_vdev_remove_cancel_sync(void *arg, dmu_tx_t *tx)
+{
+ spa_t *spa = dmu_tx_pool(tx)->dp_spa;
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+ vdev_t *vd = svr->svr_vdev;
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+ objset_t *mos = spa->spa_meta_objset;
+
+ ASSERT3P(svr->svr_thread, ==, NULL);
+
+ spa_feature_decr(spa, SPA_FEATURE_DEVICE_REMOVAL, tx);
+ if (vdev_obsolete_counts_are_precise(vd)) {
+ spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+ VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_OBSOLETE_COUNTS_ARE_PRECISE, tx));
+ }
+
+ if (vdev_obsolete_sm_object(vd) != 0) {
+ ASSERT(vd->vdev_obsolete_sm != NULL);
+ ASSERT3U(vdev_obsolete_sm_object(vd), ==,
+ space_map_object(vd->vdev_obsolete_sm));
+
+ space_map_free(vd->vdev_obsolete_sm, tx);
+ VERIFY0(zap_remove(spa->spa_meta_objset, vd->vdev_top_zap,
+ VDEV_TOP_ZAP_INDIRECT_OBSOLETE_SM, tx));
+ space_map_close(vd->vdev_obsolete_sm);
+ vd->vdev_obsolete_sm = NULL;
+ spa_feature_decr(spa, SPA_FEATURE_OBSOLETE_COUNTS, tx);
+ }
+ for (int i = 0; i < TXG_SIZE; i++) {
+ ASSERT(list_is_empty(&svr->svr_new_segments[i]));
+ ASSERT3U(svr->svr_max_offset_to_sync[i], <=,
+ vdev_indirect_mapping_max_offset(vim));
+ }
+
+ for (uint64_t msi = 0; msi < vd->vdev_ms_count; msi++) {
+ metaslab_t *msp = vd->vdev_ms[msi];
+
+ if (msp->ms_start >= vdev_indirect_mapping_max_offset(vim))
+ break;
+
+ ASSERT0(range_tree_space(svr->svr_allocd_segs));
+
+ mutex_enter(&msp->ms_lock);
+
+ /*
+ * Assert nothing in flight -- ms_*tree is empty.
+ */
+ for (int i = 0; i < TXG_SIZE; i++)
+ ASSERT0(range_tree_space(msp->ms_alloctree[i]));
+ for (int i = 0; i < TXG_DEFER_SIZE; i++)
+ ASSERT0(range_tree_space(msp->ms_defertree[i]));
+ ASSERT0(range_tree_space(msp->ms_freedtree));
+
+ if (msp->ms_sm != NULL) {
+ /*
+ * Assert that the in-core spacemap has the same
+ * length as the on-disk one, so we can use the
+ * existing in-core spacemap to load it from disk.
+ */
+ ASSERT3U(msp->ms_sm->sm_alloc, ==,
+ msp->ms_sm->sm_phys->smp_alloc);
+ ASSERT3U(msp->ms_sm->sm_length, ==,
+ msp->ms_sm->sm_phys->smp_objsize);
+
+ mutex_enter(&svr->svr_lock);
+ VERIFY0(space_map_load(msp->ms_sm,
+ svr->svr_allocd_segs, SM_ALLOC));
+ range_tree_walk(msp->ms_freeingtree,
+ range_tree_remove, svr->svr_allocd_segs);
+
+ /*
+ * Clear everything past what has been synced,
+ * because we have not allocated mappings for it yet.
+ */
+ uint64_t syncd = vdev_indirect_mapping_max_offset(vim);
+ range_tree_clear(svr->svr_allocd_segs, syncd,
+ msp->ms_sm->sm_start + msp->ms_sm->sm_size - syncd);
+
+ mutex_exit(&svr->svr_lock);
+ }
+ mutex_exit(&msp->ms_lock);
+
+ mutex_enter(&svr->svr_lock);
+ range_tree_vacate(svr->svr_allocd_segs,
+ free_mapped_segment_cb, vd);
+ mutex_exit(&svr->svr_lock);
+ }
+
+ /*
+ * Note: this must happen after we invoke free_mapped_segment_cb,
+ * because it adds to the obsolete_segments.
+ */
+ range_tree_vacate(vd->vdev_obsolete_segments, NULL, NULL);
+
+ ASSERT3U(vic->vic_mapping_object, ==,
+ vdev_indirect_mapping_object(vd->vdev_indirect_mapping));
+ vdev_indirect_mapping_close(vd->vdev_indirect_mapping);
+ vd->vdev_indirect_mapping = NULL;
+ vdev_indirect_mapping_free(mos, vic->vic_mapping_object, tx);
+ vic->vic_mapping_object = 0;
+
+ ASSERT3U(vic->vic_births_object, ==,
+ vdev_indirect_births_object(vd->vdev_indirect_births));
+ vdev_indirect_births_close(vd->vdev_indirect_births);
+ vd->vdev_indirect_births = NULL;
+ vdev_indirect_births_free(mos, vic->vic_births_object, tx);
+ vic->vic_births_object = 0;
+
+ /*
+ * We may have processed some frees from the removing vdev in this
+ * txg, thus increasing svr_bytes_done; discard that here to
+ * satisfy the assertions in spa_vdev_removal_destroy().
+	 * Note that future txgs cannot have any bytes_done, because
+	 * future txgs are only modified from open context, and we have
+ * already shut down the copying thread.
+ */
+ svr->svr_bytes_done[dmu_tx_get_txg(tx) & TXG_MASK] = 0;
+ spa_finish_removal(spa, DSS_CANCELED, tx);
+
+ vd->vdev_removing = B_FALSE;
+ vdev_config_dirty(vd);
+
+	zfs_dbgmsg("canceled device removal for vdev %llu in txg %llu",
+ vd->vdev_id, dmu_tx_get_txg(tx));
+ spa_history_log_internal(spa, "vdev remove canceled", tx,
+ "%s vdev %llu %s", spa_name(spa),
+ vd->vdev_id, (vd->vdev_path != NULL) ? vd->vdev_path : "-");
+}
+
+int
+spa_vdev_remove_cancel(spa_t *spa)
+{
+ spa_vdev_remove_suspend(spa);
+
+ if (spa->spa_vdev_removal == NULL)
+ return (ENOTACTIVE);
+
+ uint64_t vdid = spa->spa_vdev_removal->svr_vdev->vdev_id;
+
+ int error = dsl_sync_task(spa->spa_name, spa_vdev_remove_cancel_check,
+ spa_vdev_remove_cancel_sync, NULL, 0, ZFS_SPACE_CHECK_NONE);
+
+ if (error == 0) {
+ spa_config_enter(spa, SCL_ALLOC | SCL_VDEV, FTAG, RW_WRITER);
+ vdev_t *vd = vdev_lookup_top(spa, vdid);
+ metaslab_group_activate(vd->vdev_mg);
+ spa_config_exit(spa, SCL_ALLOC | SCL_VDEV, FTAG);
+ }
+
+ return (error);
+}
+
+/*
+ * Called in every sync pass of every txg if there is an active svr.
+ */
+void
+svr_sync(spa_t *spa, dmu_tx_t *tx)
+{
+ spa_vdev_removal_t *svr = spa->spa_vdev_removal;
+ int txgoff = dmu_tx_get_txg(tx) & TXG_MASK;
+
+ /*
+ * This check is necessary so that we do not dirty the
+ * DIRECTORY_OBJECT via spa_sync_removing_state() when there
+ * is nothing to do. Dirtying it every time would prevent us
+ * from syncing-to-convergence.
+ */
+ if (svr->svr_bytes_done[txgoff] == 0)
+ return;
+
+ /*
+ * Update progress accounting.
+ */
+ spa->spa_removing_phys.sr_copied += svr->svr_bytes_done[txgoff];
+ svr->svr_bytes_done[txgoff] = 0;
+
+ spa_sync_removing_state(spa, tx);
+}
+
+static void
+vdev_remove_make_hole_and_free(vdev_t *vd)
+{
+ uint64_t id = vd->vdev_id;
+ spa_t *spa = vd->vdev_spa;
+ vdev_t *rvd = spa->spa_root_vdev;
+ boolean_t last_vdev = (id == (rvd->vdev_children - 1));
+
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+ ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
+
+ vdev_free(vd);
+
+ if (last_vdev) {
+ vdev_compact_children(rvd);
+ } else {
+ vd = vdev_alloc_common(spa, id, 0, &vdev_hole_ops);
+ vdev_add_child(rvd, vd);
+ }
+ vdev_config_dirty(rvd);
+
+ /*
+ * Reassess the health of our root vdev.
+ */
+ vdev_reopen(rvd);
+}
+
+/*
+ * Remove a log device. The config lock is held for the specified TXG.
+ */
+static int
+spa_vdev_remove_log(vdev_t *vd, uint64_t *txg)
+{
+ metaslab_group_t *mg = vd->vdev_mg;
+ spa_t *spa = vd->vdev_spa;
+ int error = 0;
+
+ ASSERT(vd->vdev_islog);
+ ASSERT(vd == vd->vdev_top);
+
+ /*
+ * Stop allocating from this vdev.
+ */
+ metaslab_group_passivate(mg);
+
+ /*
+ * Wait for the youngest allocations and frees to sync,
+ * and then wait for the deferral of those frees to finish.
+ */
+ spa_vdev_config_exit(spa, NULL,
+ *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
+
+ /*
+ * Evacuate the device. We don't hold the config lock as writer
+ * since we need to do I/O but we do keep the
+ * spa_namespace_lock held. Once this completes the device
+ * should no longer have any blocks allocated on it.
+ */
+ if (vd->vdev_islog) {
+ if (vd->vdev_stat.vs_alloc != 0)
+ error = spa_reset_logs(spa);
+ }
+
+ *txg = spa_vdev_config_enter(spa);
+
+ if (error != 0) {
+ metaslab_group_activate(mg);
+ return (error);
+ }
+ ASSERT0(vd->vdev_stat.vs_alloc);
+
+ /*
+ * The evacuation succeeded. Remove any remaining MOS metadata
+ * associated with this vdev, and wait for these changes to sync.
+ */
+ vd->vdev_removing = B_TRUE;
+
+ vdev_dirty_leaves(vd, VDD_DTL, *txg);
+ vdev_config_dirty(vd);
+
+ spa_history_log_internal(spa, "vdev remove", NULL,
+ "%s vdev %llu (log) %s", spa_name(spa), vd->vdev_id,
+ (vd->vdev_path != NULL) ? vd->vdev_path : "-");
+
+ spa_vdev_config_exit(spa, NULL, *txg, 0, FTAG);
+
+ *txg = spa_vdev_config_enter(spa);
+
+ sysevent_t *ev = spa_event_create(spa, vd, NULL,
+ ESC_ZFS_VDEV_REMOVE_DEV);
+ ASSERT(MUTEX_HELD(&spa_namespace_lock));
+ ASSERT(spa_config_held(spa, SCL_ALL, RW_WRITER) == SCL_ALL);
+
+ /* The top ZAP should have been destroyed by vdev_remove_empty. */
+ ASSERT0(vd->vdev_top_zap);
+ /* The leaf ZAP should have been destroyed by vdev_dtl_sync. */
+ ASSERT0(vd->vdev_leaf_zap);
+
+ (void) vdev_label_init(vd, 0, VDEV_LABEL_REMOVE);
+
+ if (list_link_active(&vd->vdev_state_dirty_node))
+ vdev_state_clean(vd);
+ if (list_link_active(&vd->vdev_config_dirty_node))
+ vdev_config_clean(vd);
+
+ /*
+ * Clean up the vdev namespace.
+ */
+ vdev_remove_make_hole_and_free(vd);
+
+ if (ev != NULL)
+ spa_event_post(ev);
+
+ return (0);
+}
+
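+/*
+ * A note on the txg arithmetic used above in spa_vdev_remove_log() and
+ * below in spa_vdev_remove_top(): waiting for txg + TXG_CONCURRENT_STATES
+ * + TXG_DEFER_SIZE (3 + 2 with the usual constants, i.e. five txgs past
+ * the current one) ensures that every allocation and free that was in
+ * flight when the metaslab group was passivated, including deferred
+ * frees, has been synced to disk before we proceed.
+ */
+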
+static int
+spa_vdev_remove_top_check(vdev_t *vd)
+{
+ spa_t *spa = vd->vdev_spa;
+
+ if (vd != vd->vdev_top)
+ return (SET_ERROR(ENOTSUP));
+
+ if (!spa_feature_is_enabled(spa, SPA_FEATURE_DEVICE_REMOVAL))
+ return (SET_ERROR(ENOTSUP));
+
+ /*
+ * There has to be enough free space to remove the
+ * device and leave double the "slop" space (i.e. we
+ * must leave at least 3% of the pool free, in addition to
+ * the normal slop space).
+ */
+ if (dsl_dir_space_available(spa->spa_dsl_pool->dp_root_dir,
+ NULL, 0, B_TRUE) <
+ vd->vdev_stat.vs_dspace + spa_get_slop_space(spa)) {
+ return (SET_ERROR(ENOSPC));
+ }
+
+ /*
+ * There can not be a removal in progress.
+ */
+ if (spa->spa_removing_phys.sr_state == DSS_SCANNING)
+ return (SET_ERROR(EBUSY));
+
+ /*
+ * The device must have all its data.
+ */
+ if (!vdev_dtl_empty(vd, DTL_MISSING) ||
+ !vdev_dtl_empty(vd, DTL_OUTAGE))
+ return (SET_ERROR(EBUSY));
+
+ /*
+ * The device must be healthy.
+ */
+ if (!vdev_readable(vd))
+ return (SET_ERROR(EIO));
+
+ /*
+ * All vdevs in normal class must have the same ashift.
+ */
+ if (spa->spa_max_ashift != spa->spa_min_ashift) {
+ return (SET_ERROR(EINVAL));
+ }
+
+ /*
+	 * In addition to sharing the same ashift (asserted below), no
+	 * concrete top-level vdev in the normal class may be raidz.
+ */
+ vdev_t *rvd = spa->spa_root_vdev;
+ int num_indirect = 0;
+ for (uint64_t id = 0; id < rvd->vdev_children; id++) {
+ vdev_t *cvd = rvd->vdev_child[id];
+ if (cvd->vdev_ashift != 0 && !cvd->vdev_islog)
+ ASSERT3U(cvd->vdev_ashift, ==, spa->spa_max_ashift);
+ if (cvd->vdev_ops == &vdev_indirect_ops)
+ num_indirect++;
+ if (!vdev_is_concrete(cvd))
+ continue;
+ if (cvd->vdev_ops == &vdev_raidz_ops)
+ return (SET_ERROR(EINVAL));
+ /*
+			 * A mirror vdev must be composed of leaf vdevs only
+ */
+ if (cvd->vdev_ops == &vdev_mirror_ops) {
+ for (uint64_t cid = 0;
+ cid < cvd->vdev_children; cid++) {
+ if (!cvd->vdev_child[cid]->vdev_ops->
+ vdev_op_leaf)
+ return (SET_ERROR(EINVAL));
+ }
+ }
+ }
+
+ return (0);
+}
+
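+/*
+ * Example of the free-space requirement checked above, with made-up
+ * numbers and the default slop of roughly 1/32 (about 3%) of the pool:
+ * removing a top-level vdev that contributes 1T of dspace from a 10T
+ * pool requires roughly 1T + 320G of available space; with less than
+ * that, the removal fails up front with ENOSPC.
+ */
+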
+/*
+ * Initiate removal of a top-level vdev, reducing the total space in the pool.
+ * The config lock is held for the specified TXG. Once initiated,
+ * evacuation of all allocated space (copying it to other vdevs) happens
+ * in the background (see spa_vdev_remove_thread()), and can be canceled
+ * (see spa_vdev_remove_cancel()). If successful, the vdev will
+ * be transformed into an indirect vdev (see vdev_remove_complete()).
+ */
+static int
+spa_vdev_remove_top(vdev_t *vd, uint64_t *txg)
+{
+ spa_t *spa = vd->vdev_spa;
+ int error;
+
+ /*
+ * Check for errors up-front, so that we don't waste time
+ * passivating the metaslab group and clearing the ZIL if there
+ * are errors.
+ */
+ error = spa_vdev_remove_top_check(vd);
+ if (error != 0)
+ return (error);
+
+ /*
+ * Stop allocating from this vdev. Note that we must check
+ * that this is not the only device in the pool before
+ * passivating, otherwise we will not be able to make
+ * progress because we can't allocate from any vdevs.
+ * The above check for sufficient free space serves this
+ * purpose.
+ */
+ metaslab_group_t *mg = vd->vdev_mg;
+ metaslab_group_passivate(mg);
+
+ /*
+ * Wait for the youngest allocations and frees to sync,
+ * and then wait for the deferral of those frees to finish.
+ */
+ spa_vdev_config_exit(spa, NULL,
+ *txg + TXG_CONCURRENT_STATES + TXG_DEFER_SIZE, 0, FTAG);
+
+ /*
+ * We must ensure that no "stubby" log blocks are allocated
+ * on the device to be removed. These blocks could be
+ * written at any time, including while we are in the middle
+ * of copying them.
+ */
+ error = spa_reset_logs(spa);
+
+ *txg = spa_vdev_config_enter(spa);
+
+ /*
+ * Things might have changed while the config lock was dropped
+ * (e.g. space usage). Check for errors again.
+ */
+ if (error == 0)
+ error = spa_vdev_remove_top_check(vd);
+
+ if (error != 0) {
+ metaslab_group_activate(mg);
+ return (error);
+ }
+
+ vd->vdev_removing = B_TRUE;
+
+ vdev_dirty_leaves(vd, VDD_DTL, *txg);
+ vdev_config_dirty(vd);
+ dmu_tx_t *tx = dmu_tx_create_assigned(spa->spa_dsl_pool, *txg);
+ dsl_sync_task_nowait(spa->spa_dsl_pool,
+ vdev_remove_initiate_sync,
+ vd, 0, ZFS_SPACE_CHECK_NONE, tx);
+ dmu_tx_commit(tx);
+
+ return (0);
+}
+
+/*
+ * Remove a device from the pool.
+ *
+ * Removing a device from the vdev namespace requires several steps
+ * and can take a significant amount of time. As a result we use
+ * the spa_vdev_config_[enter/exit] functions which allow us to
+ * grab and release the spa_config_lock while still holding the namespace
+ * lock. During each step the configuration is synced out.
+ */
+int
+spa_vdev_remove(spa_t *spa, uint64_t guid, boolean_t unspare)
+{
+ vdev_t *vd;
+ nvlist_t **spares, **l2cache, *nv;
+ uint64_t txg = 0;
+ uint_t nspares, nl2cache;
+ int error = 0;
+ boolean_t locked = MUTEX_HELD(&spa_namespace_lock);
+ sysevent_t *ev = NULL;
+
+ ASSERT(spa_writeable(spa));
+
+ if (!locked)
+ txg = spa_vdev_enter(spa);
+
+ vd = spa_lookup_by_guid(spa, guid, B_FALSE);
+
+ if (spa->spa_spares.sav_vdevs != NULL &&
+ nvlist_lookup_nvlist_array(spa->spa_spares.sav_config,
+ ZPOOL_CONFIG_SPARES, &spares, &nspares) == 0 &&
+ (nv = spa_nvlist_lookup_by_guid(spares, nspares, guid)) != NULL) {
+ /*
+ * Only remove the hot spare if it's not currently in use
+ * in this pool.
+ */
+ if (vd == NULL || unspare) {
+ if (vd == NULL)
+ vd = spa_lookup_by_guid(spa, guid, B_TRUE);
+ ev = spa_event_create(spa, vd, NULL,
+ ESC_ZFS_VDEV_REMOVE_AUX);
+
+ char *nvstr = fnvlist_lookup_string(nv,
+ ZPOOL_CONFIG_PATH);
+ spa_history_log_internal(spa, "vdev remove", NULL,
+ "%s vdev (%s) %s", spa_name(spa),
+ VDEV_TYPE_SPARE, nvstr);
+ spa_vdev_remove_aux(spa->spa_spares.sav_config,
+ ZPOOL_CONFIG_SPARES, spares, nspares, nv);
+ spa_load_spares(spa);
+ spa->spa_spares.sav_sync = B_TRUE;
+ } else {
+ error = SET_ERROR(EBUSY);
+ }
+ } else if (spa->spa_l2cache.sav_vdevs != NULL &&
+ nvlist_lookup_nvlist_array(spa->spa_l2cache.sav_config,
+ ZPOOL_CONFIG_L2CACHE, &l2cache, &nl2cache) == 0 &&
+ (nv = spa_nvlist_lookup_by_guid(l2cache, nl2cache, guid)) != NULL) {
+ char *nvstr = fnvlist_lookup_string(nv, ZPOOL_CONFIG_PATH);
+ spa_history_log_internal(spa, "vdev remove", NULL,
+ "%s vdev (%s) %s", spa_name(spa), VDEV_TYPE_L2CACHE, nvstr);
+ /*
+ * Cache devices can always be removed.
+ */
+ vd = spa_lookup_by_guid(spa, guid, B_TRUE);
+ ev = spa_event_create(spa, vd, NULL, ESC_ZFS_VDEV_REMOVE_AUX);
+ spa_vdev_remove_aux(spa->spa_l2cache.sav_config,
+ ZPOOL_CONFIG_L2CACHE, l2cache, nl2cache, nv);
+ spa_load_l2cache(spa);
+ spa->spa_l2cache.sav_sync = B_TRUE;
+ } else if (vd != NULL && vd->vdev_islog) {
+ ASSERT(!locked);
+ error = spa_vdev_remove_log(vd, &txg);
+ } else if (vd != NULL) {
+ ASSERT(!locked);
+ error = spa_vdev_remove_top(vd, &txg);
+ } else {
+ /*
+ * There is no vdev of any kind with the specified guid.
+ */
+ error = SET_ERROR(ENOENT);
+ }
+
+ if (!locked)
+ error = spa_vdev_exit(spa, NULL, txg, error);
+
+ if (ev != NULL)
+ spa_event_post(ev);
+
+ return (error);
+}
+
+int
+spa_removal_get_stats(spa_t *spa, pool_removal_stat_t *prs)
+{
+ prs->prs_state = spa->spa_removing_phys.sr_state;
+
+ if (prs->prs_state == DSS_NONE)
+ return (SET_ERROR(ENOENT));
+
+ prs->prs_removing_vdev = spa->spa_removing_phys.sr_removing_vdev;
+ prs->prs_start_time = spa->spa_removing_phys.sr_start_time;
+ prs->prs_end_time = spa->spa_removing_phys.sr_end_time;
+ prs->prs_to_copy = spa->spa_removing_phys.sr_to_copy;
+ prs->prs_copied = spa->spa_removing_phys.sr_copied;
+
+ if (spa->spa_vdev_removal != NULL) {
+ for (int i = 0; i < TXG_SIZE; i++) {
+ prs->prs_copied +=
+ spa->spa_vdev_removal->svr_bytes_done[i];
+ }
+ }
+
+ prs->prs_mapping_memory = 0;
+ uint64_t indirect_vdev_id =
+ spa->spa_removing_phys.sr_prev_indirect_vdev;
+ while (indirect_vdev_id != -1) {
+ vdev_t *vd = spa->spa_root_vdev->vdev_child[indirect_vdev_id];
+ vdev_indirect_config_t *vic = &vd->vdev_indirect_config;
+ vdev_indirect_mapping_t *vim = vd->vdev_indirect_mapping;
+
+ ASSERT3P(vd->vdev_ops, ==, &vdev_indirect_ops);
+ prs->prs_mapping_memory += vdev_indirect_mapping_size(vim);
+ indirect_vdev_id = vic->vic_prev_indirect_vdev;
+ }
+
+ return (0);
+}
+
+#if defined(_KERNEL) && defined(HAVE_SPL)
+module_param(zfs_remove_max_segment, int, 0644);
+MODULE_PARM_DESC(zfs_remove_max_segment,
+ "Largest contiguous segment to allocate when removing device");
+
+EXPORT_SYMBOL(free_from_removing_vdev);
+EXPORT_SYMBOL(spa_removal_get_stats);
+EXPORT_SYMBOL(spa_remove_init);
+EXPORT_SYMBOL(spa_restart_removal);
+EXPORT_SYMBOL(spa_vdev_removal_destroy);
+EXPORT_SYMBOL(spa_vdev_remove);
+EXPORT_SYMBOL(spa_vdev_remove_cancel);
+EXPORT_SYMBOL(spa_vdev_remove_suspend);
+EXPORT_SYMBOL(svr_sync);
+#endif
diff --git a/module/zfs/vdev_root.c b/module/zfs/vdev_root.c
index 5db157d74520..8ac9ce1878cd 100644
--- a/module/zfs/vdev_root.c
+++ b/module/zfs/vdev_root.c
@@ -1,123 +1,124 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright 2010 Sun Microsystems, Inc. All rights reserved.
* Use is subject to license terms.
*/
/*
- * Copyright (c) 2013 by Delphix. All rights reserved.
+ * Copyright (c) 2012, 2014 by Delphix. All rights reserved.
*/
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/vdev_impl.h>
#include <sys/zio.h>
#include <sys/fs/zfs.h>
/*
* Virtual device vector for the pool's root vdev.
*/
/*
* We should be able to tolerate one failure with absolutely no damage
* to our metadata. Two failures will take out space maps, a bunch of
* indirect block trees, meta dnodes, dnodes, etc. Probably not a happy
* place to live. When we get smarter, we can liberalize this policy.
* e.g. If we haven't lost two consecutive top-level vdevs, then we are
* probably fine. Adding bean counters during alloc/free can make this
* future guesswork more accurate.
*/
static int
too_many_errors(vdev_t *vd, int numerrors)
{
ASSERT3U(numerrors, <=, vd->vdev_children);
return (numerrors > 0);
}
static int
vdev_root_open(vdev_t *vd, uint64_t *asize, uint64_t *max_asize,
uint64_t *ashift)
{
int lasterror = 0;
int numerrors = 0;
if (vd->vdev_children == 0) {
vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
return (SET_ERROR(EINVAL));
}
vdev_open_children(vd);
for (int c = 0; c < vd->vdev_children; c++) {
vdev_t *cvd = vd->vdev_child[c];
if (cvd->vdev_open_error && !cvd->vdev_islog) {
lasterror = cvd->vdev_open_error;
numerrors++;
}
}
if (too_many_errors(vd, numerrors)) {
vd->vdev_stat.vs_aux = VDEV_AUX_NO_REPLICAS;
return (lasterror);
}
*asize = 0;
*max_asize = 0;
*ashift = 0;
return (0);
}
static void
vdev_root_close(vdev_t *vd)
{
for (int c = 0; c < vd->vdev_children; c++)
vdev_close(vd->vdev_child[c]);
}
static void
vdev_root_state_change(vdev_t *vd, int faulted, int degraded)
{
if (too_many_errors(vd, faulted)) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_CANT_OPEN,
VDEV_AUX_NO_REPLICAS);
} else if (degraded) {
vdev_set_state(vd, B_FALSE, VDEV_STATE_DEGRADED, VDEV_AUX_NONE);
} else {
vdev_set_state(vd, B_FALSE, VDEV_STATE_HEALTHY, VDEV_AUX_NONE);
}
}
vdev_ops_t vdev_root_ops = {
vdev_root_open,
vdev_root_close,
vdev_default_asize,
NULL, /* io_start - not applicable to the root */
NULL, /* io_done - not applicable to the root */
vdev_root_state_change,
NULL,
NULL,
NULL,
+ NULL,
VDEV_TYPE_ROOT, /* name of this vdev type */
B_FALSE /* not a leaf vdev */
};
diff --git a/module/zfs/zfs_ioctl.c b/module/zfs/zfs_ioctl.c
index 1e987dc885c4..b1ac149b38af 100644
--- a/module/zfs/zfs_ioctl.c
+++ b/module/zfs/zfs_ioctl.c
@@ -1,7092 +1,7120 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Portions Copyright 2011 Martin Matuska
* Copyright 2015, OmniTI Computer Consulting, Inc. All rights reserved.
* Portions Copyright 2012 Pawel Jakub Dawidek <pawel@dawidek.net>
* Copyright (c) 2014, 2016 Joyent, Inc. All rights reserved.
* Copyright 2016 Nexenta Systems, Inc. All rights reserved.
* Copyright (c) 2014, Joyent, Inc. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
* Copyright (c) 2013 Steven Hartland. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
* Copyright 2016 Toomas Soome <tsoome@me.com>
* Copyright (c) 2016 Actifio, Inc. All rights reserved.
* Copyright (c) 2017, loli10K <ezomori.nozomu@gmail.com>. All rights reserved.
* Copyright (c) 2017 Datto Inc. All rights reserved.
* Copyright 2017 RackTop Systems.
* Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
*/
/*
* ZFS ioctls.
*
* This file handles the ioctls to /dev/zfs, used for configuring ZFS storage
* pools and filesystems, e.g. with /sbin/zfs and /sbin/zpool.
*
* There are two ways that we handle ioctls: the legacy way where almost
* all of the logic is in the ioctl callback, and the new way where most
* of the marshalling is handled in the common entry point, zfsdev_ioctl().
*
* Non-legacy ioctls should be registered by calling
* zfs_ioctl_register() from zfs_ioctl_init(). The ioctl is invoked
* from userland by lzc_ioctl().
*
* The registration arguments are as follows:
*
* const char *name
* The name of the ioctl. This is used for history logging. If the
* ioctl returns successfully (the callback returns 0), and allow_log
* is true, then a history log entry will be recorded with the input &
* output nvlists. The log entry can be printed with "zpool history -i".
*
* zfs_ioc_t ioc
* The ioctl request number, which userland will pass to ioctl(2).
* The ioctl numbers can change from release to release, because
* the caller (libzfs) must be matched to the kernel.
*
* zfs_secpolicy_func_t *secpolicy
* This function will be called before the zfs_ioc_func_t, to
* determine if this operation is permitted. It should return EPERM
* on failure, and 0 on success. Checks include determining if the
* dataset is visible in this zone, and if the user has either all
* zfs privileges in the zone (SYS_MOUNT), or has been granted permission
* to do this operation on this dataset with "zfs allow".
*
* zfs_ioc_namecheck_t namecheck
* This specifies what to expect in the zfs_cmd_t:zc_name -- a pool
* name, a dataset name, or nothing. If the name is not well-formed,
* the ioctl will fail and the callback will not be called.
* Therefore, the callback can assume that the name is well-formed
* (e.g. is null-terminated, doesn't have more than one '@' character,
* doesn't have invalid characters).
*
* zfs_ioc_poolcheck_t pool_check
* This specifies requirements on the pool state. If the pool does
* not meet them (is suspended or is readonly), the ioctl will fail
* and the callback will not be called. If any checks are specified
* (i.e. it is not POOL_CHECK_NONE), namecheck must not be NO_NAME.
* Multiple checks can be or-ed together (e.g. POOL_CHECK_SUSPENDED |
* POOL_CHECK_READONLY).
*
* boolean_t smush_outnvlist
* If smush_outnvlist is true, then the output is presumed to be a
* list of errors, and it will be "smushed" down to fit into the
* caller's buffer, by removing some entries and replacing them with a
* single "N_MORE_ERRORS" entry indicating how many were removed. See
* nvlist_smush() for details. If smush_outnvlist is false, and the
* outnvlist does not fit into the userland-provided buffer, then the
* ioctl will fail with ENOMEM.
*
* zfs_ioc_func_t *func
* The callback function that will perform the operation.
*
* The callback should return 0 on success, or an error number on
* failure. If the function fails, the userland ioctl will return -1,
* and errno will be set to the callback's return value. The callback
* will be called with the following arguments:
*
* const char *name
* The name of the pool or dataset to operate on, from
* zfs_cmd_t:zc_name. The 'namecheck' argument specifies the
* expected type (pool, dataset, or none).
*
* nvlist_t *innvl
* The input nvlist, deserialized from zfs_cmd_t:zc_nvlist_src. Or
* NULL if no input nvlist was provided. Changes to this nvlist are
* ignored. If the input nvlist could not be deserialized, the
* ioctl will fail and the callback will not be called.
*
* nvlist_t *outnvl
* The output nvlist, initially empty. The callback can fill it in,
* and it will be returned to userland by serializing it into
* zfs_cmd_t:zc_nvlist_dst. If it is non-empty, and serialization
* fails (e.g. because the caller didn't supply a large enough
* buffer), then the overall ioctl will fail. See the
* 'smush_nvlist' argument above for additional behaviors.
*
* There are two typical uses of the output nvlist:
* - To return state, e.g. property values. In this case,
* smush_outnvlist should be false. If the buffer was not large
* enough, the caller will reallocate a larger buffer and try
* the ioctl again.
*
* - To return multiple errors from an ioctl which makes on-disk
* changes. In this case, smush_outnvlist should be true.
* Ioctls which make on-disk modifications should generally not
* use the outnvl if they succeed, because the caller can not
* distinguish between the operation failing, and
* deserialization failing.
*/
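/*
 * For illustration only -- a non-legacy registration of the kind made in
 * zfs_ioctl_init() might look like the following (the exact argument
 * order shown is an assumption; the zfs_ioctl_register() prototype later
 * in this file is authoritative):
 *
 *	zfs_ioctl_register("snapshot", ZFS_IOC_SNAPSHOT,
 *	    zfs_ioc_snapshot, zfs_secpolicy_snapshot, POOL_NAME,
 *	    POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
 */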
#include <sys/types.h>
#include <sys/param.h>
#include <sys/errno.h>
#include <sys/uio.h>
#include <sys/buf.h>
#include <sys/modctl.h>
#include <sys/open.h>
#include <sys/file.h>
#include <sys/kmem.h>
#include <sys/conf.h>
#include <sys/cmn_err.h>
#include <sys/stat.h>
#include <sys/zfs_ioctl.h>
#include <sys/zfs_vfsops.h>
#include <sys/zfs_znode.h>
#include <sys/zap.h>
#include <sys/spa.h>
#include <sys/spa_impl.h>
#include <sys/vdev.h>
#include <sys/vdev_impl.h>
#include <sys/priv_impl.h>
#include <sys/dmu.h>
#include <sys/dsl_dir.h>
#include <sys/dsl_dataset.h>
#include <sys/dsl_prop.h>
#include <sys/dsl_deleg.h>
#include <sys/dmu_objset.h>
#include <sys/dmu_impl.h>
#include <sys/dmu_tx.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>
#include <sys/sunldi.h>
#include <sys/policy.h>
#include <sys/zone.h>
#include <sys/nvpair.h>
#include <sys/pathname.h>
#include <sys/mount.h>
#include <sys/sdt.h>
#include <sys/fs/zfs.h>
#include <sys/zfs_ctldir.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_onexit.h>
#include <sys/zvol.h>
#include <sys/dsl_scan.h>
#include <sharefs/share.h>
#include <sys/fm/util.h>
#include <sys/dsl_crypt.h>
#include <sys/dmu_send.h>
#include <sys/dsl_destroy.h>
#include <sys/dsl_bookmark.h>
#include <sys/dsl_userhold.h>
#include <sys/zfeature.h>
#include <sys/zcp.h>
#include <sys/zio_checksum.h>
+#include <sys/vdev_removal.h>
#include <linux/miscdevice.h>
#include <linux/slab.h>
#include "zfs_namecheck.h"
#include "zfs_prop.h"
#include "zfs_deleg.h"
#include "zfs_comutil.h"
#include <sys/lua/lua.h>
#include <sys/lua/lauxlib.h>
/*
* Limit maximum nvlist size. We don't want users passing in insane values
* for zc->zc_nvlist_src_size, since we will need to allocate that much memory.
*/
#define MAX_NVLIST_SRC_SIZE KMALLOC_MAX_SIZE
kmutex_t zfsdev_state_lock;
zfsdev_state_t *zfsdev_state_list;
extern void zfs_init(void);
extern void zfs_fini(void);
uint_t zfs_fsyncer_key;
extern uint_t rrw_tsd_key;
static uint_t zfs_allow_log_key;
typedef int zfs_ioc_legacy_func_t(zfs_cmd_t *);
typedef int zfs_ioc_func_t(const char *, nvlist_t *, nvlist_t *);
typedef int zfs_secpolicy_func_t(zfs_cmd_t *, nvlist_t *, cred_t *);
typedef enum {
NO_NAME,
POOL_NAME,
DATASET_NAME
} zfs_ioc_namecheck_t;
typedef enum {
POOL_CHECK_NONE = 1 << 0,
POOL_CHECK_SUSPENDED = 1 << 1,
POOL_CHECK_READONLY = 1 << 2,
} zfs_ioc_poolcheck_t;
typedef struct zfs_ioc_vec {
zfs_ioc_legacy_func_t *zvec_legacy_func;
zfs_ioc_func_t *zvec_func;
zfs_secpolicy_func_t *zvec_secpolicy;
zfs_ioc_namecheck_t zvec_namecheck;
boolean_t zvec_allow_log;
zfs_ioc_poolcheck_t zvec_pool_check;
boolean_t zvec_smush_outnvlist;
const char *zvec_name;
} zfs_ioc_vec_t;
/* This array is indexed by zfs_userquota_prop_t */
static const char *userquota_perms[] = {
ZFS_DELEG_PERM_USERUSED,
ZFS_DELEG_PERM_USERQUOTA,
ZFS_DELEG_PERM_GROUPUSED,
ZFS_DELEG_PERM_GROUPQUOTA,
ZFS_DELEG_PERM_USEROBJUSED,
ZFS_DELEG_PERM_USEROBJQUOTA,
ZFS_DELEG_PERM_GROUPOBJUSED,
ZFS_DELEG_PERM_GROUPOBJQUOTA,
ZFS_DELEG_PERM_PROJECTUSED,
ZFS_DELEG_PERM_PROJECTQUOTA,
ZFS_DELEG_PERM_PROJECTOBJUSED,
ZFS_DELEG_PERM_PROJECTOBJQUOTA,
};
static int zfs_ioc_userspace_upgrade(zfs_cmd_t *zc);
static int zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc);
static int zfs_check_settable(const char *name, nvpair_t *property,
cred_t *cr);
static int zfs_check_clearable(char *dataset, nvlist_t *props,
nvlist_t **errors);
static int zfs_fill_zplprops_root(uint64_t, nvlist_t *, nvlist_t *,
boolean_t *);
int zfs_set_prop_nvlist(const char *, zprop_source_t, nvlist_t *, nvlist_t *);
static int get_nvlist(uint64_t nvl, uint64_t size, int iflag, nvlist_t **nvp);
static void
history_str_free(char *buf)
{
kmem_free(buf, HIS_MAX_RECORD_LEN);
}
static char *
history_str_get(zfs_cmd_t *zc)
{
char *buf;
if (zc->zc_history == 0)
return (NULL);
buf = kmem_alloc(HIS_MAX_RECORD_LEN, KM_SLEEP);
if (copyinstr((void *)(uintptr_t)zc->zc_history,
buf, HIS_MAX_RECORD_LEN, NULL) != 0) {
history_str_free(buf);
return (NULL);
}
buf[HIS_MAX_RECORD_LEN -1] = '\0';
return (buf);
}
/*
* Check to see if the named dataset is currently defined as bootable
*/
static boolean_t
zfs_is_bootfs(const char *name)
{
objset_t *os;
if (dmu_objset_hold(name, FTAG, &os) == 0) {
boolean_t ret;
ret = (dmu_objset_id(os) == spa_bootfs(dmu_objset_spa(os)));
dmu_objset_rele(os, FTAG);
return (ret);
}
return (B_FALSE);
}
/*
* Return non-zero if the spa version is less than requested version.
*/
static int
zfs_earlier_version(const char *name, int version)
{
spa_t *spa;
if (spa_open(name, &spa, FTAG) == 0) {
if (spa_version(spa) < version) {
spa_close(spa, FTAG);
return (1);
}
spa_close(spa, FTAG);
}
return (0);
}
/*
* Return TRUE if the ZPL version is less than requested version.
*/
static boolean_t
zpl_earlier_version(const char *name, int version)
{
objset_t *os;
boolean_t rc = B_TRUE;
if (dmu_objset_hold(name, FTAG, &os) == 0) {
uint64_t zplversion;
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dmu_objset_rele(os, FTAG);
return (B_TRUE);
}
/* XXX reading from non-owned objset */
if (zfs_get_zplprop(os, ZFS_PROP_VERSION, &zplversion) == 0)
rc = zplversion < version;
dmu_objset_rele(os, FTAG);
}
return (rc);
}
static void
zfs_log_history(zfs_cmd_t *zc)
{
spa_t *spa;
char *buf;
if ((buf = history_str_get(zc)) == NULL)
return;
if (spa_open(zc->zc_name, &spa, FTAG) == 0) {
if (spa_version(spa) >= SPA_VERSION_ZPOOL_HISTORY)
(void) spa_history_log(spa, buf);
spa_close(spa, FTAG);
}
history_str_free(buf);
}
/*
* Policy for top-level read operations (list pools). Requires no privileges,
* and can be used in the local zone, as there is no associated dataset.
*/
/* ARGSUSED */
static int
zfs_secpolicy_none(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (0);
}
/*
* Policy for dataset read operations (list children, get statistics). Requires
* no privileges, but must be visible in the local zone.
*/
/* ARGSUSED */
static int
zfs_secpolicy_read(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
if (INGLOBALZONE(curproc) ||
zone_dataset_visible(zc->zc_name, NULL))
return (0);
return (SET_ERROR(ENOENT));
}
static int
zfs_dozonecheck_impl(const char *dataset, uint64_t zoned, cred_t *cr)
{
int writable = 1;
/*
* The dataset must be visible to this zone -- check this first
* so they don't see EPERM on something they shouldn't know about.
*/
if (!INGLOBALZONE(curproc) &&
!zone_dataset_visible(dataset, &writable))
return (SET_ERROR(ENOENT));
if (INGLOBALZONE(curproc)) {
/*
* If the fs is zoned, only root can access it from the
* global zone.
*/
if (secpolicy_zfs(cr) && zoned)
return (SET_ERROR(EPERM));
} else {
/*
* If we are in a local zone, the 'zoned' property must be set.
*/
if (!zoned)
return (SET_ERROR(EPERM));
/* must be writable by this zone */
if (!writable)
return (SET_ERROR(EPERM));
}
return (0);
}
static int
zfs_dozonecheck(const char *dataset, cred_t *cr)
{
uint64_t zoned;
if (dsl_prop_get_integer(dataset, "zoned", &zoned, NULL))
return (SET_ERROR(ENOENT));
return (zfs_dozonecheck_impl(dataset, zoned, cr));
}
static int
zfs_dozonecheck_ds(const char *dataset, dsl_dataset_t *ds, cred_t *cr)
{
uint64_t zoned;
if (dsl_prop_get_int_ds(ds, "zoned", &zoned))
return (SET_ERROR(ENOENT));
return (zfs_dozonecheck_impl(dataset, zoned, cr));
}
static int
zfs_secpolicy_write_perms_ds(const char *name, dsl_dataset_t *ds,
const char *perm, cred_t *cr)
{
int error;
error = zfs_dozonecheck_ds(name, ds, cr);
if (error == 0) {
error = secpolicy_zfs(cr);
if (error != 0)
error = dsl_deleg_access_impl(ds, perm, cr);
}
return (error);
}
static int
zfs_secpolicy_write_perms(const char *name, const char *perm, cred_t *cr)
{
int error;
dsl_dataset_t *ds;
dsl_pool_t *dp;
/*
* First do a quick check for root in the global zone, which
* is allowed to do all write_perms. This ensures that zfs_ioc_*
* will get to handle nonexistent datasets.
*/
if (INGLOBALZONE(curproc) && secpolicy_zfs(cr) == 0)
return (0);
error = dsl_pool_hold(name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, name, FTAG, &ds);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
error = zfs_secpolicy_write_perms_ds(name, ds, perm, cr);
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
/*
* Policy for setting the security label property.
*
* Returns 0 for success, non-zero for access and other errors.
*/
static int
zfs_set_slabel_policy(const char *name, char *strval, cred_t *cr)
{
#ifdef HAVE_MLSLABEL
char ds_hexsl[MAXNAMELEN];
bslabel_t ds_sl, new_sl;
boolean_t new_default = FALSE;
uint64_t zoned;
int needed_priv = -1;
int error;
/* First get the existing dataset label. */
error = dsl_prop_get(name, zfs_prop_to_name(ZFS_PROP_MLSLABEL),
1, sizeof (ds_hexsl), &ds_hexsl, NULL);
if (error != 0)
return (SET_ERROR(EPERM));
if (strcasecmp(strval, ZFS_MLSLABEL_DEFAULT) == 0)
new_default = TRUE;
/* The label must be translatable */
if (!new_default && (hexstr_to_label(strval, &new_sl) != 0))
return (SET_ERROR(EINVAL));
/*
* In a non-global zone, disallow attempts to set a label that
* doesn't match that of the zone; otherwise no other checks
* are needed.
*/
if (!INGLOBALZONE(curproc)) {
if (new_default || !blequal(&new_sl, CR_SL(CRED())))
return (SET_ERROR(EPERM));
return (0);
}
/*
* For global-zone datasets (i.e., those whose zoned property is
* "off", verify that the specified new label is valid for the
* global zone.
*/
if (dsl_prop_get_integer(name,
zfs_prop_to_name(ZFS_PROP_ZONED), &zoned, NULL))
return (SET_ERROR(EPERM));
if (!zoned) {
if (zfs_check_global_label(name, strval) != 0)
return (SET_ERROR(EPERM));
}
/*
* If the existing dataset label is nondefault, check if the
* dataset is mounted (label cannot be changed while mounted).
* Get the zfsvfs_t; if there isn't one, then the dataset isn't
* mounted (or isn't a dataset, doesn't exist, ...).
*/
if (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) != 0) {
objset_t *os;
static char *setsl_tag = "setsl_tag";
/*
* Try to own the dataset; abort if there is any error
* (e.g., the dataset is already mounted or otherwise in use).
*/
error = dmu_objset_own(name, DMU_OST_ZFS, B_TRUE, B_TRUE,
setsl_tag, &os);
if (error != 0)
return (SET_ERROR(EPERM));
dmu_objset_disown(os, B_TRUE, setsl_tag);
if (new_default) {
needed_priv = PRIV_FILE_DOWNGRADE_SL;
goto out_check;
}
if (hexstr_to_label(strval, &new_sl) != 0)
return (SET_ERROR(EPERM));
if (blstrictdom(&ds_sl, &new_sl))
needed_priv = PRIV_FILE_DOWNGRADE_SL;
else if (blstrictdom(&new_sl, &ds_sl))
needed_priv = PRIV_FILE_UPGRADE_SL;
} else {
/* dataset currently has a default label */
if (!new_default)
needed_priv = PRIV_FILE_UPGRADE_SL;
}
out_check:
if (needed_priv != -1)
return (PRIV_POLICY(cr, needed_priv, B_FALSE, EPERM, NULL));
return (0);
#else
return (SET_ERROR(ENOTSUP));
#endif /* HAVE_MLSLABEL */
}
static int
zfs_secpolicy_setprop(const char *dsname, zfs_prop_t prop, nvpair_t *propval,
cred_t *cr)
{
char *strval;
/*
* Check permissions for special properties.
*/
switch (prop) {
default:
break;
case ZFS_PROP_ZONED:
/*
* Disallow setting of 'zoned' from within a local zone.
*/
if (!INGLOBALZONE(curproc))
return (SET_ERROR(EPERM));
break;
case ZFS_PROP_QUOTA:
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
if (!INGLOBALZONE(curproc)) {
uint64_t zoned;
char setpoint[ZFS_MAX_DATASET_NAME_LEN];
/*
* Unprivileged users are allowed to modify the
* limit on things *under* (i.e., contained by)
* the thing they own.
*/
if (dsl_prop_get_integer(dsname, "zoned", &zoned,
setpoint))
return (SET_ERROR(EPERM));
if (!zoned || strlen(dsname) <= strlen(setpoint))
return (SET_ERROR(EPERM));
}
break;
case ZFS_PROP_MLSLABEL:
if (!is_system_labeled())
return (SET_ERROR(EPERM));
if (nvpair_value_string(propval, &strval) == 0) {
int err;
err = zfs_set_slabel_policy(dsname, strval, CRED());
if (err != 0)
return (err);
}
break;
}
return (zfs_secpolicy_write_perms(dsname, zfs_prop_to_name(prop), cr));
}
/* ARGSUSED */
static int
zfs_secpolicy_set_fsacl(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
int error;
error = zfs_dozonecheck(zc->zc_name, cr);
if (error != 0)
return (error);
/*
* permission to set permissions will be evaluated later in
* dsl_deleg_can_allow()
*/
return (0);
}
/* ARGSUSED */
static int
zfs_secpolicy_rollback(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_ROLLBACK, cr));
}
/* ARGSUSED */
static int
zfs_secpolicy_send(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
char *cp;
int error;
/*
* Generate the current snapshot name from the given objsetid, then
* use that name for the secpolicy/zone checks.
*/
cp = strchr(zc->zc_name, '@');
if (cp == NULL)
return (SET_ERROR(EINVAL));
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &ds);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
dsl_dataset_name(ds, zc->zc_name);
error = zfs_secpolicy_write_perms_ds(zc->zc_name, ds,
ZFS_DELEG_PERM_SEND, cr);
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
/* ARGSUSED */
static int
zfs_secpolicy_send_new(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_SEND, cr));
}
#ifdef HAVE_SMB_SHARE
/* ARGSUSED */
static int
zfs_secpolicy_deleg_share(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
vnode_t *vp;
int error;
if ((error = lookupname(zc->zc_value, UIO_SYSSPACE,
NO_FOLLOW, NULL, &vp)) != 0)
return (error);
/* Now make sure mntpnt and dataset are ZFS */
if (vp->v_vfsp->vfs_fstype != zfsfstype ||
(strcmp((char *)refstr_value(vp->v_vfsp->vfs_resource),
zc->zc_name) != 0)) {
VN_RELE(vp);
return (SET_ERROR(EPERM));
}
VN_RELE(vp);
return (dsl_deleg_access(zc->zc_name,
ZFS_DELEG_PERM_SHARE, cr));
}
#endif /* HAVE_SMB_SHARE */
int
zfs_secpolicy_share(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
#ifdef HAVE_SMB_SHARE
if (!INGLOBALZONE(curproc))
return (SET_ERROR(EPERM));
if (secpolicy_nfs(cr) == 0) {
return (0);
} else {
return (zfs_secpolicy_deleg_share(zc, innvl, cr));
}
#else
return (SET_ERROR(ENOTSUP));
#endif /* HAVE_SMB_SHARE */
}
int
zfs_secpolicy_smb_acl(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
#ifdef HAVE_SMB_SHARE
if (!INGLOBALZONE(curproc))
return (SET_ERROR(EPERM));
if (secpolicy_smb(cr) == 0) {
return (0);
} else {
return (zfs_secpolicy_deleg_share(zc, innvl, cr));
}
#else
return (SET_ERROR(ENOTSUP));
#endif /* HAVE_SMB_SHARE */
}
static int
zfs_get_parent(const char *datasetname, char *parent, int parentsize)
{
char *cp;
/*
* Remove the @bla or /bla from the end of the name to get the parent.
*/
(void) strncpy(parent, datasetname, parentsize);
cp = strrchr(parent, '@');
if (cp != NULL) {
cp[0] = '\0';
} else {
cp = strrchr(parent, '/');
if (cp == NULL)
return (SET_ERROR(ENOENT));
cp[0] = '\0';
}
return (0);
}
int
zfs_secpolicy_destroy_perms(const char *name, cred_t *cr)
{
int error;
if ((error = zfs_secpolicy_write_perms(name,
ZFS_DELEG_PERM_MOUNT, cr)) != 0)
return (error);
return (zfs_secpolicy_write_perms(name, ZFS_DELEG_PERM_DESTROY, cr));
}
/* ARGSUSED */
static int
zfs_secpolicy_destroy(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_destroy_perms(zc->zc_name, cr));
}
/*
* Destroying snapshots with delegated permissions requires
* descendant mount and destroy permissions.
*/
/* ARGSUSED */
static int
zfs_secpolicy_destroy_snaps(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
nvlist_t *snaps;
nvpair_t *pair, *nextpair;
int error = 0;
if (nvlist_lookup_nvlist(innvl, "snaps", &snaps) != 0)
return (SET_ERROR(EINVAL));
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nextpair) {
nextpair = nvlist_next_nvpair(snaps, pair);
error = zfs_secpolicy_destroy_perms(nvpair_name(pair), cr);
if (error == ENOENT) {
/*
* Ignore any snapshots that don't exist (we consider
* them "already destroyed"). Remove the name from the
* nvl here in case the snapshot is created between
* now and when we try to destroy it (in which case
* we don't want to destroy it since we haven't
* checked for permission).
*/
fnvlist_remove_nvpair(snaps, pair);
error = 0;
}
if (error != 0)
break;
}
return (error);
}
int
zfs_secpolicy_rename_perms(const char *from, const char *to, cred_t *cr)
{
char parentname[ZFS_MAX_DATASET_NAME_LEN];
int error;
if ((error = zfs_secpolicy_write_perms(from,
ZFS_DELEG_PERM_RENAME, cr)) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(from,
ZFS_DELEG_PERM_MOUNT, cr)) != 0)
return (error);
if ((error = zfs_get_parent(to, parentname,
sizeof (parentname))) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(parentname,
ZFS_DELEG_PERM_CREATE, cr)) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(parentname,
ZFS_DELEG_PERM_MOUNT, cr)) != 0)
return (error);
return (error);
}
/* ARGSUSED */
static int
zfs_secpolicy_rename(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_rename_perms(zc->zc_name, zc->zc_value, cr));
}
/* ARGSUSED */
static int
zfs_secpolicy_promote(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
dsl_pool_t *dp;
dsl_dataset_t *clone;
int error;
error = zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_PROMOTE, cr);
if (error != 0)
return (error);
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &clone);
if (error == 0) {
char parentname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_t *origin = NULL;
dsl_dir_t *dd;
dd = clone->ds_dir;
error = dsl_dataset_hold_obj(dd->dd_pool,
dsl_dir_phys(dd)->dd_origin_obj, FTAG, &origin);
if (error != 0) {
dsl_dataset_rele(clone, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
error = zfs_secpolicy_write_perms_ds(zc->zc_name, clone,
ZFS_DELEG_PERM_MOUNT, cr);
dsl_dataset_name(origin, parentname);
if (error == 0) {
error = zfs_secpolicy_write_perms_ds(parentname, origin,
ZFS_DELEG_PERM_PROMOTE, cr);
}
dsl_dataset_rele(clone, FTAG);
dsl_dataset_rele(origin, FTAG);
}
dsl_pool_rele(dp, FTAG);
return (error);
}
/* ARGSUSED */
static int
zfs_secpolicy_recv(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
int error;
if ((error = zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_RECEIVE, cr)) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_MOUNT, cr)) != 0)
return (error);
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_CREATE, cr));
}
/* ARGSUSED */
static int
zfs_secpolicy_recv_new(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_recv(zc, innvl, cr));
}
int
zfs_secpolicy_snapshot_perms(const char *name, cred_t *cr)
{
return (zfs_secpolicy_write_perms(name,
ZFS_DELEG_PERM_SNAPSHOT, cr));
}
/*
* Check for permission to create each snapshot in the nvlist.
*/
/* ARGSUSED */
static int
zfs_secpolicy_snapshot(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
nvlist_t *snaps;
int error = 0;
nvpair_t *pair;
if (nvlist_lookup_nvlist(innvl, "snaps", &snaps) != 0)
return (SET_ERROR(EINVAL));
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
char *name = nvpair_name(pair);
char *atp = strchr(name, '@');
if (atp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
*atp = '\0';
error = zfs_secpolicy_snapshot_perms(name, cr);
*atp = '@';
if (error != 0)
break;
}
return (error);
}
/*
* Check for permission to create each bookmark in the nvlist.
*/
/* ARGSUSED */
static int
zfs_secpolicy_bookmark(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
int error = 0;
for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
char *name = nvpair_name(pair);
char *hashp = strchr(name, '#');
if (hashp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
*hashp = '\0';
error = zfs_secpolicy_write_perms(name,
ZFS_DELEG_PERM_BOOKMARK, cr);
*hashp = '#';
if (error != 0)
break;
}
return (error);
}
+/* ARGSUSED */
+static int
+zfs_secpolicy_remap(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
+{
+ return (zfs_secpolicy_write_perms(zc->zc_name,
+ ZFS_DELEG_PERM_REMAP, cr));
+}
+
/* ARGSUSED */
static int
zfs_secpolicy_destroy_bookmarks(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
nvpair_t *pair, *nextpair;
int error = 0;
for (pair = nvlist_next_nvpair(innvl, NULL); pair != NULL;
pair = nextpair) {
char *name = nvpair_name(pair);
char *hashp = strchr(name, '#');
nextpair = nvlist_next_nvpair(innvl, pair);
if (hashp == NULL) {
error = SET_ERROR(EINVAL);
break;
}
*hashp = '\0';
error = zfs_secpolicy_write_perms(name,
ZFS_DELEG_PERM_DESTROY, cr);
*hashp = '#';
if (error == ENOENT) {
/*
* Ignore any filesystems that don't exist (we consider
* their bookmarks "already destroyed"). Remove
* the name from the nvl here in case the filesystem
* is created between now and when we try to destroy
* the bookmark (in which case we don't want to
* destroy it since we haven't checked for permission).
*/
fnvlist_remove_nvpair(innvl, pair);
error = 0;
}
if (error != 0)
break;
}
return (error);
}
/* ARGSUSED */
static int
zfs_secpolicy_log_history(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
/*
* Even root must have a proper TSD so that we know what pool
* to log to.
*/
if (tsd_get(zfs_allow_log_key) == NULL)
return (SET_ERROR(EPERM));
return (0);
}
static int
zfs_secpolicy_create_clone(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
char parentname[ZFS_MAX_DATASET_NAME_LEN];
int error;
char *origin;
if ((error = zfs_get_parent(zc->zc_name, parentname,
sizeof (parentname))) != 0)
return (error);
if (nvlist_lookup_string(innvl, "origin", &origin) == 0 &&
(error = zfs_secpolicy_write_perms(origin,
ZFS_DELEG_PERM_CLONE, cr)) != 0)
return (error);
if ((error = zfs_secpolicy_write_perms(parentname,
ZFS_DELEG_PERM_CREATE, cr)) != 0)
return (error);
return (zfs_secpolicy_write_perms(parentname,
ZFS_DELEG_PERM_MOUNT, cr));
}
/*
* Policy for pool operations - create/destroy pools, add vdevs, etc. Requires
* SYS_CONFIG privilege, which is not available in a local zone.
*/
/* ARGSUSED */
static int
zfs_secpolicy_config(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
if (secpolicy_sys_config(cr, B_FALSE) != 0)
return (SET_ERROR(EPERM));
return (0);
}
/*
* Policy for object to name lookups.
*/
/* ARGSUSED */
static int
zfs_secpolicy_diff(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
int error;
if ((error = secpolicy_sys_config(cr, B_FALSE)) == 0)
return (0);
error = zfs_secpolicy_write_perms(zc->zc_name, ZFS_DELEG_PERM_DIFF, cr);
return (error);
}
/*
* Policy for fault injection. Requires all privileges.
*/
/* ARGSUSED */
static int
zfs_secpolicy_inject(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (secpolicy_zinject(cr));
}
/* ARGSUSED */
static int
zfs_secpolicy_inherit_prop(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
zfs_prop_t prop = zfs_name_to_prop(zc->zc_value);
if (prop == ZPROP_INVAL) {
if (!zfs_prop_user(zc->zc_value))
return (SET_ERROR(EINVAL));
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_USERPROP, cr));
} else {
return (zfs_secpolicy_setprop(zc->zc_name, prop,
NULL, cr));
}
}
static int
zfs_secpolicy_userspace_one(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
int err = zfs_secpolicy_read(zc, innvl, cr);
if (err)
return (err);
if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
return (SET_ERROR(EINVAL));
if (zc->zc_value[0] == 0) {
/*
* They are asking about a POSIX uid/gid. If it's
* their own, allow it.
*/
if (zc->zc_objset_type == ZFS_PROP_USERUSED ||
zc->zc_objset_type == ZFS_PROP_USERQUOTA ||
zc->zc_objset_type == ZFS_PROP_USEROBJUSED ||
zc->zc_objset_type == ZFS_PROP_USEROBJQUOTA) {
if (zc->zc_guid == crgetuid(cr))
return (0);
} else if (zc->zc_objset_type == ZFS_PROP_GROUPUSED ||
zc->zc_objset_type == ZFS_PROP_GROUPQUOTA ||
zc->zc_objset_type == ZFS_PROP_GROUPOBJUSED ||
zc->zc_objset_type == ZFS_PROP_GROUPOBJQUOTA) {
if (groupmember(zc->zc_guid, cr))
return (0);
}
/* else is for project quota/used */
}
return (zfs_secpolicy_write_perms(zc->zc_name,
userquota_perms[zc->zc_objset_type], cr));
}
static int
zfs_secpolicy_userspace_many(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
int err = zfs_secpolicy_read(zc, innvl, cr);
if (err)
return (err);
if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
return (SET_ERROR(EINVAL));
return (zfs_secpolicy_write_perms(zc->zc_name,
userquota_perms[zc->zc_objset_type], cr));
}
/* ARGSUSED */
static int
zfs_secpolicy_userspace_upgrade(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_setprop(zc->zc_name, ZFS_PROP_VERSION,
NULL, cr));
}
/* ARGSUSED */
static int
zfs_secpolicy_hold(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
nvpair_t *pair;
nvlist_t *holds;
int error;
error = nvlist_lookup_nvlist(innvl, "holds", &holds);
if (error != 0)
return (SET_ERROR(EINVAL));
for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(holds, pair)) {
char fsname[ZFS_MAX_DATASET_NAME_LEN];
error = dmu_fsname(nvpair_name(pair), fsname);
if (error != 0)
return (error);
error = zfs_secpolicy_write_perms(fsname,
ZFS_DELEG_PERM_HOLD, cr);
if (error != 0)
return (error);
}
return (0);
}
/* ARGSUSED */
static int
zfs_secpolicy_release(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
nvpair_t *pair;
int error;
for (pair = nvlist_next_nvpair(innvl, NULL); pair != NULL;
pair = nvlist_next_nvpair(innvl, pair)) {
char fsname[ZFS_MAX_DATASET_NAME_LEN];
error = dmu_fsname(nvpair_name(pair), fsname);
if (error != 0)
return (error);
error = zfs_secpolicy_write_perms(fsname,
ZFS_DELEG_PERM_RELEASE, cr);
if (error != 0)
return (error);
}
return (0);
}
/*
* Policy for allowing temporary snapshots to be taken or released
*/
static int
zfs_secpolicy_tmp_snapshot(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
/*
* A temporary snapshot is the same as a snapshot,
* hold, destroy and release all rolled into one.
* Delegated diff alone is sufficient for us to allow this.
*/
int error;
if ((error = zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_DIFF, cr)) == 0)
return (0);
error = zfs_secpolicy_snapshot_perms(zc->zc_name, cr);
if (error == 0)
error = zfs_secpolicy_hold(zc, innvl, cr);
if (error == 0)
error = zfs_secpolicy_release(zc, innvl, cr);
if (error == 0)
error = zfs_secpolicy_destroy(zc, innvl, cr);
return (error);
}
static int
zfs_secpolicy_load_key(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_LOAD_KEY, cr));
}
static int
zfs_secpolicy_change_key(zfs_cmd_t *zc, nvlist_t *innvl, cred_t *cr)
{
return (zfs_secpolicy_write_perms(zc->zc_name,
ZFS_DELEG_PERM_CHANGE_KEY, cr));
}
/*
* Returns the nvlist as specified by the user in the zfs_cmd_t.
*/
static int
get_nvlist(uint64_t nvl, uint64_t size, int iflag, nvlist_t **nvp)
{
char *packed;
int error;
nvlist_t *list = NULL;
/*
* Read in and unpack the user-supplied nvlist.
*/
if (size == 0)
return (SET_ERROR(EINVAL));
packed = vmem_alloc(size, KM_SLEEP);
if ((error = ddi_copyin((void *)(uintptr_t)nvl, packed, size,
iflag)) != 0) {
vmem_free(packed, size);
return (SET_ERROR(EFAULT));
}
if ((error = nvlist_unpack(packed, size, &list, 0)) != 0) {
vmem_free(packed, size);
return (error);
}
vmem_free(packed, size);
*nvp = list;
return (0);
}
/*
* Reduce the size of this nvlist until it can be serialized in 'max' bytes.
* Entries will be removed from the end of the nvlist, and one int32 entry
* named "N_MORE_ERRORS" will be added indicating how many entries were
* removed.
*/
static int
nvlist_smush(nvlist_t *errors, size_t max)
{
size_t size;
size = fnvlist_size(errors);
if (size > max) {
nvpair_t *more_errors;
int n = 0;
if (max < 1024)
return (SET_ERROR(ENOMEM));
fnvlist_add_int32(errors, ZPROP_N_MORE_ERRORS, 0);
more_errors = nvlist_prev_nvpair(errors, NULL);
do {
nvpair_t *pair = nvlist_prev_nvpair(errors,
more_errors);
fnvlist_remove_nvpair(errors, pair);
n++;
size = fnvlist_size(errors);
} while (size > max);
fnvlist_remove_nvpair(errors, more_errors);
fnvlist_add_int32(errors, ZPROP_N_MORE_ERRORS, n);
ASSERT3U(fnvlist_size(errors), <=, max);
}
return (0);
}
static int
put_nvlist(zfs_cmd_t *zc, nvlist_t *nvl)
{
char *packed = NULL;
int error = 0;
size_t size;
size = fnvlist_size(nvl);
if (size > zc->zc_nvlist_dst_size) {
error = SET_ERROR(ENOMEM);
} else {
packed = fnvlist_pack(nvl, &size);
if (ddi_copyout(packed, (void *)(uintptr_t)zc->zc_nvlist_dst,
size, zc->zc_iflags) != 0)
error = SET_ERROR(EFAULT);
fnvlist_pack_free(packed, size);
}
zc->zc_nvlist_dst_size = size;
zc->zc_nvlist_dst_filled = B_TRUE;
return (error);
}
int
getzfsvfs_impl(objset_t *os, zfsvfs_t **zfvp)
{
int error = 0;
if (dmu_objset_type(os) != DMU_OST_ZFS) {
return (SET_ERROR(EINVAL));
}
mutex_enter(&os->os_user_ptr_lock);
*zfvp = dmu_objset_get_user(os);
/* bump s_active only when non-zero to prevent umount race */
if (*zfvp == NULL || (*zfvp)->z_sb == NULL ||
!atomic_inc_not_zero(&((*zfvp)->z_sb->s_active))) {
error = SET_ERROR(ESRCH);
}
mutex_exit(&os->os_user_ptr_lock);
return (error);
}
int
getzfsvfs(const char *dsname, zfsvfs_t **zfvp)
{
objset_t *os;
int error;
error = dmu_objset_hold(dsname, FTAG, &os);
if (error != 0)
return (error);
error = getzfsvfs_impl(os, zfvp);
dmu_objset_rele(os, FTAG);
return (error);
}
/*
* Find a zfsvfs_t for a mounted filesystem, or create our own, in which
* case its z_sb will be NULL, and it will be opened as the owner.
* If 'writer' is set, the z_teardown_lock will be held for RW_WRITER,
* which prevents all inode ops from running.
*/
static int
zfsvfs_hold(const char *name, void *tag, zfsvfs_t **zfvp, boolean_t writer)
{
int error = 0;
if (getzfsvfs(name, zfvp) != 0)
error = zfsvfs_create(name, B_FALSE, zfvp);
if (error == 0) {
rrm_enter(&(*zfvp)->z_teardown_lock, (writer) ? RW_WRITER :
RW_READER, tag);
if ((*zfvp)->z_unmounted) {
/*
* XXX we could probably try again, since the unmounting
* thread should be just about to disassociate the
* objset from the zfsvfs.
*/
rrm_exit(&(*zfvp)->z_teardown_lock, tag);
return (SET_ERROR(EBUSY));
}
}
return (error);
}
static void
zfsvfs_rele(zfsvfs_t *zfsvfs, void *tag)
{
rrm_exit(&zfsvfs->z_teardown_lock, tag);
if (zfsvfs->z_sb) {
deactivate_super(zfsvfs->z_sb);
} else {
dmu_objset_disown(zfsvfs->z_os, B_TRUE, zfsvfs);
zfsvfs_free(zfsvfs);
}
}
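/*
 * inputs:
 * zc_name name of the pool to create
 * zc_nvlist_conf{_size} pool config (vdev layout) nvlist
 * zc_nvlist_src{_size} nvlist of pool properties, optionally including
 * root dataset properties and hidden (crypto) arguments
 */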
static int
zfs_ioc_pool_create(zfs_cmd_t *zc)
{
int error;
nvlist_t *config, *props = NULL;
nvlist_t *rootprops = NULL;
nvlist_t *zplprops = NULL;
dsl_crypto_params_t *dcp = NULL;
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config)))
return (error);
if (zc->zc_nvlist_src_size != 0 && (error =
get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props))) {
nvlist_free(config);
return (error);
}
if (props) {
nvlist_t *nvl = NULL;
nvlist_t *hidden_args = NULL;
uint64_t version = SPA_VERSION;
(void) nvlist_lookup_uint64(props,
zpool_prop_to_name(ZPOOL_PROP_VERSION), &version);
if (!SPA_VERSION_IS_SUPPORTED(version)) {
error = SET_ERROR(EINVAL);
goto pool_props_bad;
}
(void) nvlist_lookup_nvlist(props, ZPOOL_ROOTFS_PROPS, &nvl);
if (nvl) {
error = nvlist_dup(nvl, &rootprops, KM_SLEEP);
if (error != 0) {
nvlist_free(config);
nvlist_free(props);
return (error);
}
(void) nvlist_remove_all(props, ZPOOL_ROOTFS_PROPS);
}
(void) nvlist_lookup_nvlist(props, ZPOOL_HIDDEN_ARGS,
&hidden_args);
error = dsl_crypto_params_create_nvlist(DCP_CMD_NONE,
rootprops, hidden_args, &dcp);
if (error != 0) {
nvlist_free(config);
nvlist_free(props);
return (error);
}
(void) nvlist_remove_all(props, ZPOOL_HIDDEN_ARGS);
VERIFY(nvlist_alloc(&zplprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
error = zfs_fill_zplprops_root(version, rootprops,
zplprops, NULL);
if (error != 0)
goto pool_props_bad;
}
error = spa_create(zc->zc_name, config, props, zplprops, dcp);
/*
* Set the remaining root properties
*/
if (!error && (error = zfs_set_prop_nvlist(zc->zc_name,
ZPROP_SRC_LOCAL, rootprops, NULL)) != 0)
(void) spa_destroy(zc->zc_name);
pool_props_bad:
nvlist_free(rootprops);
nvlist_free(zplprops);
nvlist_free(config);
nvlist_free(props);
dsl_crypto_params_free(dcp, !!error);
return (error);
}
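/*
 * inputs:
 * zc_name name of the pool to destroy
 */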
static int
zfs_ioc_pool_destroy(zfs_cmd_t *zc)
{
int error;
zfs_log_history(zc);
error = spa_destroy(zc->zc_name);
return (error);
}
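/*
 * inputs:
 * zc_name name of the pool to import
 * zc_nvlist_conf{_size} pool config nvlist
 * zc_nvlist_src{_size} nvlist of pool properties (optional)
 * zc_guid expected pool guid
 * zc_cookie import flags
 *
 * outputs:
 * zc_nvlist_dst config nvlist (if a destination buffer was supplied)
 */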
static int
zfs_ioc_pool_import(zfs_cmd_t *zc)
{
nvlist_t *config, *props = NULL;
uint64_t guid;
int error;
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config)) != 0)
return (error);
if (zc->zc_nvlist_src_size != 0 && (error =
get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props))) {
nvlist_free(config);
return (error);
}
if (nvlist_lookup_uint64(config, ZPOOL_CONFIG_POOL_GUID, &guid) != 0 ||
guid != zc->zc_guid)
error = SET_ERROR(EINVAL);
else
error = spa_import(zc->zc_name, config, props, zc->zc_cookie);
if (zc->zc_nvlist_dst != 0) {
int err;
if ((err = put_nvlist(zc, config)) != 0)
error = err;
}
nvlist_free(config);
nvlist_free(props);
return (error);
}
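/*
 * inputs:
 * zc_name name of the pool to export
 * zc_cookie force flag
 * zc_guid hardforce flag
 */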
static int
zfs_ioc_pool_export(zfs_cmd_t *zc)
{
int error;
boolean_t force = (boolean_t)zc->zc_cookie;
boolean_t hardforce = (boolean_t)zc->zc_guid;
zfs_log_history(zc);
error = spa_export(zc->zc_name, NULL, force, hardforce);
return (error);
}
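/*
 * inputs:
 * zc_cookie cached config generation
 *
 * outputs:
 * zc_nvlist_dst nvlist of all pool configs
 * zc_cookie updated config generation
 */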
static int
zfs_ioc_pool_configs(zfs_cmd_t *zc)
{
nvlist_t *configs;
int error;
if ((configs = spa_all_configs(&zc->zc_cookie)) == NULL)
return (SET_ERROR(EEXIST));
error = put_nvlist(zc, configs);
nvlist_free(configs);
return (error);
}
/*
* inputs:
* zc_name name of the pool
*
* outputs:
* zc_cookie real errno
* zc_nvlist_dst config nvlist
* zc_nvlist_dst_size size of config nvlist
*/
static int
zfs_ioc_pool_stats(zfs_cmd_t *zc)
{
nvlist_t *config;
int error;
int ret = 0;
error = spa_get_stats(zc->zc_name, &config, zc->zc_value,
sizeof (zc->zc_value));
if (config != NULL) {
ret = put_nvlist(zc, config);
nvlist_free(config);
/*
* The config may be present even if 'error' is non-zero.
* In this case we return success, and preserve the real errno
* in 'zc_cookie'.
*/
zc->zc_cookie = error;
} else {
ret = error;
}
return (ret);
}
/*
* Try to import the given pool, returning pool stats as appropriate so that
* userland knows which devices are available and the overall pool health.
*/
static int
zfs_ioc_pool_tryimport(zfs_cmd_t *zc)
{
nvlist_t *tryconfig, *config = NULL;
int error;
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &tryconfig)) != 0)
return (error);
config = spa_tryimport(tryconfig);
nvlist_free(tryconfig);
if (config == NULL)
return (SET_ERROR(EINVAL));
error = put_nvlist(zc, config);
nvlist_free(config);
return (error);
}
/*
* inputs:
* zc_name name of the pool
* zc_cookie scan func (pool_scan_func_t)
* zc_flags scrub pause/resume flag (pool_scrub_cmd_t)
*/
static int
zfs_ioc_pool_scan(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
if (zc->zc_flags >= POOL_SCRUB_FLAGS_END)
return (SET_ERROR(EINVAL));
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if (zc->zc_flags == POOL_SCRUB_PAUSE)
error = spa_scrub_pause_resume(spa, POOL_SCRUB_PAUSE);
else if (zc->zc_cookie == POOL_SCAN_NONE)
error = spa_scan_stop(spa);
else
error = spa_scan(spa, zc->zc_cookie);
spa_close(spa, FTAG);
return (error);
}
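/*
 * inputs:
 * zc_name name of the pool to freeze (for testing)
 */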
static int
zfs_ioc_pool_freeze(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error == 0) {
spa_freeze(spa);
spa_close(spa, FTAG);
}
return (error);
}
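/*
 * inputs:
 * zc_name name of the pool
 * zc_cookie requested SPA version
 */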
static int
zfs_ioc_pool_upgrade(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if (zc->zc_cookie < spa_version(spa) ||
!SPA_VERSION_IS_SUPPORTED(zc->zc_cookie)) {
spa_close(spa, FTAG);
return (SET_ERROR(EINVAL));
}
spa_upgrade(spa, zc->zc_cookie);
spa_close(spa, FTAG);
return (error);
}
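/*
 * inputs:
 * zc_name name of the pool
 * zc_history buffer to receive history records
 * zc_history_len size of that buffer
 * zc_history_offset offset to start reading from
 *
 * outputs:
 * zc_history_len number of bytes copied out
 * zc_history_offset updated offset
 */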
static int
zfs_ioc_pool_get_history(zfs_cmd_t *zc)
{
spa_t *spa;
char *hist_buf;
uint64_t size;
int error;
if ((size = zc->zc_history_len) == 0)
return (SET_ERROR(EINVAL));
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
hist_buf = vmem_alloc(size, KM_SLEEP);
if ((error = spa_history_get(spa, &zc->zc_history_offset,
&zc->zc_history_len, hist_buf)) == 0) {
error = ddi_copyout(hist_buf,
(void *)(uintptr_t)zc->zc_history,
zc->zc_history_len, zc->zc_iflags);
}
spa_close(spa, FTAG);
vmem_free(hist_buf, size);
return (error);
}
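/*
 * Generate a new unique guid for the pool.
 *
 * inputs:
 * zc_name name of the pool
 */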
static int
zfs_ioc_pool_reguid(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error == 0) {
error = spa_change_guid(spa);
spa_close(spa, FTAG);
}
return (error);
}
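/*
 * inputs:
 * zc_name name of the pool
 * zc_obj dsl object number
 *
 * outputs:
 * zc_value name of the dataset
 */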
static int
zfs_ioc_dsobj_to_dsname(zfs_cmd_t *zc)
{
return (dsl_dsobj_to_dsname(zc->zc_name, zc->zc_obj, zc->zc_value));
}
/*
* inputs:
* zc_name name of filesystem
* zc_obj object to find
*
* outputs:
* zc_value name of object
*/
static int
zfs_ioc_obj_to_path(zfs_cmd_t *zc)
{
objset_t *os;
int error;
/* XXX reading from objset not owned */
if ((error = dmu_objset_hold_flags(zc->zc_name, B_TRUE,
FTAG, &os)) != 0)
return (error);
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dmu_objset_rele_flags(os, B_TRUE, FTAG);
return (SET_ERROR(EINVAL));
}
error = zfs_obj_to_path(os, zc->zc_obj, zc->zc_value,
sizeof (zc->zc_value));
dmu_objset_rele_flags(os, B_TRUE, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_obj object to find
*
* outputs:
* zc_stat stats on object
* zc_value path to object
*/
static int
zfs_ioc_obj_to_stats(zfs_cmd_t *zc)
{
objset_t *os;
int error;
/* XXX reading from objset not owned */
if ((error = dmu_objset_hold_flags(zc->zc_name, B_TRUE,
FTAG, &os)) != 0)
return (error);
if (dmu_objset_type(os) != DMU_OST_ZFS) {
dmu_objset_rele_flags(os, B_TRUE, FTAG);
return (SET_ERROR(EINVAL));
}
error = zfs_obj_to_stats(os, zc->zc_obj, &zc->zc_stat, zc->zc_value,
sizeof (zc->zc_value));
dmu_objset_rele_flags(os, B_TRUE, FTAG);
return (error);
}
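/*
 * inputs:
 * zc_name name of the pool
 * zc_nvlist_conf{_size} nvlist of devices to add
 */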
static int
zfs_ioc_vdev_add(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
nvlist_t *config;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error != 0)
return (error);
error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config);
if (error == 0) {
error = spa_vdev_add(spa, config);
nvlist_free(config);
}
spa_close(spa, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of the pool
- * zc_nvlist_conf nvlist of devices to remove
- * zc_cookie to stop the remove?
+ * zc_guid guid of vdev to remove
+ * zc_cookie cancel removal
*/
static int
zfs_ioc_vdev_remove(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error != 0)
return (error);
- error = spa_vdev_remove(spa, zc->zc_guid, B_FALSE);
+ if (zc->zc_cookie != 0) {
+ error = spa_vdev_remove_cancel(spa);
+ } else {
+ error = spa_vdev_remove(spa, zc->zc_guid, B_FALSE);
+ }
spa_close(spa, FTAG);
return (error);
}
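/*
 * inputs:
 * zc_name name of the pool
 * zc_guid guid of vdev to change
 * zc_cookie requested vdev state (vdev_state_t)
 * zc_obj aux state, for fault/degrade (vdev_aux_t)
 *
 * outputs:
 * zc_cookie resulting vdev state
 */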
static int
zfs_ioc_vdev_set_state(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
vdev_state_t newstate = VDEV_STATE_UNKNOWN;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
switch (zc->zc_cookie) {
case VDEV_STATE_ONLINE:
error = vdev_online(spa, zc->zc_guid, zc->zc_obj, &newstate);
break;
case VDEV_STATE_OFFLINE:
error = vdev_offline(spa, zc->zc_guid, zc->zc_obj);
break;
case VDEV_STATE_FAULTED:
if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED &&
zc->zc_obj != VDEV_AUX_EXTERNAL &&
zc->zc_obj != VDEV_AUX_EXTERNAL_PERSIST)
zc->zc_obj = VDEV_AUX_ERR_EXCEEDED;
error = vdev_fault(spa, zc->zc_guid, zc->zc_obj);
break;
case VDEV_STATE_DEGRADED:
if (zc->zc_obj != VDEV_AUX_ERR_EXCEEDED &&
zc->zc_obj != VDEV_AUX_EXTERNAL)
zc->zc_obj = VDEV_AUX_ERR_EXCEEDED;
error = vdev_degrade(spa, zc->zc_guid, zc->zc_obj);
break;
default:
error = SET_ERROR(EINVAL);
}
zc->zc_cookie = newstate;
spa_close(spa, FTAG);
return (error);
}
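/*
 * inputs:
 * zc_name name of the pool
 * zc_guid guid of the vdev to attach to
 * zc_nvlist_conf{_size} nvlist describing the new device
 * zc_cookie replacing flag
 */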
static int
zfs_ioc_vdev_attach(zfs_cmd_t *zc)
{
spa_t *spa;
int replacing = zc->zc_cookie;
nvlist_t *config;
int error;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config)) == 0) {
error = spa_vdev_attach(spa, zc->zc_guid, config, replacing);
nvlist_free(config);
}
spa_close(spa, FTAG);
return (error);
}
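/*
 * inputs:
 * zc_name name of the pool
 * zc_guid guid of the vdev to detach
 */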
static int
zfs_ioc_vdev_detach(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
error = spa_vdev_detach(spa, zc->zc_guid, 0, B_FALSE);
spa_close(spa, FTAG);
return (error);
}
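/*
 * inputs:
 * zc_name name of the pool
 * zc_string name of the new pool
 * zc_nvlist_conf{_size} nvlist of devices to split off
 * zc_nvlist_src{_size} nvlist of properties for the new pool (optional)
 * zc_cookie ZPOOL_EXPORT_AFTER_SPLIT flag
 */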
static int
zfs_ioc_vdev_split(zfs_cmd_t *zc)
{
spa_t *spa;
nvlist_t *config, *props = NULL;
int error;
boolean_t exp = !!(zc->zc_cookie & ZPOOL_EXPORT_AFTER_SPLIT);
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
if ((error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &config))) {
spa_close(spa, FTAG);
return (error);
}
if (zc->zc_nvlist_src_size != 0 && (error =
get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props))) {
spa_close(spa, FTAG);
nvlist_free(config);
return (error);
}
error = spa_vdev_split_mirror(spa, zc->zc_string, config, props, exp);
spa_close(spa, FTAG);
nvlist_free(config);
nvlist_free(props);
return (error);
}
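/*
 * inputs:
 * zc_name name of the pool
 * zc_guid guid of the vdev
 * zc_value new device path
 */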
static int
zfs_ioc_vdev_setpath(zfs_cmd_t *zc)
{
spa_t *spa;
char *path = zc->zc_value;
uint64_t guid = zc->zc_guid;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error != 0)
return (error);
error = spa_vdev_setpath(spa, guid, path);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_vdev_setfru(zfs_cmd_t *zc)
{
spa_t *spa;
char *fru = zc->zc_value;
uint64_t guid = zc->zc_guid;
int error;
error = spa_open(zc->zc_name, &spa, FTAG);
if (error != 0)
return (error);
error = spa_vdev_setfru(spa, guid, fru);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_objset_stats_impl(zfs_cmd_t *zc, objset_t *os)
{
int error = 0;
nvlist_t *nv;
dmu_objset_fast_stat(os, &zc->zc_objset_stats);
if (zc->zc_nvlist_dst != 0 &&
(error = dsl_prop_get_all(os, &nv)) == 0) {
dmu_objset_stats(os, nv);
/*
* NB: zvol_get_stats() will read the objset contents,
* which we aren't supposed to do with a
* DS_MODE_USER hold, because it could be
* inconsistent. So this is a bit of a workaround...
* XXX reading without owning
*/
if (!zc->zc_objset_stats.dds_inconsistent &&
dmu_objset_type(os) == DMU_OST_ZVOL) {
error = zvol_get_stats(os, nv);
if (error == EIO) {
nvlist_free(nv);
return (error);
}
VERIFY0(error);
}
if (error == 0)
error = put_nvlist(zc, nv);
nvlist_free(nv);
}
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_nvlist_dst_size size of buffer for property nvlist
*
* outputs:
* zc_objset_stats stats
* zc_nvlist_dst property nvlist
* zc_nvlist_dst_size size of property nvlist
*/
static int
zfs_ioc_objset_stats(zfs_cmd_t *zc)
{
objset_t *os;
int error;
error = dmu_objset_hold(zc->zc_name, FTAG, &os);
if (error == 0) {
error = zfs_ioc_objset_stats_impl(zc, os);
dmu_objset_rele(os, FTAG);
}
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_nvlist_dst_size size of buffer for property nvlist
*
* outputs:
* zc_nvlist_dst received property nvlist
* zc_nvlist_dst_size size of received property nvlist
*
* Gets received properties (distinct from local properties on or after
* SPA_VERSION_RECVD_PROPS) for callers who want to differentiate received from
* local property values.
*/
static int
zfs_ioc_objset_recvd_props(zfs_cmd_t *zc)
{
int error = 0;
nvlist_t *nv;
/*
* Without this check, we would return local property values if the
* caller has not already received properties on or after
* SPA_VERSION_RECVD_PROPS.
*/
if (!dsl_prop_get_hasrecvd(zc->zc_name))
return (SET_ERROR(ENOTSUP));
if (zc->zc_nvlist_dst != 0 &&
(error = dsl_prop_get_received(zc->zc_name, &nv)) == 0) {
error = put_nvlist(zc, nv);
nvlist_free(nv);
}
return (error);
}
static int
nvl_add_zplprop(objset_t *os, nvlist_t *props, zfs_prop_t prop)
{
uint64_t value;
int error;
/*
* zfs_get_zplprop() will either find a value or give us
* the default value (if there is one).
*/
if ((error = zfs_get_zplprop(os, prop, &value)) != 0)
return (error);
VERIFY(nvlist_add_uint64(props, zfs_prop_to_name(prop), value) == 0);
return (0);
}
/*
* inputs:
* zc_name name of filesystem
* zc_nvlist_dst_size size of buffer for zpl property nvlist
*
* outputs:
* zc_nvlist_dst zpl property nvlist
* zc_nvlist_dst_size size of zpl property nvlist
*/
static int
zfs_ioc_objset_zplprops(zfs_cmd_t *zc)
{
objset_t *os;
int err;
/* XXX reading without owning */
if ((err = dmu_objset_hold(zc->zc_name, FTAG, &os)))
return (err);
dmu_objset_fast_stat(os, &zc->zc_objset_stats);
/*
* NB: nvl_add_zplprop() will read the objset contents,
* which we aren't supposed to do with a DS_MODE_USER
* hold, because it could be inconsistent.
*/
if (zc->zc_nvlist_dst != 0 &&
!zc->zc_objset_stats.dds_inconsistent &&
dmu_objset_type(os) == DMU_OST_ZFS) {
nvlist_t *nv;
VERIFY(nvlist_alloc(&nv, NV_UNIQUE_NAME, KM_SLEEP) == 0);
if ((err = nvl_add_zplprop(os, nv, ZFS_PROP_VERSION)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_NORMALIZE)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_UTF8ONLY)) == 0 &&
(err = nvl_add_zplprop(os, nv, ZFS_PROP_CASE)) == 0)
err = put_nvlist(zc, nv);
nvlist_free(nv);
} else {
err = SET_ERROR(ENOENT);
}
dmu_objset_rele(os, FTAG);
return (err);
}
boolean_t
dataset_name_hidden(const char *name)
{
/*
* Skip over datasets that are not visible in this zone,
* internal datasets (which have a $ in their name), and
* temporary datasets (which have a % in their name).
*/
if (strchr(name, '$') != NULL)
return (B_TRUE);
if (strchr(name, '%') != NULL)
return (B_TRUE);
if (!INGLOBALZONE(curproc) && !zone_dataset_visible(name, NULL))
return (B_TRUE);
return (B_FALSE);
}
/*
* inputs:
* zc_name name of filesystem
* zc_cookie zap cursor
* zc_nvlist_dst_size size of buffer for property nvlist
*
* outputs:
* zc_name name of next filesystem
* zc_cookie zap cursor
* zc_objset_stats stats
* zc_nvlist_dst property nvlist
* zc_nvlist_dst_size size of property nvlist
*/
static int
zfs_ioc_dataset_list_next(zfs_cmd_t *zc)
{
objset_t *os;
int error;
char *p;
size_t orig_len = strlen(zc->zc_name);
top:
if ((error = dmu_objset_hold(zc->zc_name, FTAG, &os))) {
if (error == ENOENT)
error = SET_ERROR(ESRCH);
return (error);
}
p = strrchr(zc->zc_name, '/');
if (p == NULL || p[1] != '\0')
(void) strlcat(zc->zc_name, "/", sizeof (zc->zc_name));
p = zc->zc_name + strlen(zc->zc_name);
do {
error = dmu_dir_list_next(os,
sizeof (zc->zc_name) - (p - zc->zc_name), p,
NULL, &zc->zc_cookie);
if (error == ENOENT)
error = SET_ERROR(ESRCH);
} while (error == 0 && dataset_name_hidden(zc->zc_name));
dmu_objset_rele(os, FTAG);
/*
* If it's an internal dataset (i.e., with a '$' in its name),
* don't try to get stats for it, otherwise we'll return ENOENT.
*/
if (error == 0 && strchr(zc->zc_name, '$') == NULL) {
error = zfs_ioc_objset_stats(zc); /* fill in the stats */
if (error == ENOENT) {
/* We lost a race with destroy, get the next one. */
zc->zc_name[orig_len] = '\0';
goto top;
}
}
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_cookie zap cursor
* zc_nvlist_dst_size size of buffer for property nvlist
*
* outputs:
* zc_name name of next snapshot
* zc_objset_stats stats
* zc_nvlist_dst property nvlist
* zc_nvlist_dst_size size of property nvlist
*/
static int
zfs_ioc_snapshot_list_next(zfs_cmd_t *zc)
{
objset_t *os;
int error;
error = dmu_objset_hold(zc->zc_name, FTAG, &os);
if (error != 0) {
return (error == ENOENT ? ESRCH : error);
}
/*
* A dataset name of maximum length cannot have any snapshots,
* so exit immediately.
*/
if (strlcat(zc->zc_name, "@", sizeof (zc->zc_name)) >=
ZFS_MAX_DATASET_NAME_LEN) {
dmu_objset_rele(os, FTAG);
return (SET_ERROR(ESRCH));
}
error = dmu_snapshot_list_next(os,
sizeof (zc->zc_name) - strlen(zc->zc_name),
zc->zc_name + strlen(zc->zc_name), &zc->zc_obj, &zc->zc_cookie,
NULL);
if (error == 0 && !zc->zc_simple) {
dsl_dataset_t *ds;
dsl_pool_t *dp = os->os_dsl_dataset->ds_dir->dd_pool;
error = dsl_dataset_hold_obj(dp, zc->zc_obj, FTAG, &ds);
if (error == 0) {
objset_t *ossnap;
error = dmu_objset_from_ds(ds, &ossnap);
if (error == 0)
error = zfs_ioc_objset_stats_impl(zc, ossnap);
dsl_dataset_rele(ds, FTAG);
}
} else if (error == ENOENT) {
error = SET_ERROR(ESRCH);
}
dmu_objset_rele(os, FTAG);
/* if we failed, undo the @ that we tacked on to zc_name */
if (error != 0)
*strchr(zc->zc_name, '@') = '\0';
return (error);
}
static int
zfs_prop_set_userquota(const char *dsname, nvpair_t *pair)
{
const char *propname = nvpair_name(pair);
uint64_t *valary;
unsigned int vallen;
const char *domain;
char *dash;
zfs_userquota_prop_t type;
uint64_t rid;
uint64_t quota;
zfsvfs_t *zfsvfs;
int err;
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&pair) != 0)
return (SET_ERROR(EINVAL));
}
/*
* A correctly constructed propname is encoded as
* userquota@<rid>-<domain>.
*/
if ((dash = strchr(propname, '-')) == NULL ||
nvpair_value_uint64_array(pair, &valary, &vallen) != 0 ||
vallen != 3)
return (SET_ERROR(EINVAL));
domain = dash + 1;
type = valary[0];
rid = valary[1];
quota = valary[2];
err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_FALSE);
if (err == 0) {
err = zfs_set_userquota(zfsvfs, type, domain, rid, quota);
zfsvfs_rele(zfsvfs, FTAG);
}
return (err);
}
/*
* If the named property is one that has a special function to set its value,
* return 0 on success and a positive error code on failure; otherwise if it is
* not one of the special properties handled by this function, return -1.
*
* XXX: It would be better for callers of the property interface if we handled
* these special cases in dsl_prop.c (in the dsl layer).
*/
static int
zfs_prop_set_special(const char *dsname, zprop_source_t source,
nvpair_t *pair)
{
const char *propname = nvpair_name(pair);
zfs_prop_t prop = zfs_name_to_prop(propname);
uint64_t intval = 0;
char *strval = NULL;
int err = -1;
if (prop == ZPROP_INVAL) {
if (zfs_prop_userquota(propname))
return (zfs_prop_set_userquota(dsname, pair));
return (-1);
}
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&pair) == 0);
}
/* all special properties are numeric except for keylocation */
if (zfs_prop_get_type(prop) == PROP_TYPE_STRING) {
strval = fnvpair_value_string(pair);
} else {
intval = fnvpair_value_uint64(pair);
}
switch (prop) {
case ZFS_PROP_QUOTA:
err = dsl_dir_set_quota(dsname, source, intval);
break;
case ZFS_PROP_REFQUOTA:
err = dsl_dataset_set_refquota(dsname, source, intval);
break;
case ZFS_PROP_FILESYSTEM_LIMIT:
case ZFS_PROP_SNAPSHOT_LIMIT:
if (intval == UINT64_MAX) {
/* clearing the limit, just do it */
err = 0;
} else {
err = dsl_dir_activate_fs_ss_limit(dsname);
}
/*
* Set err to -1 to force the zfs_set_prop_nvlist code down the
* default path to set the value in the nvlist.
*/
if (err == 0)
err = -1;
break;
case ZFS_PROP_KEYLOCATION:
err = dsl_crypto_can_set_keylocation(dsname, strval);
/*
* Set err to -1 to force the zfs_set_prop_nvlist code down the
* default path to set the value in the nvlist.
*/
if (err == 0)
err = -1;
break;
case ZFS_PROP_RESERVATION:
err = dsl_dir_set_reservation(dsname, source, intval);
break;
case ZFS_PROP_REFRESERVATION:
err = dsl_dataset_set_refreservation(dsname, source, intval);
break;
case ZFS_PROP_VOLSIZE:
err = zvol_set_volsize(dsname, intval);
break;
case ZFS_PROP_SNAPDEV:
err = zvol_set_snapdev(dsname, source, intval);
break;
case ZFS_PROP_VOLMODE:
err = zvol_set_volmode(dsname, source, intval);
break;
case ZFS_PROP_VERSION:
{
zfsvfs_t *zfsvfs;
if ((err = zfsvfs_hold(dsname, FTAG, &zfsvfs, B_TRUE)) != 0)
break;
err = zfs_set_version(zfsvfs, intval);
zfsvfs_rele(zfsvfs, FTAG);
if (err == 0 && intval >= ZPL_VERSION_USERSPACE) {
zfs_cmd_t *zc;
zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
(void) strcpy(zc->zc_name, dsname);
(void) zfs_ioc_userspace_upgrade(zc);
(void) zfs_ioc_id_quota_upgrade(zc);
kmem_free(zc, sizeof (zfs_cmd_t));
}
break;
}
default:
err = -1;
}
return (err);
}
/*
* This function is best effort. If it fails to set any of the given properties,
* it continues to set as many as it can and returns the last error
* encountered. If the caller provides a non-NULL errlist, it will be filled in
* with the list of names of all the properties that failed along with the
* corresponding error numbers.
*
* If every property is set successfully, zero is returned and errlist is not
* modified.
*/
int
zfs_set_prop_nvlist(const char *dsname, zprop_source_t source, nvlist_t *nvl,
nvlist_t *errlist)
{
nvpair_t *pair;
nvpair_t *propval;
int rv = 0;
uint64_t intval;
char *strval;
nvlist_t *genericnvl = fnvlist_alloc();
nvlist_t *retrynvl = fnvlist_alloc();
retry:
pair = NULL;
while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) {
const char *propname = nvpair_name(pair);
zfs_prop_t prop = zfs_name_to_prop(propname);
int err = 0;
/* decode the property value */
propval = pair;
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
attrs = fnvpair_value_nvlist(pair);
if (nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&propval) != 0)
err = SET_ERROR(EINVAL);
}
/* Validate value type */
if (err == 0 && source == ZPROP_SRC_INHERITED) {
/* inherited properties are expected to be booleans */
if (nvpair_type(propval) != DATA_TYPE_BOOLEAN)
err = SET_ERROR(EINVAL);
} else if (err == 0 && prop == ZPROP_INVAL) {
if (zfs_prop_user(propname)) {
if (nvpair_type(propval) != DATA_TYPE_STRING)
err = SET_ERROR(EINVAL);
} else if (zfs_prop_userquota(propname)) {
if (nvpair_type(propval) !=
DATA_TYPE_UINT64_ARRAY)
err = SET_ERROR(EINVAL);
} else {
err = SET_ERROR(EINVAL);
}
} else if (err == 0) {
if (nvpair_type(propval) == DATA_TYPE_STRING) {
if (zfs_prop_get_type(prop) != PROP_TYPE_STRING)
err = SET_ERROR(EINVAL);
} else if (nvpair_type(propval) == DATA_TYPE_UINT64) {
const char *unused;
intval = fnvpair_value_uint64(propval);
switch (zfs_prop_get_type(prop)) {
case PROP_TYPE_NUMBER:
break;
case PROP_TYPE_STRING:
err = SET_ERROR(EINVAL);
break;
case PROP_TYPE_INDEX:
if (zfs_prop_index_to_string(prop,
intval, &unused) != 0)
err = SET_ERROR(EINVAL);
break;
default:
cmn_err(CE_PANIC,
"unknown property type");
}
} else {
err = SET_ERROR(EINVAL);
}
}
/* Validate permissions */
if (err == 0)
err = zfs_check_settable(dsname, pair, CRED());
if (err == 0) {
if (source == ZPROP_SRC_INHERITED)
err = -1; /* does not need special handling */
else
err = zfs_prop_set_special(dsname, source,
pair);
if (err == -1) {
/*
* For better performance we build up a list of
* properties to set in a single transaction.
*/
err = nvlist_add_nvpair(genericnvl, pair);
} else if (err != 0 && nvl != retrynvl) {
/*
* This may be a spurious error caused by
* receiving quota and reservation out of order.
* Try again in a second pass.
*/
err = nvlist_add_nvpair(retrynvl, pair);
}
}
if (err != 0) {
if (errlist != NULL)
fnvlist_add_int32(errlist, propname, err);
rv = err;
}
}
if (nvl != retrynvl && !nvlist_empty(retrynvl)) {
nvl = retrynvl;
goto retry;
}
if (!nvlist_empty(genericnvl) &&
dsl_props_set(dsname, source, genericnvl) != 0) {
/*
* If this fails, we still want to set as many properties as we
* can, so try setting them individually.
*/
pair = NULL;
while ((pair = nvlist_next_nvpair(genericnvl, pair)) != NULL) {
const char *propname = nvpair_name(pair);
int err = 0;
propval = pair;
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
attrs = fnvpair_value_nvlist(pair);
propval = fnvlist_lookup_nvpair(attrs,
ZPROP_VALUE);
}
if (nvpair_type(propval) == DATA_TYPE_STRING) {
strval = fnvpair_value_string(propval);
err = dsl_prop_set_string(dsname, propname,
source, strval);
} else if (nvpair_type(propval) == DATA_TYPE_BOOLEAN) {
err = dsl_prop_inherit(dsname, propname,
source);
} else {
intval = fnvpair_value_uint64(propval);
err = dsl_prop_set_int(dsname, propname, source,
intval);
}
if (err != 0) {
if (errlist != NULL) {
fnvlist_add_int32(errlist, propname,
err);
}
rv = err;
}
}
}
nvlist_free(genericnvl);
nvlist_free(retrynvl);
return (rv);
}
/*
* Check that all the properties are valid user properties.
*/
static int
zfs_check_userprops(const char *fsname, nvlist_t *nvl)
{
nvpair_t *pair = NULL;
int error = 0;
while ((pair = nvlist_next_nvpair(nvl, pair)) != NULL) {
const char *propname = nvpair_name(pair);
if (!zfs_prop_user(propname) ||
nvpair_type(pair) != DATA_TYPE_STRING)
return (SET_ERROR(EINVAL));
if ((error = zfs_secpolicy_write_perms(fsname,
ZFS_DELEG_PERM_USERPROP, CRED())))
return (error);
if (strlen(propname) >= ZAP_MAXNAMELEN)
return (SET_ERROR(ENAMETOOLONG));
if (strlen(fnvpair_value_string(pair)) >= ZAP_MAXVALUELEN)
return (SET_ERROR(E2BIG));
}
return (0);
}
static void
props_skip(nvlist_t *props, nvlist_t *skipped, nvlist_t **newprops)
{
nvpair_t *pair;
VERIFY(nvlist_alloc(newprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
pair = NULL;
while ((pair = nvlist_next_nvpair(props, pair)) != NULL) {
if (nvlist_exists(skipped, nvpair_name(pair)))
continue;
VERIFY(nvlist_add_nvpair(*newprops, pair) == 0);
}
}
static int
clear_received_props(const char *dsname, nvlist_t *props,
nvlist_t *skipped)
{
int err = 0;
nvlist_t *cleared_props = NULL;
props_skip(props, skipped, &cleared_props);
if (!nvlist_empty(cleared_props)) {
/*
* Acts on local properties until the dataset has received
* properties at least once on or after SPA_VERSION_RECVD_PROPS.
*/
zprop_source_t flags = (ZPROP_SRC_NONE |
(dsl_prop_get_hasrecvd(dsname) ? ZPROP_SRC_RECEIVED : 0));
err = zfs_set_prop_nvlist(dsname, flags, cleared_props, NULL);
}
nvlist_free(cleared_props);
return (err);
}
/*
* inputs:
* zc_name name of filesystem
* zc_value name of property to set
* zc_nvlist_src{_size} nvlist of properties to apply
* zc_cookie received properties flag
*
* outputs:
* zc_nvlist_dst{_size} error for each unapplied received property
*/
static int
zfs_ioc_set_prop(zfs_cmd_t *zc)
{
nvlist_t *nvl;
boolean_t received = zc->zc_cookie;
zprop_source_t source = (received ? ZPROP_SRC_RECEIVED :
ZPROP_SRC_LOCAL);
nvlist_t *errors;
int error;
if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &nvl)) != 0)
return (error);
if (received) {
nvlist_t *origprops;
if (dsl_prop_get_received(zc->zc_name, &origprops) == 0) {
(void) clear_received_props(zc->zc_name,
origprops, nvl);
nvlist_free(origprops);
}
error = dsl_prop_set_hasrecvd(zc->zc_name);
}
errors = fnvlist_alloc();
if (error == 0)
error = zfs_set_prop_nvlist(zc->zc_name, source, nvl, errors);
if (zc->zc_nvlist_dst != 0 && errors != NULL) {
(void) put_nvlist(zc, errors);
}
nvlist_free(errors);
nvlist_free(nvl);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_value name of property to inherit
* zc_cookie revert to received value if TRUE
*
* outputs: none
*/
static int
zfs_ioc_inherit_prop(zfs_cmd_t *zc)
{
const char *propname = zc->zc_value;
zfs_prop_t prop = zfs_name_to_prop(propname);
boolean_t received = zc->zc_cookie;
zprop_source_t source = (received
? ZPROP_SRC_NONE /* revert to received value, if any */
: ZPROP_SRC_INHERITED); /* explicitly inherit */
nvlist_t *dummy;
nvpair_t *pair;
zprop_type_t type;
int err;
if (!received) {
/*
* Only check this in the non-received case. We want to allow
* 'inherit -S' to revert non-inheritable properties like quota
* and reservation to the received or default values even though
* they are not considered inheritable.
*/
if (prop != ZPROP_INVAL && !zfs_prop_inheritable(prop))
return (SET_ERROR(EINVAL));
}
if (prop == ZPROP_INVAL) {
if (!zfs_prop_user(propname))
return (SET_ERROR(EINVAL));
type = PROP_TYPE_STRING;
} else if (prop == ZFS_PROP_VOLSIZE || prop == ZFS_PROP_VERSION) {
return (SET_ERROR(EINVAL));
} else {
type = zfs_prop_get_type(prop);
}
/*
* zfs_prop_set_special() expects properties in the form of an
* nvpair with type info.
*/
dummy = fnvlist_alloc();
switch (type) {
case PROP_TYPE_STRING:
VERIFY(0 == nvlist_add_string(dummy, propname, ""));
break;
case PROP_TYPE_NUMBER:
case PROP_TYPE_INDEX:
VERIFY(0 == nvlist_add_uint64(dummy, propname, 0));
break;
default:
err = SET_ERROR(EINVAL);
goto errout;
}
pair = nvlist_next_nvpair(dummy, NULL);
if (pair == NULL) {
err = SET_ERROR(EINVAL);
} else {
err = zfs_prop_set_special(zc->zc_name, source, pair);
if (err == -1) /* property is not "special", needs handling */
err = dsl_prop_inherit(zc->zc_name, zc->zc_value,
source);
}
errout:
nvlist_free(dummy);
return (err);
}
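/*
 * inputs:
 * zc_name name of the pool
 * zc_nvlist_src{_size} nvlist of pool properties to set
 */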
static int
zfs_ioc_pool_set_props(zfs_cmd_t *zc)
{
nvlist_t *props;
spa_t *spa;
int error;
nvpair_t *pair;
if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &props)))
return (error);
/*
* If the only property is the configfile, then just do a spa_lookup()
* to handle the faulted case.
*/
pair = nvlist_next_nvpair(props, NULL);
if (pair != NULL && strcmp(nvpair_name(pair),
zpool_prop_to_name(ZPOOL_PROP_CACHEFILE)) == 0 &&
nvlist_next_nvpair(props, pair) == NULL) {
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(zc->zc_name)) != NULL) {
spa_configfile_set(spa, props, B_FALSE);
- spa_config_sync(spa, B_FALSE, B_TRUE);
+ spa_write_cachefile(spa, B_FALSE, B_TRUE);
}
mutex_exit(&spa_namespace_lock);
if (spa != NULL) {
nvlist_free(props);
return (0);
}
}
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) {
nvlist_free(props);
return (error);
}
error = spa_prop_set(spa, props);
nvlist_free(props);
spa_close(spa, FTAG);
return (error);
}
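/*
 * inputs:
 * zc_name name of the pool
 *
 * outputs:
 * zc_nvlist_dst{_size} nvlist of pool properties
 */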
static int
zfs_ioc_pool_get_props(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
nvlist_t *nvp = NULL;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0) {
/*
* If the pool is faulted, there may be properties we can still
* get (such as altroot and cachefile), so attempt to get them
* anyway.
*/
mutex_enter(&spa_namespace_lock);
if ((spa = spa_lookup(zc->zc_name)) != NULL)
error = spa_prop_get(spa, &nvp);
mutex_exit(&spa_namespace_lock);
} else {
error = spa_prop_get(spa, &nvp);
spa_close(spa, FTAG);
}
if (error == 0 && zc->zc_nvlist_dst != 0)
error = put_nvlist(zc, nvp);
else
error = SET_ERROR(EFAULT);
nvlist_free(nvp);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_nvlist_src{_size} nvlist of delegated permissions
* zc_perm_action allow/unallow flag
*
* outputs: none
*/
static int
zfs_ioc_set_fsacl(zfs_cmd_t *zc)
{
int error;
nvlist_t *fsaclnv = NULL;
if ((error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &fsaclnv)) != 0)
return (error);
/*
* Verify nvlist is constructed correctly
*/
if ((error = zfs_deleg_verify_nvlist(fsaclnv)) != 0) {
nvlist_free(fsaclnv);
return (SET_ERROR(EINVAL));
}
/*
* If we don't have PRIV_SYS_MOUNT, then validate
* that the user is allowed to hand out each permission in
* the nvlist(s)
*/
error = secpolicy_zfs(CRED());
if (error != 0) {
if (zc->zc_perm_action == B_FALSE) {
error = dsl_deleg_can_allow(zc->zc_name,
fsaclnv, CRED());
} else {
error = dsl_deleg_can_unallow(zc->zc_name,
fsaclnv, CRED());
}
}
if (error == 0)
error = dsl_deleg_set(zc->zc_name, fsaclnv, zc->zc_perm_action);
nvlist_free(fsaclnv);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
*
* outputs:
* zc_nvlist_src{_size} nvlist of delegated permissions
*/
static int
zfs_ioc_get_fsacl(zfs_cmd_t *zc)
{
nvlist_t *nvp;
int error;
if ((error = dsl_deleg_get(zc->zc_name, &nvp)) == 0) {
error = put_nvlist(zc, nvp);
nvlist_free(nvp);
}
return (error);
}
/* ARGSUSED */
static void
zfs_create_cb(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx)
{
zfs_creat_t *zct = arg;
zfs_create_fs(os, cr, zct->zct_zplprops, tx);
}
#define ZFS_PROP_UNDEFINED ((uint64_t)-1)
/*
* inputs:
* os parent objset pointer (NULL if root fs)
* fuids_ok fuids allowed in this version of the spa?
* sa_ok SAs allowed in this version of the spa?
* createprops list of properties requested by creator
*
* outputs:
* zplprops values for the zplprops we attach to the master node object
* is_ci true if requested file system will be purely case-insensitive
*
* Determine the settings for utf8only, normalization and
* casesensitivity. Specific values may have been requested by the
* creator and/or we can inherit values from the parent dataset. If
* the file system is of too early a vintage, a creator cannot
* request settings for these properties, even if the requested
* setting is the default value. We don't actually want to create dsl
* properties for these, so remove them from the source nvlist after
* processing.
*/
static int
zfs_fill_zplprops_impl(objset_t *os, uint64_t zplver,
boolean_t fuids_ok, boolean_t sa_ok, nvlist_t *createprops,
nvlist_t *zplprops, boolean_t *is_ci)
{
uint64_t sense = ZFS_PROP_UNDEFINED;
uint64_t norm = ZFS_PROP_UNDEFINED;
uint64_t u8 = ZFS_PROP_UNDEFINED;
int error;
ASSERT(zplprops != NULL);
if (os != NULL && os->os_phys->os_type != DMU_OST_ZFS)
return (SET_ERROR(EINVAL));
/*
* Pull out creator prop choices, if any.
*/
if (createprops) {
(void) nvlist_lookup_uint64(createprops,
zfs_prop_to_name(ZFS_PROP_VERSION), &zplver);
(void) nvlist_lookup_uint64(createprops,
zfs_prop_to_name(ZFS_PROP_NORMALIZE), &norm);
(void) nvlist_remove_all(createprops,
zfs_prop_to_name(ZFS_PROP_NORMALIZE));
(void) nvlist_lookup_uint64(createprops,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), &u8);
(void) nvlist_remove_all(createprops,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY));
(void) nvlist_lookup_uint64(createprops,
zfs_prop_to_name(ZFS_PROP_CASE), &sense);
(void) nvlist_remove_all(createprops,
zfs_prop_to_name(ZFS_PROP_CASE));
}
/*
* If the zpl version requested is whacky or the file system
* or pool version is too "young" to support normalization
* and the creator tried to set a value for one of the props,
* error out.
*/
if ((zplver < ZPL_VERSION_INITIAL || zplver > ZPL_VERSION) ||
(zplver >= ZPL_VERSION_FUID && !fuids_ok) ||
(zplver >= ZPL_VERSION_SA && !sa_ok) ||
(zplver < ZPL_VERSION_NORMALIZATION &&
(norm != ZFS_PROP_UNDEFINED || u8 != ZFS_PROP_UNDEFINED ||
sense != ZFS_PROP_UNDEFINED)))
return (SET_ERROR(ENOTSUP));
/*
* Put the version in the zplprops
*/
VERIFY(nvlist_add_uint64(zplprops,
zfs_prop_to_name(ZFS_PROP_VERSION), zplver) == 0);
if (norm == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_NORMALIZE, &norm)) != 0)
return (error);
VERIFY(nvlist_add_uint64(zplprops,
zfs_prop_to_name(ZFS_PROP_NORMALIZE), norm) == 0);
/*
* If we're normalizing, names must always be valid UTF-8 strings.
*/
if (norm)
u8 = 1;
if (u8 == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_UTF8ONLY, &u8)) != 0)
return (error);
VERIFY(nvlist_add_uint64(zplprops,
zfs_prop_to_name(ZFS_PROP_UTF8ONLY), u8) == 0);
if (sense == ZFS_PROP_UNDEFINED &&
(error = zfs_get_zplprop(os, ZFS_PROP_CASE, &sense)) != 0)
return (error);
VERIFY(nvlist_add_uint64(zplprops,
zfs_prop_to_name(ZFS_PROP_CASE), sense) == 0);
if (is_ci)
*is_ci = (sense == ZFS_CASE_INSENSITIVE);
return (0);
}
static int
zfs_fill_zplprops(const char *dataset, nvlist_t *createprops,
nvlist_t *zplprops, boolean_t *is_ci)
{
boolean_t fuids_ok, sa_ok;
uint64_t zplver = ZPL_VERSION;
objset_t *os = NULL;
char parentname[ZFS_MAX_DATASET_NAME_LEN];
char *cp;
spa_t *spa;
uint64_t spa_vers;
int error;
(void) strlcpy(parentname, dataset, sizeof (parentname));
cp = strrchr(parentname, '/');
ASSERT(cp != NULL);
cp[0] = '\0';
if ((error = spa_open(dataset, &spa, FTAG)) != 0)
return (error);
spa_vers = spa_version(spa);
spa_close(spa, FTAG);
zplver = zfs_zpl_version_map(spa_vers);
fuids_ok = (zplver >= ZPL_VERSION_FUID);
sa_ok = (zplver >= ZPL_VERSION_SA);
/*
* Open parent object set so we can inherit zplprop values.
*/
if ((error = dmu_objset_hold(parentname, FTAG, &os)) != 0)
return (error);
error = zfs_fill_zplprops_impl(os, zplver, fuids_ok, sa_ok, createprops,
zplprops, is_ci);
dmu_objset_rele(os, FTAG);
return (error);
}
static int
zfs_fill_zplprops_root(uint64_t spa_vers, nvlist_t *createprops,
nvlist_t *zplprops, boolean_t *is_ci)
{
boolean_t fuids_ok;
boolean_t sa_ok;
uint64_t zplver = ZPL_VERSION;
int error;
zplver = zfs_zpl_version_map(spa_vers);
fuids_ok = (zplver >= ZPL_VERSION_FUID);
sa_ok = (zplver >= ZPL_VERSION_SA);
error = zfs_fill_zplprops_impl(NULL, zplver, fuids_ok, sa_ok,
createprops, zplprops, is_ci);
return (error);
}
/*
* innvl: {
* "type" -> dmu_objset_type_t (int32)
* (optional) "props" -> { prop -> value }
* (optional) "hidden_args" -> { "wkeydata" -> value }
* raw uint8_t array of encryption wrapping key data (32 bytes)
* }
*
* outnvl: propname -> error code (int32)
*/
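/*
 * Illustrative sketch (not part of this change): a user-space caller such as
 * libzfs_core might assemble the innvl described above with the libnvpair
 * fnvlist helpers; the property name and values used here are only examples.
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_int32(args, "type", DMU_OST_ZFS);
 *	fnvlist_add_string(props, "org.example:note", "created by example");
 *	fnvlist_add_nvlist(args, "props", props);
 *	... pass "args" as innvl, then fnvlist_free(props) and
 *	    fnvlist_free(args) ...
 */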
static int
zfs_ioc_create(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
int error = 0;
zfs_creat_t zct = { 0 };
nvlist_t *nvprops = NULL;
nvlist_t *hidden_args = NULL;
void (*cbfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx);
int32_t type32;
dmu_objset_type_t type;
boolean_t is_insensitive = B_FALSE;
dsl_crypto_params_t *dcp = NULL;
if (nvlist_lookup_int32(innvl, "type", &type32) != 0)
return (SET_ERROR(EINVAL));
type = type32;
(void) nvlist_lookup_nvlist(innvl, "props", &nvprops);
(void) nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args);
switch (type) {
case DMU_OST_ZFS:
cbfunc = zfs_create_cb;
break;
case DMU_OST_ZVOL:
cbfunc = zvol_create_cb;
break;
default:
cbfunc = NULL;
break;
}
if (strchr(fsname, '@') ||
strchr(fsname, '%'))
return (SET_ERROR(EINVAL));
zct.zct_props = nvprops;
if (cbfunc == NULL)
return (SET_ERROR(EINVAL));
if (type == DMU_OST_ZVOL) {
uint64_t volsize, volblocksize;
if (nvprops == NULL)
return (SET_ERROR(EINVAL));
if (nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLSIZE), &volsize) != 0)
return (SET_ERROR(EINVAL));
if ((error = nvlist_lookup_uint64(nvprops,
zfs_prop_to_name(ZFS_PROP_VOLBLOCKSIZE),
&volblocksize)) != 0 && error != ENOENT)
return (SET_ERROR(EINVAL));
if (error != 0)
volblocksize = zfs_prop_default_numeric(
ZFS_PROP_VOLBLOCKSIZE);
if ((error = zvol_check_volblocksize(fsname,
volblocksize)) != 0 ||
(error = zvol_check_volsize(volsize,
volblocksize)) != 0)
return (error);
} else if (type == DMU_OST_ZFS) {
int error;
/*
* We have to have normalization and
* case-folding flags correct when we do the
* file system creation, so go figure them out
* now.
*/
VERIFY(nvlist_alloc(&zct.zct_zplprops,
NV_UNIQUE_NAME, KM_SLEEP) == 0);
error = zfs_fill_zplprops(fsname, nvprops,
zct.zct_zplprops, &is_insensitive);
if (error != 0) {
nvlist_free(zct.zct_zplprops);
return (error);
}
}
error = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, nvprops,
hidden_args, &dcp);
if (error != 0) {
nvlist_free(zct.zct_zplprops);
return (error);
}
error = dmu_objset_create(fsname, type,
is_insensitive ? DS_FLAG_CI_DATASET : 0, dcp, cbfunc, &zct);
nvlist_free(zct.zct_zplprops);
dsl_crypto_params_free(dcp, !!error);
/*
* It would be nice to do this atomically.
*/
if (error == 0) {
error = zfs_set_prop_nvlist(fsname, ZPROP_SRC_LOCAL,
nvprops, outnvl);
if (error != 0) {
spa_t *spa;
int error2;
/*
* Volumes will return EBUSY and cannot be destroyed
* until all asynchronous minor handling has completed.
* Wait for the spa_zvol_taskq to drain then retry.
*/
error2 = dsl_destroy_head(fsname);
while ((error2 == EBUSY) && (type == DMU_OST_ZVOL)) {
error2 = spa_open(fsname, &spa, FTAG);
if (error2 == 0) {
taskq_wait(spa->spa_zvol_taskq);
spa_close(spa, FTAG);
}
error2 = dsl_destroy_head(fsname);
}
}
}
return (error);
}
/*
* innvl: {
* "origin" -> name of origin snapshot
* (optional) "props" -> { prop -> value }
* (optional) "hidden_args" -> { "wkeydata" -> value }
* raw uint8_t array of encryption wrapping key data (32 bytes)
* }
*
* outputs:
* outnvl: propname -> error code (int32)
*/
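/*
 * Illustrative sketch (not part of this change): the innvl above could be
 * built as follows; the dataset names are placeholders.
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	fnvlist_add_string(args, "origin", "pool/fs@snap");
 *	... optionally add a "props" nvlist as for zfs_ioc_create() ...
 */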
static int
zfs_ioc_clone(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
int error = 0;
nvlist_t *nvprops = NULL;
char *origin_name;
if (nvlist_lookup_string(innvl, "origin", &origin_name) != 0)
return (SET_ERROR(EINVAL));
(void) nvlist_lookup_nvlist(innvl, "props", &nvprops);
if (strchr(fsname, '@') ||
strchr(fsname, '%'))
return (SET_ERROR(EINVAL));
if (dataset_namecheck(origin_name, NULL, NULL) != 0)
return (SET_ERROR(EINVAL));
error = dmu_objset_clone(fsname, origin_name);
/*
* It would be nice to do this atomically.
*/
if (error == 0) {
error = zfs_set_prop_nvlist(fsname, ZPROP_SRC_LOCAL,
nvprops, outnvl);
if (error != 0)
(void) dsl_destroy_head(fsname);
}
return (error);
}
+/* ARGSUSED */
+static int
+zfs_ioc_remap(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
+{
+ if (strchr(fsname, '@') ||
+ strchr(fsname, '%'))
+ return (SET_ERROR(EINVAL));
+
+ return (dmu_objset_remap_indirects(fsname));
+}
+
/*
* innvl: {
* "snaps" -> { snapshot1, snapshot2 }
* (optional) "props" -> { prop -> value (string) }
* }
*
* outnvl: snapshot -> error code (int32)
*/
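/*
 * Illustrative sketch (not part of this change): "snaps" is keyed by full
 * snapshot names (only the names are used here), and "props" may carry user
 * properties as strings. The names below are placeholders.
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	nvlist_t *snaps = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "pool/fs@backup");
 *	fnvlist_add_boolean(snaps, "pool/fs2@backup");
 *	fnvlist_add_nvlist(args, "snaps", snaps);
 */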
static int
zfs_ioc_snapshot(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
nvlist_t *snaps;
nvlist_t *props = NULL;
int error, poollen;
nvpair_t *pair;
(void) nvlist_lookup_nvlist(innvl, "props", &props);
if ((error = zfs_check_userprops(poolname, props)) != 0)
return (error);
if (!nvlist_empty(props) &&
zfs_earlier_version(poolname, SPA_VERSION_SNAP_PROPS))
return (SET_ERROR(ENOTSUP));
if (nvlist_lookup_nvlist(innvl, "snaps", &snaps) != 0)
return (SET_ERROR(EINVAL));
poollen = strlen(poolname);
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
const char *name = nvpair_name(pair);
const char *cp = strchr(name, '@');
/*
* The snap name must contain an @, and the part after it must
* contain only valid characters.
*/
if (cp == NULL ||
zfs_component_namecheck(cp + 1, NULL, NULL) != 0)
return (SET_ERROR(EINVAL));
/*
* The snap must be in the specified pool.
*/
if (strncmp(name, poolname, poollen) != 0 ||
(name[poollen] != '/' && name[poollen] != '@'))
return (SET_ERROR(EXDEV));
/* This must be the only snap of this fs. */
for (nvpair_t *pair2 = nvlist_next_nvpair(snaps, pair);
pair2 != NULL; pair2 = nvlist_next_nvpair(snaps, pair2)) {
if (strncmp(name, nvpair_name(pair2), cp - name + 1)
== 0) {
return (SET_ERROR(EXDEV));
}
}
}
error = dsl_dataset_snapshot(snaps, props, outnvl);
return (error);
}
/*
* innvl: "message" -> string
*/
/* ARGSUSED */
static int
zfs_ioc_log_history(const char *unused, nvlist_t *innvl, nvlist_t *outnvl)
{
char *message;
spa_t *spa;
int error;
char *poolname;
/*
* The poolname in the ioctl is not set; we get it from the TSD,
* which was set at the end of the last successful ioctl that allows
* logging. The secpolicy func already checked that it is set.
* Only one log ioctl is allowed after each successful ioctl, so
* we clear the TSD here.
*/
poolname = tsd_get(zfs_allow_log_key);
if (poolname == NULL)
return (SET_ERROR(EINVAL));
(void) tsd_set(zfs_allow_log_key, NULL);
error = spa_open(poolname, &spa, FTAG);
strfree(poolname);
if (error != 0)
return (error);
if (nvlist_lookup_string(innvl, "message", &message) != 0) {
spa_close(spa, FTAG);
return (SET_ERROR(EINVAL));
}
if (spa_version(spa) < SPA_VERSION_ZPOOL_HISTORY) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
error = spa_history_log(spa, message);
spa_close(spa, FTAG);
return (error);
}
/*
* The dp_config_rwlock must not be held when calling this, because the
* unmount may need to write out data.
*
* This function is best-effort. Callers must deal gracefully if it
* remains mounted (or is remounted after this call).
*
* The function is void: if the argument is not a snapshot name it does
* nothing, and any failure to unmount is not reported to the caller.
*/
void
zfs_unmount_snap(const char *snapname)
{
if (strchr(snapname, '@') == NULL)
return;
(void) zfsctl_snapshot_unmount((char *)snapname, MNT_FORCE);
}
/* ARGSUSED */
static int
zfs_unmount_snap_cb(const char *snapname, void *arg)
{
zfs_unmount_snap(snapname);
return (0);
}
/*
* When a clone is destroyed, its origin may also need to be destroyed,
* in which case it must be unmounted. This routine will do that unmount
* if necessary.
*/
void
zfs_destroy_unmount_origin(const char *fsname)
{
int error;
objset_t *os;
dsl_dataset_t *ds;
error = dmu_objset_hold(fsname, FTAG, &os);
if (error != 0)
return;
ds = dmu_objset_ds(os);
if (dsl_dir_is_clone(ds->ds_dir) && DS_IS_DEFER_DESTROY(ds->ds_prev)) {
char originname[ZFS_MAX_DATASET_NAME_LEN];
dsl_dataset_name(ds->ds_prev, originname);
dmu_objset_rele(os, FTAG);
zfs_unmount_snap(originname);
} else {
dmu_objset_rele(os, FTAG);
}
}
/*
* innvl: {
* "snaps" -> { snapshot1, snapshot2 }
* (optional boolean) "defer"
* }
*
* outnvl: snapshot -> error code (int32)
*/
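/*
 * Illustrative sketch (not part of this change): as with zfs_ioc_snapshot(),
 * "snaps" is keyed by full snapshot names; adding the "defer" key requests a
 * deferred destroy. The names below are placeholders.
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	nvlist_t *snaps = fnvlist_alloc();
 *	fnvlist_add_boolean(snaps, "pool/fs@old");
 *	fnvlist_add_nvlist(args, "snaps", snaps);
 *	fnvlist_add_boolean(args, "defer");
 */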
/* ARGSUSED */
static int
zfs_ioc_destroy_snaps(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
nvlist_t *snaps;
nvpair_t *pair;
boolean_t defer;
if (nvlist_lookup_nvlist(innvl, "snaps", &snaps) != 0)
return (SET_ERROR(EINVAL));
defer = nvlist_exists(innvl, "defer");
for (pair = nvlist_next_nvpair(snaps, NULL); pair != NULL;
pair = nvlist_next_nvpair(snaps, pair)) {
zfs_unmount_snap(nvpair_name(pair));
}
return (dsl_destroy_snapshots_nvl(snaps, defer, outnvl));
}
/*
* Create bookmarks. Bookmark names are of the form <fs>#<bmark>.
* All bookmarks must be in the same pool.
*
* innvl: {
* bookmark1 -> snapshot1, bookmark2 -> snapshot2
* }
*
* outnvl: bookmark -> error code (int32)
*
*/
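/*
 * Illustrative sketch (not part of this change): each innvl pair maps a new
 * bookmark name to the snapshot it should reference; both names below are
 * placeholders.
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	fnvlist_add_string(args, "pool/fs#mark", "pool/fs@snap");
 */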
/* ARGSUSED */
static int
zfs_ioc_bookmark(const char *poolname, nvlist_t *innvl, nvlist_t *outnvl)
{
for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
char *snap_name;
/*
* Verify the snapshot argument.
*/
if (nvpair_value_string(pair, &snap_name) != 0)
return (SET_ERROR(EINVAL));
/* Verify that the keys (bookmarks) are unique */
for (nvpair_t *pair2 = nvlist_next_nvpair(innvl, pair);
pair2 != NULL; pair2 = nvlist_next_nvpair(innvl, pair2)) {
if (strcmp(nvpair_name(pair), nvpair_name(pair2)) == 0)
return (SET_ERROR(EINVAL));
}
}
return (dsl_bookmark_create(innvl, outnvl));
}
/*
* innvl: {
* property 1, property 2, ...
* }
*
* outnvl: {
* bookmark name 1 -> { property 1, property 2, ... },
* bookmark name 2 -> { property 1, property 2, ... }
* }
*
*/
static int
zfs_ioc_get_bookmarks(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
return (dsl_get_bookmarks(fsname, innvl, outnvl));
}
/*
* innvl: {
* bookmark name 1, bookmark name 2
* }
*
* outnvl: bookmark -> error code (int32)
*
*/
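/*
 * Illustrative sketch (not part of this change): only the innvl keys (full
 * bookmark names) matter here; the values are ignored. The names below are
 * placeholders.
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	fnvlist_add_boolean(args, "pool/fs#mark");
 *	fnvlist_add_boolean(args, "pool/fs#mark2");
 */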
static int
zfs_ioc_destroy_bookmarks(const char *poolname, nvlist_t *innvl,
nvlist_t *outnvl)
{
int error, poollen;
poollen = strlen(poolname);
for (nvpair_t *pair = nvlist_next_nvpair(innvl, NULL);
pair != NULL; pair = nvlist_next_nvpair(innvl, pair)) {
const char *name = nvpair_name(pair);
const char *cp = strchr(name, '#');
/*
* The bookmark name must contain a '#', and the part after it
* must contain only valid characters.
*/
if (cp == NULL ||
zfs_component_namecheck(cp + 1, NULL, NULL) != 0)
return (SET_ERROR(EINVAL));
/*
* The bookmark must be in the specified pool.
*/
if (strncmp(name, poolname, poollen) != 0 ||
(name[poollen] != '/' && name[poollen] != '#'))
return (SET_ERROR(EXDEV));
}
error = dsl_bookmark_destroy(innvl, outnvl);
return (error);
}
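/*
 * Summary of the innvl keys looked up below (derived from the function body;
 * the example at the end is an illustrative sketch, not part of this change):
 *
 * innvl: {
 *     ZCP_ARG_PROGRAM -> Lua program text (string)
 *     ZCP_ARG_ARGLIST -> arguments for the program (nvpair, typically an
 *         nvlist)
 *     (optional) ZCP_ARG_SYNC -> run in syncing context (boolean_value,
 *         defaults to B_TRUE)
 *     (optional) ZCP_ARG_INSTRLIMIT -> instruction limit (uint64, defaults to
 *         ZCP_DEFAULT_INSTRLIMIT)
 *     (optional) ZCP_ARG_MEMLIMIT -> memory limit in bytes (uint64, defaults
 *         to ZCP_DEFAULT_MEMLIMIT)
 * }
 *
 * outnvl: the values returned by the channel program, as filled in by
 * zcp_eval().
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	nvlist_t *lua_args = fnvlist_alloc();
 *	fnvlist_add_string(args, ZCP_ARG_PROGRAM, "return {}");
 *	fnvlist_add_nvlist(args, ZCP_ARG_ARGLIST, lua_args);
 */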
static int
zfs_ioc_channel_program(const char *poolname, nvlist_t *innvl,
nvlist_t *outnvl)
{
char *program;
uint64_t instrlimit, memlimit;
boolean_t sync_flag;
nvpair_t *nvarg = NULL;
if (0 != nvlist_lookup_string(innvl, ZCP_ARG_PROGRAM, &program)) {
return (EINVAL);
}
if (0 != nvlist_lookup_boolean_value(innvl, ZCP_ARG_SYNC, &sync_flag)) {
sync_flag = B_TRUE;
}
if (0 != nvlist_lookup_uint64(innvl, ZCP_ARG_INSTRLIMIT, &instrlimit)) {
instrlimit = ZCP_DEFAULT_INSTRLIMIT;
}
if (0 != nvlist_lookup_uint64(innvl, ZCP_ARG_MEMLIMIT, &memlimit)) {
memlimit = ZCP_DEFAULT_MEMLIMIT;
}
if (0 != nvlist_lookup_nvpair(innvl, ZCP_ARG_ARGLIST, &nvarg)) {
return (EINVAL);
}
if (instrlimit == 0 || instrlimit > zfs_lua_max_instrlimit)
return (EINVAL);
if (memlimit == 0 || memlimit > zfs_lua_max_memlimit)
return (EINVAL);
return (zcp_eval(poolname, program, sync_flag, instrlimit, memlimit,
nvarg, outnvl));
}
/*
* inputs:
* zc_name name of dataset to destroy
* zc_objset_type type of objset
* zc_defer_destroy mark for deferred destroy
*
* outputs: none
*/
static int
zfs_ioc_destroy(zfs_cmd_t *zc)
{
int err;
if (zc->zc_objset_type == DMU_OST_ZFS)
zfs_unmount_snap(zc->zc_name);
if (strchr(zc->zc_name, '@')) {
err = dsl_destroy_snapshot(zc->zc_name, zc->zc_defer_destroy);
} else {
err = dsl_destroy_head(zc->zc_name);
if (err == EEXIST) {
/*
* It is possible that the given DS may have
* hidden child (%recv) datasets - "leftovers"
* resulting from the previously interrupted
* 'zfs receive'.
*
* 6 extra bytes for /%recv
*/
char namebuf[ZFS_MAX_DATASET_NAME_LEN + 6];
if (snprintf(namebuf, sizeof (namebuf), "%s/%s",
zc->zc_name, recv_clone_name) >=
sizeof (namebuf))
return (SET_ERROR(EINVAL));
/*
* Try to remove the hidden child (%recv) and after
* that try to remove the target dataset.
* If the hidden child (%recv) does not exist
* the original error (EEXIST) will be returned
*/
err = dsl_destroy_head(namebuf);
if (err == 0)
err = dsl_destroy_head(zc->zc_name);
else if (err == ENOENT)
err = SET_ERROR(EEXIST);
}
}
return (err);
}
/*
* fsname is name of dataset to rollback (to most recent snapshot)
*
* innvl may contain name of expected target snapshot
*
* outnvl: "target" -> name of most recent snapshot
* }
*/
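/*
 * Illustrative sketch (not part of this change): the optional "target" key
 * names the snapshot the caller expects to roll back to; the name below is a
 * placeholder.
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	fnvlist_add_string(args, "target", "pool/fs@latest");
 */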
/* ARGSUSED */
static int
zfs_ioc_rollback(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
zfsvfs_t *zfsvfs;
zvol_state_t *zv;
char *target = NULL;
int error;
(void) nvlist_lookup_string(innvl, "target", &target);
if (target != NULL) {
const char *cp = strchr(target, '@');
/*
* The snap name must contain an @, and the part after it must
* contain only valid characters.
*/
if (cp == NULL ||
zfs_component_namecheck(cp + 1, NULL, NULL) != 0)
return (SET_ERROR(EINVAL));
}
if (getzfsvfs(fsname, &zfsvfs) == 0) {
dsl_dataset_t *ds;
ds = dmu_objset_ds(zfsvfs->z_os);
error = zfs_suspend_fs(zfsvfs);
if (error == 0) {
int resume_err;
error = dsl_dataset_rollback(fsname, target, zfsvfs,
outnvl);
resume_err = zfs_resume_fs(zfsvfs, ds);
error = error ? error : resume_err;
}
deactivate_super(zfsvfs->z_sb);
} else if ((zv = zvol_suspend(fsname)) != NULL) {
error = dsl_dataset_rollback(fsname, target, zvol_tag(zv),
outnvl);
zvol_resume(zv);
} else {
error = dsl_dataset_rollback(fsname, target, NULL, outnvl);
}
return (error);
}
static int
recursive_unmount(const char *fsname, void *arg)
{
const char *snapname = arg;
char *fullname;
fullname = kmem_asprintf("%s@%s", fsname, snapname);
zfs_unmount_snap(fullname);
strfree(fullname);
return (0);
}
/*
* inputs:
* zc_name old name of dataset
* zc_value new name of dataset
* zc_cookie recursive flag (only valid for snapshots)
*
* outputs: none
*/
static int
zfs_ioc_rename(zfs_cmd_t *zc)
{
boolean_t recursive = zc->zc_cookie & 1;
char *at;
/* "zfs rename" from and to ...%recv datasets should both fail */
zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
zc->zc_value[sizeof (zc->zc_value) - 1] = '\0';
if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 ||
dataset_namecheck(zc->zc_value, NULL, NULL) != 0 ||
strchr(zc->zc_name, '%') || strchr(zc->zc_value, '%'))
return (SET_ERROR(EINVAL));
at = strchr(zc->zc_name, '@');
if (at != NULL) {
/* snaps must be in same fs */
int error;
if (strncmp(zc->zc_name, zc->zc_value, at - zc->zc_name + 1))
return (SET_ERROR(EXDEV));
*at = '\0';
if (zc->zc_objset_type == DMU_OST_ZFS) {
error = dmu_objset_find(zc->zc_name,
recursive_unmount, at + 1,
recursive ? DS_FIND_CHILDREN : 0);
if (error != 0) {
*at = '@';
return (error);
}
}
error = dsl_dataset_rename_snapshot(zc->zc_name,
at + 1, strchr(zc->zc_value, '@') + 1, recursive);
*at = '@';
return (error);
} else {
return (dsl_dir_rename(zc->zc_name, zc->zc_value));
}
}
static int
zfs_check_settable(const char *dsname, nvpair_t *pair, cred_t *cr)
{
const char *propname = nvpair_name(pair);
boolean_t issnap = (strchr(dsname, '@') != NULL);
zfs_prop_t prop = zfs_name_to_prop(propname);
uint64_t intval;
int err;
if (prop == ZPROP_INVAL) {
if (zfs_prop_user(propname)) {
if ((err = zfs_secpolicy_write_perms(dsname,
ZFS_DELEG_PERM_USERPROP, cr)))
return (err);
return (0);
}
if (!issnap && zfs_prop_userquota(propname)) {
const char *perm = NULL;
const char *uq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_USERQUOTA];
const char *gq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPQUOTA];
const char *uiq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_USEROBJQUOTA];
const char *giq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_GROUPOBJQUOTA];
const char *pq_prefix =
zfs_userquota_prop_prefixes[ZFS_PROP_PROJECTQUOTA];
const char *piq_prefix = zfs_userquota_prop_prefixes[\
ZFS_PROP_PROJECTOBJQUOTA];
if (strncmp(propname, uq_prefix,
strlen(uq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_USERQUOTA;
} else if (strncmp(propname, uiq_prefix,
strlen(uiq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_USEROBJQUOTA;
} else if (strncmp(propname, gq_prefix,
strlen(gq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_GROUPQUOTA;
} else if (strncmp(propname, giq_prefix,
strlen(giq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_GROUPOBJQUOTA;
} else if (strncmp(propname, pq_prefix,
strlen(pq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_PROJECTQUOTA;
} else if (strncmp(propname, piq_prefix,
strlen(piq_prefix)) == 0) {
perm = ZFS_DELEG_PERM_PROJECTOBJQUOTA;
} else {
/* {USER|GROUP|PROJECT}USED are read-only */
return (SET_ERROR(EINVAL));
}
if ((err = zfs_secpolicy_write_perms(dsname, perm, cr)))
return (err);
return (0);
}
return (SET_ERROR(EINVAL));
}
if (issnap)
return (SET_ERROR(EINVAL));
if (nvpair_type(pair) == DATA_TYPE_NVLIST) {
/*
* dsl_prop_get_all_impl() returns properties in this
* format.
*/
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(pair, &attrs) == 0);
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&pair) == 0);
}
/*
* Check that this value is valid for this pool version
*/
switch (prop) {
case ZFS_PROP_COMPRESSION:
/*
* If the user specified gzip compression, make sure
* the SPA supports it. We ignore any errors here since
* we'll catch them later.
*/
if (nvpair_value_uint64(pair, &intval) == 0) {
if (intval >= ZIO_COMPRESS_GZIP_1 &&
intval <= ZIO_COMPRESS_GZIP_9 &&
zfs_earlier_version(dsname,
SPA_VERSION_GZIP_COMPRESSION)) {
return (SET_ERROR(ENOTSUP));
}
if (intval == ZIO_COMPRESS_ZLE &&
zfs_earlier_version(dsname,
SPA_VERSION_ZLE_COMPRESSION))
return (SET_ERROR(ENOTSUP));
if (intval == ZIO_COMPRESS_LZ4) {
spa_t *spa;
if ((err = spa_open(dsname, &spa, FTAG)) != 0)
return (err);
if (!spa_feature_is_enabled(spa,
SPA_FEATURE_LZ4_COMPRESS)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
spa_close(spa, FTAG);
}
/*
* If this is a bootable dataset then
* verify that the compression algorithm
* is supported for booting. We must return
* something other than ENOTSUP since it
* implies a downrev pool version.
*/
if (zfs_is_bootfs(dsname) &&
!BOOTFS_COMPRESS_VALID(intval)) {
return (SET_ERROR(ERANGE));
}
}
break;
case ZFS_PROP_COPIES:
if (zfs_earlier_version(dsname, SPA_VERSION_DITTO_BLOCKS))
return (SET_ERROR(ENOTSUP));
break;
case ZFS_PROP_VOLBLOCKSIZE:
case ZFS_PROP_RECORDSIZE:
/* Record sizes above 128k need the feature to be enabled */
if (nvpair_value_uint64(pair, &intval) == 0 &&
intval > SPA_OLD_MAXBLOCKSIZE) {
spa_t *spa;
/*
* We don't allow setting the property above 1MB,
* unless the tunable has been changed.
*/
if (intval > zfs_max_recordsize ||
intval > SPA_MAXBLOCKSIZE)
return (SET_ERROR(ERANGE));
if ((err = spa_open(dsname, &spa, FTAG)) != 0)
return (err);
if (!spa_feature_is_enabled(spa,
SPA_FEATURE_LARGE_BLOCKS)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
spa_close(spa, FTAG);
}
break;
case ZFS_PROP_DNODESIZE:
/* Dnode sizes above 512 need the feature to be enabled */
if (nvpair_value_uint64(pair, &intval) == 0 &&
intval != ZFS_DNSIZE_LEGACY) {
spa_t *spa;
/*
* If this is a bootable dataset then
* we don't allow large (>512B) dnodes,
* because GRUB doesn't support them.
*/
if (zfs_is_bootfs(dsname) &&
intval != ZFS_DNSIZE_LEGACY) {
return (SET_ERROR(EDOM));
}
if ((err = spa_open(dsname, &spa, FTAG)) != 0)
return (err);
if (!spa_feature_is_enabled(spa,
SPA_FEATURE_LARGE_DNODE)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
spa_close(spa, FTAG);
}
break;
case ZFS_PROP_SHARESMB:
if (zpl_earlier_version(dsname, ZPL_VERSION_FUID))
return (SET_ERROR(ENOTSUP));
break;
case ZFS_PROP_ACLINHERIT:
if (nvpair_type(pair) == DATA_TYPE_UINT64 &&
nvpair_value_uint64(pair, &intval) == 0) {
if (intval == ZFS_ACL_PASSTHROUGH_X &&
zfs_earlier_version(dsname,
SPA_VERSION_PASSTHROUGH_X))
return (SET_ERROR(ENOTSUP));
}
break;
case ZFS_PROP_CHECKSUM:
case ZFS_PROP_DEDUP:
{
spa_feature_t feature;
spa_t *spa;
uint64_t intval;
int err;
/* dedup feature version checks */
if (prop == ZFS_PROP_DEDUP &&
zfs_earlier_version(dsname, SPA_VERSION_DEDUP))
return (SET_ERROR(ENOTSUP));
if (nvpair_value_uint64(pair, &intval) != 0)
return (SET_ERROR(EINVAL));
/* check prop value is enabled in features */
feature = zio_checksum_to_feature(intval & ZIO_CHECKSUM_MASK);
if (feature == SPA_FEATURE_NONE)
break;
if ((err = spa_open(dsname, &spa, FTAG)) != 0)
return (err);
/*
* Salted checksums are not supported on root pools.
*/
if (spa_bootfs(spa) != 0 &&
intval < ZIO_CHECKSUM_FUNCTIONS &&
(zio_checksum_table[intval].ci_flags &
ZCHECKSUM_FLAG_SALTED)) {
spa_close(spa, FTAG);
return (SET_ERROR(ERANGE));
}
if (!spa_feature_is_enabled(spa, feature)) {
spa_close(spa, FTAG);
return (SET_ERROR(ENOTSUP));
}
spa_close(spa, FTAG);
break;
}
default:
break;
}
return (zfs_secpolicy_setprop(dsname, prop, pair, CRED()));
}
/*
* Removes properties from the given props list that fail permission checks
* needed to clear them and to restore them in case of a receive error. For each
* property, make sure we have both set and inherit permissions.
*
* Returns the first error encountered if any permission checks fail. If the
* caller provides a non-NULL errlist, it also gives the complete list of names
* of all the properties that failed a permission check along with the
* corresponding error numbers. The caller is responsible for freeing the
* returned errlist.
*
* If every property checks out successfully, zero is returned and the list
* pointed at by errlist is NULL.
*/
static int
zfs_check_clearable(char *dataset, nvlist_t *props, nvlist_t **errlist)
{
zfs_cmd_t *zc;
nvpair_t *pair, *next_pair;
nvlist_t *errors;
int err, rv = 0;
if (props == NULL)
return (0);
VERIFY(nvlist_alloc(&errors, NV_UNIQUE_NAME, KM_SLEEP) == 0);
zc = kmem_alloc(sizeof (zfs_cmd_t), KM_SLEEP);
(void) strlcpy(zc->zc_name, dataset, sizeof (zc->zc_name));
pair = nvlist_next_nvpair(props, NULL);
while (pair != NULL) {
next_pair = nvlist_next_nvpair(props, pair);
(void) strlcpy(zc->zc_value, nvpair_name(pair),
sizeof (zc->zc_value));
if ((err = zfs_check_settable(dataset, pair, CRED())) != 0 ||
(err = zfs_secpolicy_inherit_prop(zc, NULL, CRED())) != 0) {
VERIFY(nvlist_remove_nvpair(props, pair) == 0);
VERIFY(nvlist_add_int32(errors,
zc->zc_value, err) == 0);
}
pair = next_pair;
}
kmem_free(zc, sizeof (zfs_cmd_t));
if ((pair = nvlist_next_nvpair(errors, NULL)) == NULL) {
nvlist_free(errors);
errors = NULL;
} else {
VERIFY(nvpair_value_int32(pair, &rv) == 0);
}
if (errlist == NULL)
nvlist_free(errors);
else
*errlist = errors;
return (rv);
}
static boolean_t
propval_equals(nvpair_t *p1, nvpair_t *p2)
{
if (nvpair_type(p1) == DATA_TYPE_NVLIST) {
/* dsl_prop_get_all_impl() format */
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(p1, &attrs) == 0);
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&p1) == 0);
}
if (nvpair_type(p2) == DATA_TYPE_NVLIST) {
nvlist_t *attrs;
VERIFY(nvpair_value_nvlist(p2, &attrs) == 0);
VERIFY(nvlist_lookup_nvpair(attrs, ZPROP_VALUE,
&p2) == 0);
}
if (nvpair_type(p1) != nvpair_type(p2))
return (B_FALSE);
if (nvpair_type(p1) == DATA_TYPE_STRING) {
char *valstr1, *valstr2;
VERIFY(nvpair_value_string(p1, (char **)&valstr1) == 0);
VERIFY(nvpair_value_string(p2, (char **)&valstr2) == 0);
return (strcmp(valstr1, valstr2) == 0);
} else {
uint64_t intval1, intval2;
VERIFY(nvpair_value_uint64(p1, &intval1) == 0);
VERIFY(nvpair_value_uint64(p2, &intval2) == 0);
return (intval1 == intval2);
}
}
/*
* Remove properties from props if they are not going to change (as determined
* by comparison with origprops). Remove them from origprops as well, since we
* do not need to clear or restore properties that won't change.
*/
static void
props_reduce(nvlist_t *props, nvlist_t *origprops)
{
nvpair_t *pair, *next_pair;
if (origprops == NULL)
return; /* all props need to be received */
pair = nvlist_next_nvpair(props, NULL);
while (pair != NULL) {
const char *propname = nvpair_name(pair);
nvpair_t *match;
next_pair = nvlist_next_nvpair(props, pair);
if ((nvlist_lookup_nvpair(origprops, propname,
&match) != 0) || !propval_equals(pair, match))
goto next; /* need to set received value */
/* don't clear the existing received value */
(void) nvlist_remove_nvpair(origprops, match);
/* don't bother receiving the property */
(void) nvlist_remove_nvpair(props, pair);
next:
pair = next_pair;
}
}
/*
* Extract properties that cannot be set PRIOR to the receipt of a dataset.
* For example, refquota cannot be set until after the receipt of a dataset,
* because in replication streams, an older/earlier snapshot may exceed the
* refquota. We want to receive the older/earlier snapshot, but setting
* refquota pre-receipt will set the dsl's ACTUAL quota, which will prevent
* the older/earlier snapshot from being received (with EDQUOT).
*
* The ZFS test "zfs_receive_011_pos" demonstrates such a scenario.
*
* libzfs will need to be judicious in handling errors encountered by props
* extracted by this function.
*/
static nvlist_t *
extract_delay_props(nvlist_t *props)
{
nvlist_t *delayprops;
nvpair_t *nvp, *tmp;
static const zfs_prop_t delayable[] = {
ZFS_PROP_REFQUOTA,
ZFS_PROP_KEYLOCATION,
0
};
int i;
VERIFY(nvlist_alloc(&delayprops, NV_UNIQUE_NAME, KM_SLEEP) == 0);
for (nvp = nvlist_next_nvpair(props, NULL); nvp != NULL;
nvp = nvlist_next_nvpair(props, nvp)) {
/*
* strcmp() is safe because zfs_prop_to_name() always returns
* a bounded string.
*/
for (i = 0; delayable[i] != 0; i++) {
if (strcmp(zfs_prop_to_name(delayable[i]),
nvpair_name(nvp)) == 0) {
break;
}
}
if (delayable[i] != 0) {
tmp = nvlist_prev_nvpair(props, nvp);
VERIFY(nvlist_add_nvpair(delayprops, nvp) == 0);
VERIFY(nvlist_remove_nvpair(props, nvp) == 0);
nvp = tmp;
}
}
if (nvlist_empty(delayprops)) {
nvlist_free(delayprops);
delayprops = NULL;
}
return (delayprops);
}
#ifdef DEBUG
static boolean_t zfs_ioc_recv_inject_err;
#endif
/*
* nvlist 'errors' is always allocated. It will contain descriptions of
* encountered errors, if any. It's the caller's responsibility to free.
*/
static int
zfs_ioc_recv_impl(char *tofs, char *tosnap, char *origin, nvlist_t *recvprops,
nvlist_t *localprops, boolean_t force, boolean_t resumable, int input_fd,
dmu_replay_record_t *begin_record, int cleanup_fd, uint64_t *read_bytes,
uint64_t *errflags, uint64_t *action_handle, nvlist_t **errors)
{
dmu_recv_cookie_t drc;
int error = 0;
int props_error = 0;
offset_t off;
nvlist_t *delayprops = NULL; /* sent properties applied post-receive */
nvlist_t *origprops = NULL; /* existing properties */
nvlist_t *origrecvd = NULL; /* existing received properties */
boolean_t first_recvd_props = B_FALSE;
file_t *input_fp;
*read_bytes = 0;
*errflags = 0;
*errors = fnvlist_alloc();
input_fp = getf(input_fd);
if (input_fp == NULL)
return (SET_ERROR(EBADF));
error = dmu_recv_begin(tofs, tosnap,
begin_record, force, resumable, origin, &drc);
if (error != 0)
goto out;
/*
* Set properties before we receive the stream so that they are applied
* to the new data. Note that we must call dmu_recv_stream() if
* dmu_recv_begin() succeeds.
*/
if (recvprops != NULL && !drc.drc_newfs) {
if (spa_version(dsl_dataset_get_spa(drc.drc_ds)) >=
SPA_VERSION_RECVD_PROPS &&
!dsl_prop_get_hasrecvd(tofs))
first_recvd_props = B_TRUE;
/*
* If new received properties are supplied, they are to
* completely replace the existing received properties, so stash
* away the existing ones.
*/
if (dsl_prop_get_received(tofs, &origrecvd) == 0) {
nvlist_t *errlist = NULL;
/*
* Don't bother writing a property if its value won't
* change (and avoid the unnecessary security checks).
*
* The first receive after SPA_VERSION_RECVD_PROPS is a
* special case where we blow away all local properties
* regardless.
*/
if (!first_recvd_props)
props_reduce(recvprops, origrecvd);
if (zfs_check_clearable(tofs, origrecvd, &errlist) != 0)
(void) nvlist_merge(*errors, errlist, 0);
nvlist_free(errlist);
if (clear_received_props(tofs, origrecvd,
first_recvd_props ? NULL : recvprops) != 0)
*errflags |= ZPROP_ERR_NOCLEAR;
} else {
*errflags |= ZPROP_ERR_NOCLEAR;
}
}
/*
* Stash away existing properties so we can restore them on error unless
* we're doing the first receive after SPA_VERSION_RECVD_PROPS, in which
* case "origrecvd" will take care of that.
*/
if (localprops != NULL && !drc.drc_newfs && !first_recvd_props) {
objset_t *os;
if (dmu_objset_hold(tofs, FTAG, &os) == 0) {
if (dsl_prop_get_all(os, &origprops) != 0) {
*errflags |= ZPROP_ERR_NOCLEAR;
}
dmu_objset_rele(os, FTAG);
} else {
*errflags |= ZPROP_ERR_NOCLEAR;
}
}
if (recvprops != NULL) {
props_error = dsl_prop_set_hasrecvd(tofs);
if (props_error == 0) {
delayprops = extract_delay_props(recvprops);
(void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_RECEIVED,
recvprops, *errors);
}
}
if (localprops != NULL) {
nvlist_t *oprops = fnvlist_alloc();
nvlist_t *xprops = fnvlist_alloc();
nvpair_t *nvp = NULL;
while ((nvp = nvlist_next_nvpair(localprops, nvp)) != NULL) {
if (nvpair_type(nvp) == DATA_TYPE_BOOLEAN) {
/* -x property */
const char *name = nvpair_name(nvp);
zfs_prop_t prop = zfs_name_to_prop(name);
if (prop != ZPROP_INVAL) {
if (!zfs_prop_inheritable(prop))
continue;
} else if (!zfs_prop_user(name))
continue;
fnvlist_add_boolean(xprops, name);
} else {
/* -o property=value */
fnvlist_add_nvpair(oprops, nvp);
}
}
(void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL,
oprops, *errors);
(void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED,
xprops, *errors);
nvlist_free(oprops);
nvlist_free(xprops);
}
off = input_fp->f_offset;
error = dmu_recv_stream(&drc, input_fp->f_vnode, &off, cleanup_fd,
action_handle);
if (error == 0) {
zfsvfs_t *zfsvfs = NULL;
zvol_state_t *zv = NULL;
if (getzfsvfs(tofs, &zfsvfs) == 0) {
/* online recv */
dsl_dataset_t *ds;
int end_err;
ds = dmu_objset_ds(zfsvfs->z_os);
error = zfs_suspend_fs(zfsvfs);
/*
* If the suspend fails, then the recv_end will
* likely also fail, and clean up after itself.
*/
end_err = dmu_recv_end(&drc, zfsvfs);
if (error == 0)
error = zfs_resume_fs(zfsvfs, ds);
error = error ? error : end_err;
deactivate_super(zfsvfs->z_sb);
} else if ((zv = zvol_suspend(tofs)) != NULL) {
error = dmu_recv_end(&drc, zvol_tag(zv));
zvol_resume(zv);
} else {
error = dmu_recv_end(&drc, NULL);
}
/* Set delayed properties now, after we're done receiving. */
if (delayprops != NULL && error == 0) {
(void) zfs_set_prop_nvlist(tofs, ZPROP_SRC_RECEIVED,
delayprops, *errors);
}
}
if (delayprops != NULL) {
/*
* Merge delayed props back in with initial props, in case
* we're DEBUG and zfs_ioc_recv_inject_err is set (which means
* we have to make sure clear_received_props() includes
* the delayed properties).
*
* Since zfs_ioc_recv_inject_err is only in DEBUG kernels,
* using ASSERT() will be just like a VERIFY.
*/
ASSERT(nvlist_merge(recvprops, delayprops, 0) == 0);
nvlist_free(delayprops);
}
*read_bytes = off - input_fp->f_offset;
if (VOP_SEEK(input_fp->f_vnode, input_fp->f_offset, &off, NULL) == 0)
input_fp->f_offset = off;
#ifdef DEBUG
if (zfs_ioc_recv_inject_err) {
zfs_ioc_recv_inject_err = B_FALSE;
error = 1;
}
#endif
/*
* On error, restore the original props.
*/
if (error != 0 && recvprops != NULL && !drc.drc_newfs) {
if (clear_received_props(tofs, recvprops, NULL) != 0) {
/*
* We failed to clear the received properties.
* Since we may have left a $recvd value on the
* system, we can't clear the $hasrecvd flag.
*/
*errflags |= ZPROP_ERR_NORESTORE;
} else if (first_recvd_props) {
dsl_prop_unset_hasrecvd(tofs);
}
if (origrecvd == NULL && !drc.drc_newfs) {
/* We failed to stash the original properties. */
*errflags |= ZPROP_ERR_NORESTORE;
}
/*
* dsl_props_set() will not convert RECEIVED to LOCAL on or
* after SPA_VERSION_RECVD_PROPS, so we need to specify LOCAL
* explicitly if we're restoring local properties cleared in the
* first new-style receive.
*/
if (origrecvd != NULL &&
zfs_set_prop_nvlist(tofs, (first_recvd_props ?
ZPROP_SRC_LOCAL : ZPROP_SRC_RECEIVED),
origrecvd, NULL) != 0) {
/*
* We stashed the original properties but failed to
* restore them.
*/
*errflags |= ZPROP_ERR_NORESTORE;
}
}
if (error != 0 && localprops != NULL && !drc.drc_newfs &&
!first_recvd_props) {
nvlist_t *setprops;
nvlist_t *inheritprops;
nvpair_t *nvp;
if (origprops == NULL) {
/* We failed to stash the original properties. */
*errflags |= ZPROP_ERR_NORESTORE;
goto out;
}
/* Restore original props */
setprops = fnvlist_alloc();
inheritprops = fnvlist_alloc();
nvp = NULL;
while ((nvp = nvlist_next_nvpair(localprops, nvp)) != NULL) {
const char *name = nvpair_name(nvp);
const char *source;
nvlist_t *attrs;
if (!nvlist_exists(origprops, name)) {
/*
* Property was not present or was explicitly
* inherited before the receive, restore this.
*/
fnvlist_add_boolean(inheritprops, name);
continue;
}
attrs = fnvlist_lookup_nvlist(origprops, name);
source = fnvlist_lookup_string(attrs, ZPROP_SOURCE);
/* Skip received properties */
if (strcmp(source, ZPROP_SOURCE_VAL_RECVD) == 0)
continue;
if (strcmp(source, tofs) == 0) {
/* Property was locally set */
fnvlist_add_nvlist(setprops, name, attrs);
} else {
/* Property was implicitly inherited */
fnvlist_add_boolean(inheritprops, name);
}
}
if (zfs_set_prop_nvlist(tofs, ZPROP_SRC_LOCAL, setprops,
NULL) != 0)
*errflags |= ZPROP_ERR_NORESTORE;
if (zfs_set_prop_nvlist(tofs, ZPROP_SRC_INHERITED, inheritprops,
NULL) != 0)
*errflags |= ZPROP_ERR_NORESTORE;
nvlist_free(setprops);
nvlist_free(inheritprops);
}
out:
releasef(input_fd);
nvlist_free(origrecvd);
nvlist_free(origprops);
if (error == 0)
error = props_error;
return (error);
}
/*
* inputs:
* zc_name name of containing filesystem (unused)
* zc_nvlist_src{_size} nvlist of properties to apply
* zc_nvlist_conf{_size} nvlist of properties to exclude
* (DATA_TYPE_BOOLEAN) and override (everything else)
* zc_value name of snapshot to create
* zc_string name of clone origin (if DRR_FLAG_CLONE)
* zc_cookie file descriptor to recv from
* zc_begin_record the BEGIN record of the stream (not byteswapped)
* zc_guid force flag
* zc_cleanup_fd cleanup-on-exit file descriptor
* zc_action_handle handle for this guid/ds mapping (or zero on first call)
*
* outputs:
* zc_cookie number of bytes read
* zc_obj zprop_errflags_t
* zc_action_handle handle for this guid/ds mapping
* zc_nvlist_dst{_size} error for each unapplied received property
*/
static int
zfs_ioc_recv(zfs_cmd_t *zc)
{
dmu_replay_record_t begin_record;
nvlist_t *errors = NULL;
nvlist_t *recvdprops = NULL;
nvlist_t *localprops = NULL;
char *origin = NULL;
char *tosnap;
char tofs[ZFS_MAX_DATASET_NAME_LEN];
int error = 0;
if (dataset_namecheck(zc->zc_value, NULL, NULL) != 0 ||
strchr(zc->zc_value, '@') == NULL ||
strchr(zc->zc_value, '%'))
return (SET_ERROR(EINVAL));
(void) strlcpy(tofs, zc->zc_value, sizeof (tofs));
tosnap = strchr(tofs, '@');
*tosnap++ = '\0';
if (zc->zc_nvlist_src != 0 &&
(error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &recvdprops)) != 0)
return (error);
if (zc->zc_nvlist_conf != 0 &&
(error = get_nvlist(zc->zc_nvlist_conf, zc->zc_nvlist_conf_size,
zc->zc_iflags, &localprops)) != 0)
return (error);
if (zc->zc_string[0])
origin = zc->zc_string;
begin_record.drr_type = DRR_BEGIN;
begin_record.drr_payloadlen = 0;
begin_record.drr_u.drr_begin = zc->zc_begin_record;
error = zfs_ioc_recv_impl(tofs, tosnap, origin, recvdprops, localprops,
zc->zc_guid, B_FALSE, zc->zc_cookie, &begin_record,
zc->zc_cleanup_fd, &zc->zc_cookie, &zc->zc_obj,
&zc->zc_action_handle, &errors);
nvlist_free(recvdprops);
nvlist_free(localprops);
/*
* Now that all props, initial and delayed, are set, report the prop
* errors to the caller.
*/
if (zc->zc_nvlist_dst_size != 0 && errors != NULL &&
(nvlist_smush(errors, zc->zc_nvlist_dst_size) != 0 ||
put_nvlist(zc, errors) != 0)) {
/*
* Caller made zc->zc_nvlist_dst less than the minimum expected
* size or supplied an invalid address.
*/
error = SET_ERROR(EINVAL);
}
nvlist_free(errors);
return (error);
}
/*
* innvl: {
* "snapname" -> full name of the snapshot to create
* (optional) "props" -> received properties to set (nvlist)
* (optional) "localprops" -> override and exclude properties (nvlist)
* (optional) "origin" -> name of clone origin (DRR_FLAG_CLONE)
* "begin_record" -> non-byteswapped dmu_replay_record_t
* "input_fd" -> file descriptor to read stream from (int32)
* (optional) "force" -> force flag (value ignored)
* (optional) "resumable" -> resumable flag (value ignored)
* (optional) "cleanup_fd" -> cleanup-on-exit file descriptor
* (optional) "action_handle" -> handle for this guid/ds mapping
* }
*
* outnvl: {
* "read_bytes" -> number of bytes read
* "error_flags" -> zprop_errflags_t
* "action_handle" -> handle for this guid/ds mapping
* "errors" -> error for each unapplied received property (nvlist)
* }
*/
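/*
 * Illustrative sketch (not part of this change): a caller might populate the
 * innvl described above roughly as follows, where "drr" is the stream's
 * non-byteswapped DRR_BEGIN record and "fd" is the open stream descriptor
 * (both placeholders).
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	fnvlist_add_string(args, "snapname", "pool/fs@received");
 *	fnvlist_add_byte_array(args, "begin_record",
 *	    (uchar_t *)&drr, sizeof (drr));
 *	fnvlist_add_int32(args, "input_fd", fd);
 *	fnvlist_add_boolean(args, "resumable");
 */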
static int
zfs_ioc_recv_new(const char *fsname, nvlist_t *innvl, nvlist_t *outnvl)
{
dmu_replay_record_t *begin_record;
uint_t begin_record_size;
nvlist_t *errors = NULL;
nvlist_t *recvprops = NULL;
nvlist_t *localprops = NULL;
char *snapname = NULL;
char *origin = NULL;
char *tosnap;
char tofs[ZFS_MAX_DATASET_NAME_LEN];
boolean_t force;
boolean_t resumable;
uint64_t action_handle = 0;
uint64_t read_bytes = 0;
uint64_t errflags = 0;
int input_fd = -1;
int cleanup_fd = -1;
int error;
error = nvlist_lookup_string(innvl, "snapname", &snapname);
if (error != 0)
return (SET_ERROR(EINVAL));
if (dataset_namecheck(snapname, NULL, NULL) != 0 ||
strchr(snapname, '@') == NULL ||
strchr(snapname, '%'))
return (SET_ERROR(EINVAL));
(void) strcpy(tofs, snapname);
tosnap = strchr(tofs, '@');
*tosnap++ = '\0';
error = nvlist_lookup_string(innvl, "origin", &origin);
if (error && error != ENOENT)
return (error);
error = nvlist_lookup_byte_array(innvl, "begin_record",
(uchar_t **)&begin_record, &begin_record_size);
if (error != 0 || begin_record_size != sizeof (*begin_record))
return (SET_ERROR(EINVAL));
error = nvlist_lookup_int32(innvl, "input_fd", &input_fd);
if (error != 0)
return (SET_ERROR(EINVAL));
force = nvlist_exists(innvl, "force");
resumable = nvlist_exists(innvl, "resumable");
error = nvlist_lookup_int32(innvl, "cleanup_fd", &cleanup_fd);
if (error && error != ENOENT)
return (error);
error = nvlist_lookup_uint64(innvl, "action_handle", &action_handle);
if (error && error != ENOENT)
return (error);
/* we still use "props" here for backwards compatibility */
error = nvlist_lookup_nvlist(innvl, "props", &recvprops);
if (error && error != ENOENT)
return (error);
error = nvlist_lookup_nvlist(innvl, "localprops", &localprops);
if (error && error != ENOENT)
return (error);
error = zfs_ioc_recv_impl(tofs, tosnap, origin, recvprops, localprops,
force, resumable, input_fd, begin_record, cleanup_fd, &read_bytes,
&errflags, &action_handle, &errors);
fnvlist_add_uint64(outnvl, "read_bytes", read_bytes);
fnvlist_add_uint64(outnvl, "error_flags", errflags);
fnvlist_add_uint64(outnvl, "action_handle", action_handle);
fnvlist_add_nvlist(outnvl, "errors", errors);
nvlist_free(errors);
nvlist_free(recvprops);
nvlist_free(localprops);
return (error);
}
/*
* inputs:
* zc_name name of snapshot to send
* zc_cookie file descriptor to send stream to
* zc_obj fromorigin flag (mutually exclusive with zc_fromobj)
* zc_sendobj objsetid of snapshot to send
* zc_fromobj objsetid of incremental fromsnap (may be zero)
* zc_guid if set, estimate size of stream only. zc_cookie is ignored.
* output size in zc_objset_type.
* zc_flags lzc_send_flags
*
* outputs:
* zc_objset_type estimated size, if zc_guid is set
*
* NOTE: This is no longer the preferred interface; any new functionality
* should be added to zfs_ioc_send_new() instead.
*/
static int
zfs_ioc_send(zfs_cmd_t *zc)
{
int error;
offset_t off;
boolean_t estimate = (zc->zc_guid != 0);
boolean_t embedok = (zc->zc_flags & 0x1);
boolean_t large_block_ok = (zc->zc_flags & 0x2);
boolean_t compressok = (zc->zc_flags & 0x4);
boolean_t rawok = (zc->zc_flags & 0x8);
if (zc->zc_obj != 0) {
dsl_pool_t *dp;
dsl_dataset_t *tosnap;
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, zc->zc_sendobj, FTAG, &tosnap);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
if (dsl_dir_is_clone(tosnap->ds_dir))
zc->zc_fromobj =
dsl_dir_phys(tosnap->ds_dir)->dd_origin_obj;
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
}
if (estimate) {
dsl_pool_t *dp;
dsl_dataset_t *tosnap;
dsl_dataset_t *fromsnap = NULL;
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold_obj(dp, zc->zc_sendobj,
FTAG, &tosnap);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
if (zc->zc_fromobj != 0) {
error = dsl_dataset_hold_obj(dp, zc->zc_fromobj,
FTAG, &fromsnap);
if (error != 0) {
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
}
error = dmu_send_estimate(tosnap, fromsnap, compressok || rawok,
&zc->zc_objset_type);
if (fromsnap != NULL)
dsl_dataset_rele(fromsnap, FTAG);
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
} else {
file_t *fp = getf(zc->zc_cookie);
if (fp == NULL)
return (SET_ERROR(EBADF));
off = fp->f_offset;
error = dmu_send_obj(zc->zc_name, zc->zc_sendobj,
zc->zc_fromobj, embedok, large_block_ok, compressok, rawok,
zc->zc_cookie, fp->f_vnode, &off);
if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0)
fp->f_offset = off;
releasef(zc->zc_cookie);
}
return (error);
}
/*
* inputs:
* zc_name name of snapshot on which to report progress
* zc_cookie file descriptor of send stream
*
* outputs:
* zc_cookie number of bytes written in send stream thus far
*/
static int
zfs_ioc_send_progress(zfs_cmd_t *zc)
{
dsl_pool_t *dp;
dsl_dataset_t *ds;
dmu_sendarg_t *dsp = NULL;
int error;
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &ds);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
mutex_enter(&ds->ds_sendstream_lock);
/*
* Iterate over all the send streams currently active on this dataset.
* If there's one which matches the specified file descriptor _and_ the
* stream was started by the current process, return the progress of
* that stream.
*/
for (dsp = list_head(&ds->ds_sendstreams); dsp != NULL;
dsp = list_next(&ds->ds_sendstreams, dsp)) {
if (dsp->dsa_outfd == zc->zc_cookie &&
dsp->dsa_proc->group_leader == curproc->group_leader)
break;
}
if (dsp != NULL)
zc->zc_cookie = *(dsp->dsa_off);
else
error = SET_ERROR(ENOENT);
mutex_exit(&ds->ds_sendstream_lock);
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
static int
zfs_ioc_inject_fault(zfs_cmd_t *zc)
{
int id, error;
error = zio_inject_fault(zc->zc_name, (int)zc->zc_guid, &id,
&zc->zc_inject_record);
if (error == 0)
zc->zc_guid = (uint64_t)id;
return (error);
}
static int
zfs_ioc_clear_fault(zfs_cmd_t *zc)
{
return (zio_clear_fault((int)zc->zc_guid));
}
static int
zfs_ioc_inject_list_next(zfs_cmd_t *zc)
{
int id = (int)zc->zc_guid;
int error;
error = zio_inject_list_next(&id, zc->zc_name, sizeof (zc->zc_name),
&zc->zc_inject_record);
zc->zc_guid = id;
return (error);
}
static int
zfs_ioc_error_log(zfs_cmd_t *zc)
{
spa_t *spa;
int error;
size_t count = (size_t)zc->zc_nvlist_dst_size;
if ((error = spa_open(zc->zc_name, &spa, FTAG)) != 0)
return (error);
error = spa_get_errlog(spa, (void *)(uintptr_t)zc->zc_nvlist_dst,
&count);
if (error == 0)
zc->zc_nvlist_dst_size = count;
else
zc->zc_nvlist_dst_size = spa_get_errlog_size(spa);
spa_close(spa, FTAG);
return (error);
}
static int
zfs_ioc_clear(zfs_cmd_t *zc)
{
spa_t *spa;
vdev_t *vd;
int error;
/*
* On zpool clear we also fix up missing slogs
*/
mutex_enter(&spa_namespace_lock);
spa = spa_lookup(zc->zc_name);
if (spa == NULL) {
mutex_exit(&spa_namespace_lock);
return (SET_ERROR(EIO));
}
if (spa_get_log_state(spa) == SPA_LOG_MISSING) {
/* we need to let spa_open/spa_load clear the chains */
spa_set_log_state(spa, SPA_LOG_CLEAR);
}
spa->spa_last_open_failed = 0;
mutex_exit(&spa_namespace_lock);
if (zc->zc_cookie & ZPOOL_NO_REWIND) {
error = spa_open(zc->zc_name, &spa, FTAG);
} else {
nvlist_t *policy;
nvlist_t *config = NULL;
if (zc->zc_nvlist_src == 0)
return (SET_ERROR(EINVAL));
if ((error = get_nvlist(zc->zc_nvlist_src,
zc->zc_nvlist_src_size, zc->zc_iflags, &policy)) == 0) {
error = spa_open_rewind(zc->zc_name, &spa, FTAG,
policy, &config);
if (config != NULL) {
int err;
if ((err = put_nvlist(zc, config)) != 0)
error = err;
nvlist_free(config);
}
nvlist_free(policy);
}
}
if (error != 0)
return (error);
spa_vdev_state_enter(spa, SCL_NONE);
if (zc->zc_guid == 0) {
vd = NULL;
} else {
vd = spa_lookup_by_guid(spa, zc->zc_guid, B_TRUE);
if (vd == NULL) {
(void) spa_vdev_state_exit(spa, NULL, ENODEV);
spa_close(spa, FTAG);
return (SET_ERROR(ENODEV));
}
}
vdev_clear(spa, vd);
(void) spa_vdev_state_exit(spa, spa_suspended(spa) ?
NULL : spa->spa_root_vdev, 0);
/*
* Resume any suspended I/Os.
*/
if (zio_resume(spa) != 0)
error = SET_ERROR(EIO);
spa_close(spa, FTAG);
return (error);
}
/*
* Reopen all the vdevs associated with the pool.
*
* innvl: {
* "scrub_restart" -> when true and scrub is running, allow to restart
* scrub as the side effect of the reopen (boolean).
* }
*
* outnvl is unused
*/
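/*
 * Illustrative sketch (not part of this change): a caller that wants the
 * reopen without disturbing a running scrub would pass
 *
 *	nvlist_t *args = fnvlist_alloc();
 *	fnvlist_add_boolean_value(args, "scrub_restart", B_FALSE);
 *
 * or omit innvl entirely to keep the historical behavior (scrub_restart
 * defaults to B_TRUE).
 */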
/* ARGSUSED */
static int
zfs_ioc_pool_reopen(const char *pool, nvlist_t *innvl, nvlist_t *outnvl)
{
spa_t *spa;
int error;
boolean_t scrub_restart = B_TRUE;
if (innvl) {
if (nvlist_lookup_boolean_value(innvl, "scrub_restart",
&scrub_restart) != 0) {
return (SET_ERROR(EINVAL));
}
}
error = spa_open(pool, &spa, FTAG);
if (error != 0)
return (error);
spa_vdev_state_enter(spa, SCL_NONE);
/*
* If the scrub_restart flag is B_FALSE and a scrub is already
* in progress then set spa_scrub_reopen flag to B_TRUE so that
* we don't restart the scrub as a side effect of the reopen.
* Otherwise, let vdev_open() decide if a resilver is required.
*/
spa->spa_scrub_reopen = (!scrub_restart &&
dsl_scan_scrubbing(spa->spa_dsl_pool));
vdev_reopen(spa->spa_root_vdev);
spa->spa_scrub_reopen = B_FALSE;
(void) spa_vdev_state_exit(spa, NULL, 0);
spa_close(spa, FTAG);
return (0);
}
/*
* inputs:
* zc_name name of filesystem
*
* outputs:
* zc_string name of conflicting snapshot, if there is one
*/
static int
zfs_ioc_promote(zfs_cmd_t *zc)
{
dsl_pool_t *dp;
dsl_dataset_t *ds, *ods;
char origin[ZFS_MAX_DATASET_NAME_LEN];
char *cp;
int error;
zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0 ||
strchr(zc->zc_name, '%'))
return (SET_ERROR(EINVAL));
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &ds);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
if (!dsl_dir_is_clone(ds->ds_dir)) {
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (SET_ERROR(EINVAL));
}
error = dsl_dataset_hold_obj(dp,
dsl_dir_phys(ds->ds_dir)->dd_origin_obj, FTAG, &ods);
if (error != 0) {
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
dsl_dataset_name(ods, origin);
dsl_dataset_rele(ods, FTAG);
dsl_dataset_rele(ds, FTAG);
dsl_pool_rele(dp, FTAG);
/*
* We don't need to unmount *all* the origin fs's snapshots, but
* it's easier.
*/
cp = strchr(origin, '@');
if (cp)
*cp = '\0';
(void) dmu_objset_find(origin,
zfs_unmount_snap_cb, NULL, DS_FIND_SNAPSHOTS);
return (dsl_dataset_promote(zc->zc_name, zc->zc_string));
}
/*
* Retrieve a single {user|group|project}{used|quota}@... property.
*
* inputs:
* zc_name name of filesystem
* zc_objset_type zfs_userquota_prop_t
* zc_value domain name (e.g. "S-1-234-567-89")
* zc_guid RID/UID/GID
*
* outputs:
* zc_cookie property value
*/
static int
zfs_ioc_userspace_one(zfs_cmd_t *zc)
{
zfsvfs_t *zfsvfs;
int error;
if (zc->zc_objset_type >= ZFS_NUM_USERQUOTA_PROPS)
return (SET_ERROR(EINVAL));
error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
if (error != 0)
return (error);
error = zfs_userspace_one(zfsvfs,
zc->zc_objset_type, zc->zc_value, zc->zc_guid, &zc->zc_cookie);
zfsvfs_rele(zfsvfs, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_cookie zap cursor
* zc_objset_type zfs_userquota_prop_t
* zc_nvlist_dst[_size] buffer to fill (not really an nvlist)
*
* outputs:
* zc_nvlist_dst[_size] data buffer (array of zfs_useracct_t)
* zc_cookie zap cursor
*/
static int
zfs_ioc_userspace_many(zfs_cmd_t *zc)
{
zfsvfs_t *zfsvfs;
int bufsize = zc->zc_nvlist_dst_size;
if (bufsize <= 0)
return (SET_ERROR(ENOMEM));
int error = zfsvfs_hold(zc->zc_name, FTAG, &zfsvfs, B_FALSE);
if (error != 0)
return (error);
void *buf = vmem_alloc(bufsize, KM_SLEEP);
error = zfs_userspace_many(zfsvfs, zc->zc_objset_type, &zc->zc_cookie,
buf, &zc->zc_nvlist_dst_size);
if (error == 0) {
error = xcopyout(buf,
(void *)(uintptr_t)zc->zc_nvlist_dst,
zc->zc_nvlist_dst_size);
}
vmem_free(buf, bufsize);
zfsvfs_rele(zfsvfs, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
*
* outputs:
* none
*/
static int
zfs_ioc_userspace_upgrade(zfs_cmd_t *zc)
{
objset_t *os;
int error = 0;
zfsvfs_t *zfsvfs;
if (getzfsvfs(zc->zc_name, &zfsvfs) == 0) {
if (!dmu_objset_userused_enabled(zfsvfs->z_os)) {
/*
* If userused is not enabled, it may be because the
* objset needs to be closed & reopened (to grow the
* objset_phys_t). Suspending and resuming the fs will do that.
*/
dsl_dataset_t *ds, *newds;
ds = dmu_objset_ds(zfsvfs->z_os);
error = zfs_suspend_fs(zfsvfs);
if (error == 0) {
dmu_objset_refresh_ownership(ds, &newds,
B_TRUE, zfsvfs);
error = zfs_resume_fs(zfsvfs, newds);
}
}
if (error == 0)
error = dmu_objset_userspace_upgrade(zfsvfs->z_os);
deactivate_super(zfsvfs->z_sb);
} else {
/* XXX kind of reading contents without owning */
error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os);
if (error != 0)
return (error);
error = dmu_objset_userspace_upgrade(os);
dmu_objset_rele_flags(os, B_TRUE, FTAG);
}
return (error);
}
/*
* inputs:
* zc_name name of filesystem
*
* outputs:
* none
*/
static int
zfs_ioc_id_quota_upgrade(zfs_cmd_t *zc)
{
objset_t *os;
int error;
error = dmu_objset_hold_flags(zc->zc_name, B_TRUE, FTAG, &os);
if (error != 0)
return (error);
if (dmu_objset_userobjspace_upgradable(os) ||
dmu_objset_projectquota_upgradable(os)) {
mutex_enter(&os->os_upgrade_lock);
if (os->os_upgrade_id == 0) {
/* clear potential error code and retry */
os->os_upgrade_status = 0;
mutex_exit(&os->os_upgrade_lock);
dmu_objset_id_quota_upgrade(os);
} else {
mutex_exit(&os->os_upgrade_lock);
}
dsl_pool_rele(dmu_objset_pool(os), FTAG);
taskq_wait_id(os->os_spa->spa_upgrade_taskq, os->os_upgrade_id);
error = os->os_upgrade_status;
} else {
dsl_pool_rele(dmu_objset_pool(os), FTAG);
}
dsl_dataset_rele_flags(dmu_objset_ds(os), DS_HOLD_FLAG_DECRYPT, FTAG);
return (error);
}
static int
zfs_ioc_share(zfs_cmd_t *zc)
{
return (SET_ERROR(ENOSYS));
}
ace_t full_access[] = {
{(uid_t)-1, ACE_ALL_PERMS, ACE_EVERYONE, 0}
};
/*
* inputs:
* zc_name name of containing filesystem
* zc_obj object # beyond which we want next in-use object #
*
* outputs:
* zc_obj next in-use object #
*/
static int
zfs_ioc_next_obj(zfs_cmd_t *zc)
{
objset_t *os = NULL;
int error;
error = dmu_objset_hold(zc->zc_name, FTAG, &os);
if (error != 0)
return (error);
error = dmu_object_next(os, &zc->zc_obj, B_FALSE, 0);
dmu_objset_rele(os, FTAG);
return (error);
}
/*
* inputs:
* zc_name name of filesystem
* zc_value prefix name for snapshot
* zc_cleanup_fd cleanup-on-exit file descriptor for calling process
*
* outputs:
* zc_value short name of new snapshot
*/
static int
zfs_ioc_tmp_snapshot(zfs_cmd_t *zc)
{
char *snap_name;
char *hold_name;
int error;
minor_t minor;
error = zfs_onexit_fd_hold(zc->zc_cleanup_fd, &minor);
if (error != 0)
return (error);
snap_name = kmem_asprintf("%s-%016llx", zc->zc_value,
(u_longlong_t)ddi_get_lbolt64());
hold_name = kmem_asprintf("%%%s", zc->zc_value);
error = dsl_dataset_snapshot_tmp(zc->zc_name, snap_name, minor,
hold_name);
if (error == 0)
(void) strlcpy(zc->zc_value, snap_name,
sizeof (zc->zc_value));
strfree(snap_name);
strfree(hold_name);
zfs_onexit_fd_rele(zc->zc_cleanup_fd);
return (error);
}
/*
* inputs:
* zc_name name of "to" snapshot
* zc_value name of "from" snapshot
* zc_cookie file descriptor to write diff data on
*
* outputs:
* dmu_diff_record_t's to the file descriptor
*/
static int
zfs_ioc_diff(zfs_cmd_t *zc)
{
file_t *fp;
offset_t off;
int error;
fp = getf(zc->zc_cookie);
if (fp == NULL)
return (SET_ERROR(EBADF));
off = fp->f_offset;
error = dmu_diff(zc->zc_name, zc->zc_value, fp->f_vnode, &off);
if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0)
fp->f_offset = off;
releasef(zc->zc_cookie);
return (error);
}
/*
* Remove all ACL files in shares dir
*/
#ifdef HAVE_SMB_SHARE
static int
zfs_smb_acl_purge(znode_t *dzp)
{
zap_cursor_t zc;
zap_attribute_t zap;
zfsvfs_t *zfsvfs = ZTOZSB(dzp);
int error;
for (zap_cursor_init(&zc, zfsvfs->z_os, dzp->z_id);
(error = zap_cursor_retrieve(&zc, &zap)) == 0;
zap_cursor_advance(&zc)) {
if ((error = VOP_REMOVE(ZTOV(dzp), zap.za_name, kcred,
NULL, 0)) != 0)
break;
}
zap_cursor_fini(&zc);
return (error);
}
#endif /* HAVE_SMB_SHARE */
static int
zfs_ioc_smb_acl(zfs_cmd_t *zc)
{
#ifdef HAVE_SMB_SHARE
vnode_t *vp;
znode_t *dzp;
vnode_t *resourcevp = NULL;
znode_t *sharedir;
zfsvfs_t *zfsvfs;
nvlist_t *nvlist;
char *src, *target;
vattr_t vattr;
vsecattr_t vsec;
int error = 0;
if ((error = lookupname(zc->zc_value, UIO_SYSSPACE,
NO_FOLLOW, NULL, &vp)) != 0)
return (error);
/* Now make sure mntpnt and dataset are ZFS */
if (vp->v_vfsp->vfs_fstype != zfsfstype ||
(strcmp((char *)refstr_value(vp->v_vfsp->vfs_resource),
zc->zc_name) != 0)) {
VN_RELE(vp);
return (SET_ERROR(EINVAL));
}
dzp = VTOZ(vp);
zfsvfs = ZTOZSB(dzp);
ZFS_ENTER(zfsvfs);
/*
* Create the share dir if it's missing.
*/
mutex_enter(&zfsvfs->z_lock);
if (zfsvfs->z_shares_dir == 0) {
dmu_tx_t *tx;
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, MASTER_NODE_OBJ, TRUE,
ZFS_SHARES_DIR);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error != 0) {
dmu_tx_abort(tx);
} else {
error = zfs_create_share_dir(zfsvfs, tx);
dmu_tx_commit(tx);
}
if (error != 0) {
mutex_exit(&zfsvfs->z_lock);
VN_RELE(vp);
ZFS_EXIT(zfsvfs);
return (error);
}
}
mutex_exit(&zfsvfs->z_lock);
ASSERT(zfsvfs->z_shares_dir);
if ((error = zfs_zget(zfsvfs, zfsvfs->z_shares_dir, &sharedir)) != 0) {
VN_RELE(vp);
ZFS_EXIT(zfsvfs);
return (error);
}
switch (zc->zc_cookie) {
case ZFS_SMB_ACL_ADD:
vattr.va_mask = AT_MODE|AT_UID|AT_GID|AT_TYPE;
vattr.va_mode = S_IFREG|0777;
vattr.va_uid = 0;
vattr.va_gid = 0;
vsec.vsa_mask = VSA_ACE;
vsec.vsa_aclentp = &full_access;
vsec.vsa_aclentsz = sizeof (full_access);
vsec.vsa_aclcnt = 1;
error = VOP_CREATE(ZTOV(sharedir), zc->zc_string,
&vattr, EXCL, 0, &resourcevp, kcred, 0, NULL, &vsec);
if (resourcevp)
VN_RELE(resourcevp);
break;
case ZFS_SMB_ACL_REMOVE:
error = VOP_REMOVE(ZTOV(sharedir), zc->zc_string, kcred,
NULL, 0);
break;
case ZFS_SMB_ACL_RENAME:
if ((error = get_nvlist(zc->zc_nvlist_src,
zc->zc_nvlist_src_size, zc->zc_iflags, &nvlist)) != 0) {
VN_RELE(vp);
VN_RELE(ZTOV(sharedir));
ZFS_EXIT(zfsvfs);
return (error);
}
if (nvlist_lookup_string(nvlist, ZFS_SMB_ACL_SRC, &src) ||
nvlist_lookup_string(nvlist, ZFS_SMB_ACL_TARGET,
&target)) {
VN_RELE(vp);
VN_RELE(ZTOV(sharedir));
ZFS_EXIT(zfsvfs);
nvlist_free(nvlist);
/* the nvlist lookup failed but error is still 0 here */
return (SET_ERROR(EINVAL));
}
error = VOP_RENAME(ZTOV(sharedir), src, ZTOV(sharedir), target,
kcred, NULL, 0);
nvlist_free(nvlist);
break;
case ZFS_SMB_ACL_PURGE:
error = zfs_smb_acl_purge(sharedir);
break;
default:
error = SET_ERROR(EINVAL);
break;
}
VN_RELE(vp);
VN_RELE(ZTOV(sharedir));
ZFS_EXIT(zfsvfs);
return (error);
#else
return (SET_ERROR(ENOTSUP));
#endif /* HAVE_SMB_SHARE */
}
/*
* innvl: {
* "holds" -> { snapname -> holdname (string), ... }
* (optional) "cleanup_fd" -> fd (int32)
* }
*
* outnvl: {
* snapname -> error value (int32)
* ...
* }
*/
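/*
 * Illustrative sketch (not part of this change): the "holds" nvlist is
 * normally built in userspace with libnvpair and submitted through
 * libzfs_core; the snapshot and tag names below are hypothetical.
 *
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_string(holds, "pool/fs@snap", "my-tag");
 *	int err = lzc_hold(holds, -1, &errlist);	(-1: no cleanup fd)
 *	fnvlist_free(holds);
 *	nvlist_free(errlist);
 */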
/* ARGSUSED */
static int
zfs_ioc_hold(const char *pool, nvlist_t *args, nvlist_t *errlist)
{
nvpair_t *pair;
nvlist_t *holds;
int cleanup_fd = -1;
int error;
minor_t minor = 0;
error = nvlist_lookup_nvlist(args, "holds", &holds);
if (error != 0)
return (SET_ERROR(EINVAL));
/* make sure the user didn't pass us any invalid (empty) tags */
for (pair = nvlist_next_nvpair(holds, NULL); pair != NULL;
pair = nvlist_next_nvpair(holds, pair)) {
char *htag;
error = nvpair_value_string(pair, &htag);
if (error != 0)
return (SET_ERROR(error));
if (strlen(htag) == 0)
return (SET_ERROR(EINVAL));
}
if (nvlist_lookup_int32(args, "cleanup_fd", &cleanup_fd) == 0) {
error = zfs_onexit_fd_hold(cleanup_fd, &minor);
if (error != 0)
return (error);
}
error = dsl_dataset_user_hold(holds, minor, errlist);
if (minor != 0)
zfs_onexit_fd_rele(cleanup_fd);
return (error);
}
/*
* innvl is not used.
*
* outnvl: {
* holdname -> time added (uint64 seconds since epoch)
* ...
* }
*/
/* ARGSUSED */
static int
zfs_ioc_get_holds(const char *snapname, nvlist_t *args, nvlist_t *outnvl)
{
ASSERT3P(args, ==, NULL);
return (dsl_dataset_get_holds(snapname, outnvl));
}
/*
* innvl: {
* snapname -> { holdname, ... }
* ...
* }
*
* outnvl: {
* snapname -> error value (int32)
* ...
* }
*/
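/*
 * Illustrative sketch (not part of this change): a release request pairs
 * each snapshot with a nested nvlist of hold tags; the names below are
 * hypothetical.
 *
 *	nvlist_t *tags = fnvlist_alloc();
 *	nvlist_t *holds = fnvlist_alloc();
 *	nvlist_t *errlist = NULL;
 *	fnvlist_add_boolean(tags, "my-tag");
 *	fnvlist_add_nvlist(holds, "pool/fs@snap", tags);
 *	(void) lzc_release(holds, &errlist);
 *	fnvlist_free(tags);
 *	fnvlist_free(holds);
 *	nvlist_free(errlist);
 */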
/* ARGSUSED */
static int
zfs_ioc_release(const char *pool, nvlist_t *holds, nvlist_t *errlist)
{
return (dsl_dataset_user_release(holds, errlist));
}
/*
* inputs:
* zc_guid flags (ZEVENT_NONBLOCK)
* zc_cleanup_fd zevent file descriptor
*
* outputs:
* zc_nvlist_dst next nvlist event
* zc_cookie dropped events since last get
*/
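/*
 * Illustrative sketch (not part of this change): consumers such as zed
 * normally drive this ioctl through libzfs; the snippet assumes an
 * already-opened libzfs handle and zevent file descriptor.
 *
 *	nvlist_t *event = NULL;
 *	int dropped = 0;
 *	if (zpool_events_next(hdl, &event, &dropped,
 *	    ZEVENT_NONBLOCK, zevent_fd) == 0 && event != NULL) {
 *		(process the event, note 'dropped')
 *		nvlist_free(event);
 *	}
 */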
static int
zfs_ioc_events_next(zfs_cmd_t *zc)
{
zfs_zevent_t *ze;
nvlist_t *event = NULL;
minor_t minor;
uint64_t dropped = 0;
int error;
error = zfs_zevent_fd_hold(zc->zc_cleanup_fd, &minor, &ze);
if (error != 0)
return (error);
do {
error = zfs_zevent_next(ze, &event,
&zc->zc_nvlist_dst_size, &dropped);
if (event != NULL) {
zc->zc_cookie = dropped;
error = put_nvlist(zc, event);
nvlist_free(event);
}
if (zc->zc_guid & ZEVENT_NONBLOCK)
break;
if ((error == 0) || (error != ENOENT))
break;
error = zfs_zevent_wait(ze);
if (error != 0)
break;
} while (1);
zfs_zevent_fd_rele(zc->zc_cleanup_fd);
return (error);
}
/*
* outputs:
* zc_cookie cleared events count
*/
static int
zfs_ioc_events_clear(zfs_cmd_t *zc)
{
int count;
zfs_zevent_drain_all(&count);
zc->zc_cookie = count;
return (0);
}
/*
* inputs:
* zc_guid eid | ZEVENT_SEEK_START | ZEVENT_SEEK_END
* zc_cleanup_fd zevent file descriptor
*/
static int
zfs_ioc_events_seek(zfs_cmd_t *zc)
{
zfs_zevent_t *ze;
minor_t minor;
int error;
error = zfs_zevent_fd_hold(zc->zc_cleanup_fd, &minor, &ze);
if (error != 0)
return (error);
error = zfs_zevent_seek(ze, zc->zc_guid);
zfs_zevent_fd_rele(zc->zc_cleanup_fd);
return (error);
}
/*
* inputs:
* zc_name name of new filesystem or snapshot
* zc_value full name of old snapshot
*
* outputs:
* zc_cookie space in bytes
* zc_objset_type compressed space in bytes
* zc_perm_action uncompressed space in bytes
*/
static int
zfs_ioc_space_written(zfs_cmd_t *zc)
{
int error;
dsl_pool_t *dp;
dsl_dataset_t *new, *old;
error = dsl_pool_hold(zc->zc_name, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, zc->zc_name, FTAG, &new);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
error = dsl_dataset_hold(dp, zc->zc_value, FTAG, &old);
if (error != 0) {
dsl_dataset_rele(new, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
error = dsl_dataset_space_written(old, new, &zc->zc_cookie,
&zc->zc_objset_type, &zc->zc_perm_action);
dsl_dataset_rele(old, FTAG);
dsl_dataset_rele(new, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
/*
* innvl: {
* "firstsnap" -> snapshot name
* }
*
* outnvl: {
* "used" -> space in bytes
* "compressed" -> compressed space in bytes
* "uncompressed" -> uncompressed space in bytes
* }
*/
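/*
 * Illustrative sketch (not part of this change): libzfs_core wraps this
 * ioctl as lzc_snaprange_space(); the snapshot names are hypothetical and
 * only the "used" value is returned by that wrapper.
 *
 *	uint64_t used;
 *	int err = lzc_snaprange_space("pool/fs@first", "pool/fs@last", &used);
 */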
static int
zfs_ioc_space_snaps(const char *lastsnap, nvlist_t *innvl, nvlist_t *outnvl)
{
int error;
dsl_pool_t *dp;
dsl_dataset_t *new, *old;
char *firstsnap;
uint64_t used, comp, uncomp;
if (nvlist_lookup_string(innvl, "firstsnap", &firstsnap) != 0)
return (SET_ERROR(EINVAL));
error = dsl_pool_hold(lastsnap, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, lastsnap, FTAG, &new);
if (error == 0 && !new->ds_is_snapshot) {
dsl_dataset_rele(new, FTAG);
error = SET_ERROR(EINVAL);
}
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
error = dsl_dataset_hold(dp, firstsnap, FTAG, &old);
if (error == 0 && !old->ds_is_snapshot) {
dsl_dataset_rele(old, FTAG);
error = SET_ERROR(EINVAL);
}
if (error != 0) {
dsl_dataset_rele(new, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
error = dsl_dataset_space_wouldfree(old, new, &used, &comp, &uncomp);
dsl_dataset_rele(old, FTAG);
dsl_dataset_rele(new, FTAG);
dsl_pool_rele(dp, FTAG);
fnvlist_add_uint64(outnvl, "used", used);
fnvlist_add_uint64(outnvl, "compressed", comp);
fnvlist_add_uint64(outnvl, "uncompressed", uncomp);
return (error);
}
/*
* innvl: {
* "fd" -> file descriptor to write stream to (int32)
* (optional) "fromsnap" -> full snap name to send an incremental from
* (optional) "largeblockok" -> (value ignored)
* indicates that blocks > 128KB are permitted
* (optional) "embedok" -> (value ignored)
* presence indicates DRR_WRITE_EMBEDDED records are permitted
* (optional) "compressok" -> (value ignored)
* presence indicates compressed DRR_WRITE records are permitted
* (optional) "rawok" -> (value ignored)
* presence indicates raw encrypted records should be used.
* (optional) "resume_object" and "resume_offset" -> (uint64)
* if present, resume send stream from specified object and offset.
* }
*
* outnvl is unused
*/
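/*
 * Illustrative sketch (not part of this change): libzfs_core builds this
 * innvl on the caller's behalf; a full (non-incremental) send of a
 * hypothetical snapshot to an already-open descriptor looks roughly like:
 *
 *	int err = lzc_send("pool/fs@snap", NULL, out_fd,
 *	    LZC_SEND_FLAG_EMBED_DATA);
 */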
/* ARGSUSED */
static int
zfs_ioc_send_new(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
{
int error;
offset_t off;
char *fromname = NULL;
int fd;
file_t *fp;
boolean_t largeblockok;
boolean_t embedok;
boolean_t compressok;
boolean_t rawok;
uint64_t resumeobj = 0;
uint64_t resumeoff = 0;
error = nvlist_lookup_int32(innvl, "fd", &fd);
if (error != 0)
return (SET_ERROR(EINVAL));
(void) nvlist_lookup_string(innvl, "fromsnap", &fromname);
largeblockok = nvlist_exists(innvl, "largeblockok");
embedok = nvlist_exists(innvl, "embedok");
compressok = nvlist_exists(innvl, "compressok");
rawok = nvlist_exists(innvl, "rawok");
(void) nvlist_lookup_uint64(innvl, "resume_object", &resumeobj);
(void) nvlist_lookup_uint64(innvl, "resume_offset", &resumeoff);
if ((fp = getf(fd)) == NULL)
return (SET_ERROR(EBADF));
off = fp->f_offset;
error = dmu_send(snapname, fromname, embedok, largeblockok, compressok,
rawok, fd, resumeobj, resumeoff, fp->f_vnode, &off);
if (VOP_SEEK(fp->f_vnode, fp->f_offset, &off, NULL) == 0)
fp->f_offset = off;
releasef(fd);
return (error);
}
/*
* Determine approximately how large a zfs send stream will be -- the number
* of bytes that will be written to the fd supplied to zfs_ioc_send_new().
*
* innvl: {
* (optional) "from" -> full snap or bookmark name to send an incremental
* from
* (optional) "largeblockok" -> (value ignored)
* indicates that blocks > 128KB are permitted
* (optional) "embedok" -> (value ignored)
* presence indicates DRR_WRITE_EMBEDDED records are permitted
* (optional) "compressok" -> (value ignored)
* presence indicates compressed DRR_WRITE records are permitted
* (optional) "rawok" -> (value ignored)
* presence indicates raw encrypted records should be used.
* }
*
* outnvl: {
* "space" -> bytes of space (uint64)
* }
*/
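/*
 * Illustrative sketch (not part of this change): libzfs_core exposes this
 * as lzc_send_space(); the exact signature may differ between releases,
 * but estimating an incremental from a hypothetical snapshot looks
 * roughly like:
 *
 *	uint64_t space;
 *	int err = lzc_send_space("pool/fs@new", "pool/fs@old", 0, &space);
 */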
static int
zfs_ioc_send_space(const char *snapname, nvlist_t *innvl, nvlist_t *outnvl)
{
dsl_pool_t *dp;
dsl_dataset_t *tosnap;
int error;
char *fromname;
boolean_t compressok;
boolean_t rawok;
uint64_t space;
error = dsl_pool_hold(snapname, FTAG, &dp);
if (error != 0)
return (error);
error = dsl_dataset_hold(dp, snapname, FTAG, &tosnap);
if (error != 0) {
dsl_pool_rele(dp, FTAG);
return (error);
}
compressok = nvlist_exists(innvl, "compressok");
rawok = nvlist_exists(innvl, "rawok");
error = nvlist_lookup_string(innvl, "from", &fromname);
if (error == 0) {
if (strchr(fromname, '@') != NULL) {
/*
* If from is a snapshot, hold it and use the more
* efficient dmu_send_estimate to estimate send space
* size using deadlists.
*/
dsl_dataset_t *fromsnap;
error = dsl_dataset_hold(dp, fromname, FTAG, &fromsnap);
if (error != 0)
goto out;
error = dmu_send_estimate(tosnap, fromsnap,
compressok || rawok, &space);
dsl_dataset_rele(fromsnap, FTAG);
} else if (strchr(fromname, '#') != NULL) {
/*
* If from is a bookmark, fetch the creation TXG of the
* snapshot it was created from and use that to find
* blocks that were born after it.
*/
zfs_bookmark_phys_t frombm;
error = dsl_bookmark_lookup(dp, fromname, tosnap,
&frombm);
if (error != 0)
goto out;
error = dmu_send_estimate_from_txg(tosnap,
frombm.zbm_creation_txg, compressok || rawok,
&space);
} else {
/*
* from is not properly formatted as a snapshot or
* bookmark
*/
error = SET_ERROR(EINVAL);
goto out;
}
} else {
/*
* If estimating the size of a full send, use dmu_send_estimate.
*/
error = dmu_send_estimate(tosnap, NULL, compressok || rawok,
&space);
}
fnvlist_add_uint64(outnvl, "space", space);
out:
dsl_dataset_rele(tosnap, FTAG);
dsl_pool_rele(dp, FTAG);
return (error);
}
/*
* Sync the currently open TXG to disk for the specified pool.
* This is somewhat similar to 'zfs_sync()'.
* For cases that do not result in error this ioctl will wait for
* the currently open TXG to commit before returning back to the caller.
*
* innvl: {
* "force" -> when true, force uberblock update even if there is no dirty data.
* In addition this will cause the vdev configuration to be written
* out including updating the zpool cache file. (boolean_t)
* }
*
* onvl is unused
*/
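/*
 * Illustrative sketch (not part of this change): a caller forcing an
 * uberblock update would build the innvl along these lines (pool name
 * hypothetical); lzc_sync() in libzfs_core provides the same plumbing.
 *
 *	nvlist_t *innvl = fnvlist_alloc();
 *	nvlist_t *outnvl = NULL;
 *	fnvlist_add_boolean_value(innvl, "force", B_TRUE);
 *	int err = lzc_sync("tank", innvl, &outnvl);
 *	fnvlist_free(innvl);
 */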
/* ARGSUSED */
static int
zfs_ioc_pool_sync(const char *pool, nvlist_t *innvl, nvlist_t *onvl)
{
int err;
boolean_t force = B_FALSE;
spa_t *spa;
if ((err = spa_open(pool, &spa, FTAG)) != 0)
return (err);
if (innvl) {
if (nvlist_lookup_boolean_value(innvl, "force", &force) != 0) {
err = SET_ERROR(EINVAL);
goto out;
}
}
if (force) {
spa_config_enter(spa, SCL_CONFIG, FTAG, RW_WRITER);
vdev_config_dirty(spa->spa_root_vdev);
spa_config_exit(spa, SCL_CONFIG, FTAG);
}
txg_wait_synced(spa_get_dsl(spa), 0);
out:
spa_close(spa, FTAG);
return (err);
}
/*
* Load a user's wrapping key into the kernel.
* innvl: {
* "hidden_args" -> { "wkeydata" -> value }
* raw uint8_t array of encryption wrapping key data (32 bytes)
* (optional) "noop" -> (value ignored)
* presence indicates the key should only be verified, not loaded
* }
*/
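/*
 * Illustrative sketch (not part of this change): the wrapping key travels
 * in the "hidden_args" nvlist so it is excluded from pool history; the
 * 32-byte key buffer below is hypothetical.
 *
 *	nvlist_t *innvl = fnvlist_alloc();
 *	nvlist_t *hidden_args = fnvlist_alloc();
 *	fnvlist_add_uint8_array(hidden_args, "wkeydata", wkeydata, 32);
 *	fnvlist_add_nvlist(innvl, ZPOOL_HIDDEN_ARGS, hidden_args);
 *	(issue ZFS_IOC_LOAD_KEY; lzc_load_key() wraps this)
 *	fnvlist_free(hidden_args);
 *	fnvlist_free(innvl);
 */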
/* ARGSUSED */
static int
zfs_ioc_load_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl)
{
int ret;
dsl_crypto_params_t *dcp = NULL;
nvlist_t *hidden_args;
boolean_t noop = nvlist_exists(innvl, "noop");
if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) {
ret = SET_ERROR(EINVAL);
goto error;
}
ret = nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args);
if (ret != 0) {
ret = SET_ERROR(EINVAL);
goto error;
}
ret = dsl_crypto_params_create_nvlist(DCP_CMD_NONE, NULL,
hidden_args, &dcp);
if (ret != 0)
goto error;
ret = spa_keystore_load_wkey(dsname, dcp, noop);
if (ret != 0)
goto error;
dsl_crypto_params_free(dcp, noop);
return (0);
error:
dsl_crypto_params_free(dcp, B_TRUE);
return (ret);
}
/*
* Unload a user's wrapping key from the kernel.
* Both innvl and outnvl are unused.
*/
/* ARGSUSED */
static int
zfs_ioc_unload_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl)
{
int ret = 0;
if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) {
ret = (SET_ERROR(EINVAL));
goto out;
}
ret = spa_keystore_unload_wkey(dsname);
if (ret != 0)
goto out;
out:
return (ret);
}
/*
* Changes a user's wrapping key used to decrypt a dataset. The keyformat,
* keylocation, pbkdf2salt, and pbkdf2iters properties can also be specified
* here to change how the key is derived in userspace.
*
* innvl: {
* "hidden_args" (optional) -> { "wkeydata" -> value }
* raw uint8_t array of new encryption wrapping key data (32 bytes)
* "props" (optional) -> { prop -> value }
* }
*
* outnvl is unused
*/
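/*
 * Illustrative sketch (not part of this change): changing a key pairs the
 * new wrapping key (again under "hidden_args") with any updated key
 * properties; the property values here are hypothetical.
 *
 *	nvlist_t *props = fnvlist_alloc();
 *	fnvlist_add_uint64(props, "pbkdf2iters", 350000);
 *	fnvlist_add_string(props, "keylocation", "prompt");
 *	(add "hidden_args"/"wkeydata" as for ZFS_IOC_LOAD_KEY above,
 *	    then issue ZFS_IOC_CHANGE_KEY; lzc_change_key() wraps this)
 *	fnvlist_free(props);
 */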
/* ARGSUSED */
static int
zfs_ioc_change_key(const char *dsname, nvlist_t *innvl, nvlist_t *outnvl)
{
int ret;
uint64_t cmd = DCP_CMD_NONE;
dsl_crypto_params_t *dcp = NULL;
nvlist_t *args = NULL, *hidden_args = NULL;
if (strchr(dsname, '@') != NULL || strchr(dsname, '%') != NULL) {
ret = (SET_ERROR(EINVAL));
goto error;
}
(void) nvlist_lookup_uint64(innvl, "crypt_cmd", &cmd);
(void) nvlist_lookup_nvlist(innvl, "props", &args);
(void) nvlist_lookup_nvlist(innvl, ZPOOL_HIDDEN_ARGS, &hidden_args);
ret = dsl_crypto_params_create_nvlist(cmd, args, hidden_args, &dcp);
if (ret != 0)
goto error;
ret = spa_keystore_change_key(dsname, dcp);
if (ret != 0)
goto error;
dsl_crypto_params_free(dcp, B_FALSE);
return (0);
error:
dsl_crypto_params_free(dcp, B_TRUE);
return (ret);
}
static zfs_ioc_vec_t zfs_ioc_vec[ZFS_IOC_LAST - ZFS_IOC_FIRST];
static void
zfs_ioctl_register_legacy(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy, zfs_ioc_namecheck_t namecheck,
boolean_t log_history, zfs_ioc_poolcheck_t pool_check)
{
zfs_ioc_vec_t *vec = &zfs_ioc_vec[ioc - ZFS_IOC_FIRST];
ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
ASSERT3U(ioc, <, ZFS_IOC_LAST);
ASSERT3P(vec->zvec_legacy_func, ==, NULL);
ASSERT3P(vec->zvec_func, ==, NULL);
vec->zvec_legacy_func = func;
vec->zvec_secpolicy = secpolicy;
vec->zvec_namecheck = namecheck;
vec->zvec_allow_log = log_history;
vec->zvec_pool_check = pool_check;
}
/*
* See the block comment at the beginning of this file for details on
* each argument to this function.
*/
static void
zfs_ioctl_register(const char *name, zfs_ioc_t ioc, zfs_ioc_func_t *func,
zfs_secpolicy_func_t *secpolicy, zfs_ioc_namecheck_t namecheck,
zfs_ioc_poolcheck_t pool_check, boolean_t smush_outnvlist,
boolean_t allow_log)
{
zfs_ioc_vec_t *vec = &zfs_ioc_vec[ioc - ZFS_IOC_FIRST];
ASSERT3U(ioc, >=, ZFS_IOC_FIRST);
ASSERT3U(ioc, <, ZFS_IOC_LAST);
ASSERT3P(vec->zvec_legacy_func, ==, NULL);
ASSERT3P(vec->zvec_func, ==, NULL);
/* if we are logging, the name must be valid */
ASSERT(!allow_log || namecheck != NO_NAME);
vec->zvec_name = name;
vec->zvec_func = func;
vec->zvec_secpolicy = secpolicy;
vec->zvec_namecheck = namecheck;
vec->zvec_pool_check = pool_check;
vec->zvec_smush_outnvlist = smush_outnvlist;
vec->zvec_allow_log = allow_log;
}
static void
zfs_ioctl_register_pool(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy, boolean_t log_history,
zfs_ioc_poolcheck_t pool_check)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
POOL_NAME, log_history, pool_check);
}
static void
zfs_ioctl_register_dataset_nolog(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy, zfs_ioc_poolcheck_t pool_check)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
DATASET_NAME, B_FALSE, pool_check);
}
static void
zfs_ioctl_register_pool_modify(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func)
{
zfs_ioctl_register_legacy(ioc, func, zfs_secpolicy_config,
POOL_NAME, B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
}
static void
zfs_ioctl_register_pool_meta(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
NO_NAME, B_FALSE, POOL_CHECK_NONE);
}
static void
zfs_ioctl_register_dataset_read_secpolicy(zfs_ioc_t ioc,
zfs_ioc_legacy_func_t *func, zfs_secpolicy_func_t *secpolicy)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
DATASET_NAME, B_FALSE, POOL_CHECK_SUSPENDED);
}
static void
zfs_ioctl_register_dataset_read(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func)
{
zfs_ioctl_register_dataset_read_secpolicy(ioc, func,
zfs_secpolicy_read);
}
static void
zfs_ioctl_register_dataset_modify(zfs_ioc_t ioc, zfs_ioc_legacy_func_t *func,
zfs_secpolicy_func_t *secpolicy)
{
zfs_ioctl_register_legacy(ioc, func, secpolicy,
DATASET_NAME, B_TRUE, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
}
static void
zfs_ioctl_init(void)
{
zfs_ioctl_register("snapshot", ZFS_IOC_SNAPSHOT,
zfs_ioc_snapshot, zfs_secpolicy_snapshot, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
zfs_ioctl_register("log_history", ZFS_IOC_LOG_HISTORY,
zfs_ioc_log_history, zfs_secpolicy_log_history, NO_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE);
zfs_ioctl_register("space_snaps", ZFS_IOC_SPACE_SNAPS,
zfs_ioc_space_snaps, zfs_secpolicy_read, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE);
zfs_ioctl_register("send", ZFS_IOC_SEND_NEW,
zfs_ioc_send_new, zfs_secpolicy_send_new, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE);
zfs_ioctl_register("send_space", ZFS_IOC_SEND_SPACE,
zfs_ioc_send_space, zfs_secpolicy_read, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE);
zfs_ioctl_register("create", ZFS_IOC_CREATE,
zfs_ioc_create, zfs_secpolicy_create_clone, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
zfs_ioctl_register("clone", ZFS_IOC_CLONE,
zfs_ioc_clone, zfs_secpolicy_create_clone, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
+ zfs_ioctl_register("remap", ZFS_IOC_REMAP,
+ zfs_ioc_remap, zfs_secpolicy_remap, DATASET_NAME,
+ POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE);
+
zfs_ioctl_register("destroy_snaps", ZFS_IOC_DESTROY_SNAPS,
zfs_ioc_destroy_snaps, zfs_secpolicy_destroy_snaps, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
zfs_ioctl_register("hold", ZFS_IOC_HOLD,
zfs_ioc_hold, zfs_secpolicy_hold, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
zfs_ioctl_register("release", ZFS_IOC_RELEASE,
zfs_ioc_release, zfs_secpolicy_release, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
zfs_ioctl_register("get_holds", ZFS_IOC_GET_HOLDS,
zfs_ioc_get_holds, zfs_secpolicy_read, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE);
zfs_ioctl_register("rollback", ZFS_IOC_ROLLBACK,
zfs_ioc_rollback, zfs_secpolicy_rollback, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_TRUE);
zfs_ioctl_register("bookmark", ZFS_IOC_BOOKMARK,
zfs_ioc_bookmark, zfs_secpolicy_bookmark, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
zfs_ioctl_register("get_bookmarks", ZFS_IOC_GET_BOOKMARKS,
zfs_ioc_get_bookmarks, zfs_secpolicy_read, DATASET_NAME,
POOL_CHECK_SUSPENDED, B_FALSE, B_FALSE);
zfs_ioctl_register("destroy_bookmarks", ZFS_IOC_DESTROY_BOOKMARKS,
zfs_ioc_destroy_bookmarks, zfs_secpolicy_destroy_bookmarks,
POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
zfs_ioctl_register("receive", ZFS_IOC_RECV_NEW,
zfs_ioc_recv_new, zfs_secpolicy_recv_new, DATASET_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE, B_TRUE);
zfs_ioctl_register("load-key", ZFS_IOC_LOAD_KEY,
zfs_ioc_load_key, zfs_secpolicy_load_key,
DATASET_NAME, POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE);
zfs_ioctl_register("unload-key", ZFS_IOC_UNLOAD_KEY,
zfs_ioc_unload_key, zfs_secpolicy_load_key,
DATASET_NAME, POOL_CHECK_SUSPENDED, B_TRUE, B_TRUE);
zfs_ioctl_register("change-key", ZFS_IOC_CHANGE_KEY,
zfs_ioc_change_key, zfs_secpolicy_change_key,
DATASET_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY,
B_TRUE, B_TRUE);
zfs_ioctl_register("sync", ZFS_IOC_POOL_SYNC,
zfs_ioc_pool_sync, zfs_secpolicy_none, POOL_NAME,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_FALSE, B_FALSE);
zfs_ioctl_register("reopen", ZFS_IOC_POOL_REOPEN, zfs_ioc_pool_reopen,
zfs_secpolicy_config, POOL_NAME, POOL_CHECK_SUSPENDED, B_TRUE,
B_TRUE);
zfs_ioctl_register("channel_program", ZFS_IOC_CHANNEL_PROGRAM,
zfs_ioc_channel_program, zfs_secpolicy_config,
POOL_NAME, POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY, B_TRUE,
B_TRUE);
/* IOCTLS that use the legacy function signature */
zfs_ioctl_register_legacy(ZFS_IOC_POOL_FREEZE, zfs_ioc_pool_freeze,
zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_READONLY);
zfs_ioctl_register_pool(ZFS_IOC_POOL_CREATE, zfs_ioc_pool_create,
zfs_secpolicy_config, B_TRUE, POOL_CHECK_NONE);
zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_SCAN,
zfs_ioc_pool_scan);
zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_UPGRADE,
zfs_ioc_pool_upgrade);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_ADD,
zfs_ioc_vdev_add);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_REMOVE,
zfs_ioc_vdev_remove);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SET_STATE,
zfs_ioc_vdev_set_state);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_ATTACH,
zfs_ioc_vdev_attach);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_DETACH,
zfs_ioc_vdev_detach);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SETPATH,
zfs_ioc_vdev_setpath);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SETFRU,
zfs_ioc_vdev_setfru);
zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_SET_PROPS,
zfs_ioc_pool_set_props);
zfs_ioctl_register_pool_modify(ZFS_IOC_VDEV_SPLIT,
zfs_ioc_vdev_split);
zfs_ioctl_register_pool_modify(ZFS_IOC_POOL_REGUID,
zfs_ioc_pool_reguid);
zfs_ioctl_register_pool_meta(ZFS_IOC_POOL_CONFIGS,
zfs_ioc_pool_configs, zfs_secpolicy_none);
zfs_ioctl_register_pool_meta(ZFS_IOC_POOL_TRYIMPORT,
zfs_ioc_pool_tryimport, zfs_secpolicy_config);
zfs_ioctl_register_pool_meta(ZFS_IOC_INJECT_FAULT,
zfs_ioc_inject_fault, zfs_secpolicy_inject);
zfs_ioctl_register_pool_meta(ZFS_IOC_CLEAR_FAULT,
zfs_ioc_clear_fault, zfs_secpolicy_inject);
zfs_ioctl_register_pool_meta(ZFS_IOC_INJECT_LIST_NEXT,
zfs_ioc_inject_list_next, zfs_secpolicy_inject);
/*
* Pool destroy and export don't log their history as part of
* zfsdev_ioctl; rather, zfs_ioc_pool_export does the logging
* for those commands.
*/
zfs_ioctl_register_pool(ZFS_IOC_POOL_DESTROY, zfs_ioc_pool_destroy,
zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_POOL_EXPORT, zfs_ioc_pool_export,
zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_POOL_STATS, zfs_ioc_pool_stats,
zfs_secpolicy_read, B_FALSE, POOL_CHECK_NONE);
zfs_ioctl_register_pool(ZFS_IOC_POOL_GET_PROPS, zfs_ioc_pool_get_props,
zfs_secpolicy_read, B_FALSE, POOL_CHECK_NONE);
zfs_ioctl_register_pool(ZFS_IOC_ERROR_LOG, zfs_ioc_error_log,
zfs_secpolicy_inject, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_DSOBJ_TO_DSNAME,
zfs_ioc_dsobj_to_dsname,
zfs_secpolicy_diff, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_POOL_GET_HISTORY,
zfs_ioc_pool_get_history,
zfs_secpolicy_config, B_FALSE, POOL_CHECK_SUSPENDED);
zfs_ioctl_register_pool(ZFS_IOC_POOL_IMPORT, zfs_ioc_pool_import,
zfs_secpolicy_config, B_TRUE, POOL_CHECK_NONE);
zfs_ioctl_register_pool(ZFS_IOC_CLEAR, zfs_ioc_clear,
zfs_secpolicy_config, B_TRUE, POOL_CHECK_READONLY);
zfs_ioctl_register_dataset_read(ZFS_IOC_SPACE_WRITTEN,
zfs_ioc_space_written);
zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_RECVD_PROPS,
zfs_ioc_objset_recvd_props);
zfs_ioctl_register_dataset_read(ZFS_IOC_NEXT_OBJ,
zfs_ioc_next_obj);
zfs_ioctl_register_dataset_read(ZFS_IOC_GET_FSACL,
zfs_ioc_get_fsacl);
zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_STATS,
zfs_ioc_objset_stats);
zfs_ioctl_register_dataset_read(ZFS_IOC_OBJSET_ZPLPROPS,
zfs_ioc_objset_zplprops);
zfs_ioctl_register_dataset_read(ZFS_IOC_DATASET_LIST_NEXT,
zfs_ioc_dataset_list_next);
zfs_ioctl_register_dataset_read(ZFS_IOC_SNAPSHOT_LIST_NEXT,
zfs_ioc_snapshot_list_next);
zfs_ioctl_register_dataset_read(ZFS_IOC_SEND_PROGRESS,
zfs_ioc_send_progress);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_DIFF,
zfs_ioc_diff, zfs_secpolicy_diff);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_OBJ_TO_STATS,
zfs_ioc_obj_to_stats, zfs_secpolicy_diff);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_OBJ_TO_PATH,
zfs_ioc_obj_to_path, zfs_secpolicy_diff);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_USERSPACE_ONE,
zfs_ioc_userspace_one, zfs_secpolicy_userspace_one);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_USERSPACE_MANY,
zfs_ioc_userspace_many, zfs_secpolicy_userspace_many);
zfs_ioctl_register_dataset_read_secpolicy(ZFS_IOC_SEND,
zfs_ioc_send, zfs_secpolicy_send);
zfs_ioctl_register_dataset_modify(ZFS_IOC_SET_PROP, zfs_ioc_set_prop,
zfs_secpolicy_none);
zfs_ioctl_register_dataset_modify(ZFS_IOC_DESTROY, zfs_ioc_destroy,
zfs_secpolicy_destroy);
zfs_ioctl_register_dataset_modify(ZFS_IOC_RENAME, zfs_ioc_rename,
zfs_secpolicy_rename);
zfs_ioctl_register_dataset_modify(ZFS_IOC_RECV, zfs_ioc_recv,
zfs_secpolicy_recv);
zfs_ioctl_register_dataset_modify(ZFS_IOC_PROMOTE, zfs_ioc_promote,
zfs_secpolicy_promote);
zfs_ioctl_register_dataset_modify(ZFS_IOC_INHERIT_PROP,
zfs_ioc_inherit_prop, zfs_secpolicy_inherit_prop);
zfs_ioctl_register_dataset_modify(ZFS_IOC_SET_FSACL, zfs_ioc_set_fsacl,
zfs_secpolicy_set_fsacl);
zfs_ioctl_register_dataset_nolog(ZFS_IOC_SHARE, zfs_ioc_share,
zfs_secpolicy_share, POOL_CHECK_NONE);
zfs_ioctl_register_dataset_nolog(ZFS_IOC_SMB_ACL, zfs_ioc_smb_acl,
zfs_secpolicy_smb_acl, POOL_CHECK_NONE);
zfs_ioctl_register_dataset_nolog(ZFS_IOC_USERSPACE_UPGRADE,
zfs_ioc_userspace_upgrade, zfs_secpolicy_userspace_upgrade,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
zfs_ioctl_register_dataset_nolog(ZFS_IOC_TMP_SNAPSHOT,
zfs_ioc_tmp_snapshot, zfs_secpolicy_tmp_snapshot,
POOL_CHECK_SUSPENDED | POOL_CHECK_READONLY);
/*
* ZoL functions
*/
zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_NEXT, zfs_ioc_events_next,
zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE);
zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_CLEAR, zfs_ioc_events_clear,
zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE);
zfs_ioctl_register_legacy(ZFS_IOC_EVENTS_SEEK, zfs_ioc_events_seek,
zfs_secpolicy_config, NO_NAME, B_FALSE, POOL_CHECK_NONE);
}
int
pool_status_check(const char *name, zfs_ioc_namecheck_t type,
zfs_ioc_poolcheck_t check)
{
spa_t *spa;
int error;
ASSERT(type == POOL_NAME || type == DATASET_NAME);
if (check & POOL_CHECK_NONE)
return (0);
error = spa_open(name, &spa, FTAG);
if (error == 0) {
if ((check & POOL_CHECK_SUSPENDED) && spa_suspended(spa))
error = SET_ERROR(EAGAIN);
else if ((check & POOL_CHECK_READONLY) && !spa_writeable(spa))
error = SET_ERROR(EROFS);
spa_close(spa, FTAG);
}
return (error);
}
static void *
zfsdev_get_state_impl(minor_t minor, enum zfsdev_state_type which)
{
zfsdev_state_t *zs;
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zs->zs_minor == minor) {
smp_rmb();
switch (which) {
case ZST_ONEXIT:
return (zs->zs_onexit);
case ZST_ZEVENT:
return (zs->zs_zevent);
case ZST_ALL:
return (zs);
}
}
}
return (NULL);
}
void *
zfsdev_get_state(minor_t minor, enum zfsdev_state_type which)
{
void *ptr;
ptr = zfsdev_get_state_impl(minor, which);
return (ptr);
}
int
zfsdev_getminor(struct file *filp, minor_t *minorp)
{
zfsdev_state_t *zs, *fpd;
ASSERT(filp != NULL);
ASSERT(!MUTEX_HELD(&zfsdev_state_lock));
fpd = filp->private_data;
if (fpd == NULL)
return (SET_ERROR(EBADF));
mutex_enter(&zfsdev_state_lock);
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zs->zs_minor == -1)
continue;
if (fpd == zs) {
*minorp = fpd->zs_minor;
mutex_exit(&zfsdev_state_lock);
return (0);
}
}
mutex_exit(&zfsdev_state_lock);
return (SET_ERROR(EBADF));
}
/*
* Find a free minor number. The zfsdev_state_list is expected to
* be short since it is only a list of currently open file handles.
*/
minor_t
zfsdev_minor_alloc(void)
{
static minor_t last_minor = 0;
minor_t m;
ASSERT(MUTEX_HELD(&zfsdev_state_lock));
for (m = last_minor + 1; m != last_minor; m++) {
if (m > ZFSDEV_MAX_MINOR)
m = 1;
if (zfsdev_get_state_impl(m, ZST_ALL) == NULL) {
last_minor = m;
return (m);
}
}
return (0);
}
static int
zfsdev_state_init(struct file *filp)
{
zfsdev_state_t *zs, *zsprev = NULL;
minor_t minor;
boolean_t newzs = B_FALSE;
ASSERT(MUTEX_HELD(&zfsdev_state_lock));
minor = zfsdev_minor_alloc();
if (minor == 0)
return (SET_ERROR(ENXIO));
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zs->zs_minor == -1)
break;
zsprev = zs;
}
if (!zs) {
zs = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);
newzs = B_TRUE;
}
zs->zs_file = filp;
filp->private_data = zs;
zfs_onexit_init((zfs_onexit_t **)&zs->zs_onexit);
zfs_zevent_init((zfs_zevent_t **)&zs->zs_zevent);
/*
* In order to provide for lock-free concurrent read access
* to the minor list in zfsdev_get_state_impl(), new entries
* must be completely written before linking them into the
* list whereas existing entries are already linked; the last
* operation must be updating zs_minor (from -1 to the new
* value).
*/
if (newzs) {
zs->zs_minor = minor;
smp_wmb();
zsprev->zs_next = zs;
} else {
smp_wmb();
zs->zs_minor = minor;
}
return (0);
}
static int
zfsdev_state_destroy(struct file *filp)
{
zfsdev_state_t *zs;
ASSERT(MUTEX_HELD(&zfsdev_state_lock));
ASSERT(filp->private_data != NULL);
zs = filp->private_data;
zs->zs_minor = -1;
zfs_onexit_destroy(zs->zs_onexit);
zfs_zevent_destroy(zs->zs_zevent);
return (0);
}
static int
zfsdev_open(struct inode *ino, struct file *filp)
{
int error;
mutex_enter(&zfsdev_state_lock);
error = zfsdev_state_init(filp);
mutex_exit(&zfsdev_state_lock);
return (-error);
}
static int
zfsdev_release(struct inode *ino, struct file *filp)
{
int error;
mutex_enter(&zfsdev_state_lock);
error = zfsdev_state_destroy(filp);
mutex_exit(&zfsdev_state_lock);
return (-error);
}
static long
zfsdev_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
{
zfs_cmd_t *zc;
uint_t vecnum;
int error, rc, flag = 0;
const zfs_ioc_vec_t *vec;
char *saved_poolname = NULL;
nvlist_t *innvl = NULL;
fstrans_cookie_t cookie;
vecnum = cmd - ZFS_IOC_FIRST;
if (vecnum >= sizeof (zfs_ioc_vec) / sizeof (zfs_ioc_vec[0]))
return (-SET_ERROR(EINVAL));
vec = &zfs_ioc_vec[vecnum];
/*
* The registered ioctl list may be sparse; verify that either
* a normal or legacy handler is registered.
*/
if (vec->zvec_func == NULL && vec->zvec_legacy_func == NULL)
return (-SET_ERROR(EINVAL));
zc = kmem_zalloc(sizeof (zfs_cmd_t), KM_SLEEP);
error = ddi_copyin((void *)arg, zc, sizeof (zfs_cmd_t), flag);
if (error != 0) {
error = SET_ERROR(EFAULT);
goto out;
}
zc->zc_iflags = flag & FKIOCTL;
if (zc->zc_nvlist_src_size > MAX_NVLIST_SRC_SIZE) {
/*
* Make sure the user doesn't pass in an insane value for
* zc_nvlist_src_size. We have to check, since we will end
* up allocating that much memory inside of get_nvlist(). This
* prevents a nefarious user from allocating tons of kernel
* memory.
*
* Also, we return EINVAL instead of ENOMEM here. The reason
* being that returning ENOMEM from an ioctl() has a special
* connotation; that the user's size value is too small and
* needs to be expanded to hold the nvlist. See
* zcmd_expand_dst_nvlist() for details.
*/
error = SET_ERROR(EINVAL); /* User's size too big */
} else if (zc->zc_nvlist_src_size != 0) {
error = get_nvlist(zc->zc_nvlist_src, zc->zc_nvlist_src_size,
zc->zc_iflags, &innvl);
if (error != 0)
goto out;
}
/*
* Ensure that all pool/dataset names are valid before we pass down to
* the lower layers.
*/
zc->zc_name[sizeof (zc->zc_name) - 1] = '\0';
switch (vec->zvec_namecheck) {
case POOL_NAME:
if (pool_namecheck(zc->zc_name, NULL, NULL) != 0)
error = SET_ERROR(EINVAL);
else
error = pool_status_check(zc->zc_name,
vec->zvec_namecheck, vec->zvec_pool_check);
break;
case DATASET_NAME:
if (dataset_namecheck(zc->zc_name, NULL, NULL) != 0)
error = SET_ERROR(EINVAL);
else
error = pool_status_check(zc->zc_name,
vec->zvec_namecheck, vec->zvec_pool_check);
break;
case NO_NAME:
break;
}
if (error == 0) {
cookie = spl_fstrans_mark();
error = vec->zvec_secpolicy(zc, innvl, CRED());
spl_fstrans_unmark(cookie);
}
if (error != 0)
goto out;
/* legacy ioctls can modify zc_name */
saved_poolname = strdup(zc->zc_name);
if (saved_poolname == NULL) {
error = SET_ERROR(ENOMEM);
goto out;
} else {
saved_poolname[strcspn(saved_poolname, "/@#")] = '\0';
}
if (vec->zvec_func != NULL) {
nvlist_t *outnvl;
int puterror = 0;
spa_t *spa;
nvlist_t *lognv = NULL;
ASSERT(vec->zvec_legacy_func == NULL);
/*
* Add the innvl to the lognv before calling the func,
* in case the func changes the innvl.
*/
if (vec->zvec_allow_log) {
lognv = fnvlist_alloc();
fnvlist_add_string(lognv, ZPOOL_HIST_IOCTL,
vec->zvec_name);
if (!nvlist_empty(innvl)) {
fnvlist_add_nvlist(lognv, ZPOOL_HIST_INPUT_NVL,
innvl);
}
}
outnvl = fnvlist_alloc();
cookie = spl_fstrans_mark();
error = vec->zvec_func(zc->zc_name, innvl, outnvl);
spl_fstrans_unmark(cookie);
/*
* Some commands can partially execute, modify state, and still
* return an error. In these cases, attempt to record what
* was modified.
*/
if ((error == 0 ||
(cmd == ZFS_IOC_CHANNEL_PROGRAM && error != EINVAL)) &&
vec->zvec_allow_log &&
spa_open(zc->zc_name, &spa, FTAG) == 0) {
if (!nvlist_empty(outnvl)) {
fnvlist_add_nvlist(lognv, ZPOOL_HIST_OUTPUT_NVL,
outnvl);
}
if (error != 0) {
fnvlist_add_int64(lognv, ZPOOL_HIST_ERRNO,
error);
}
(void) spa_history_log_nvl(spa, lognv);
spa_close(spa, FTAG);
}
fnvlist_free(lognv);
if (!nvlist_empty(outnvl) || zc->zc_nvlist_dst_size != 0) {
int smusherror = 0;
if (vec->zvec_smush_outnvlist) {
smusherror = nvlist_smush(outnvl,
zc->zc_nvlist_dst_size);
}
if (smusherror == 0)
puterror = put_nvlist(zc, outnvl);
}
if (puterror != 0)
error = puterror;
nvlist_free(outnvl);
} else {
cookie = spl_fstrans_mark();
error = vec->zvec_legacy_func(zc);
spl_fstrans_unmark(cookie);
}
out:
nvlist_free(innvl);
rc = ddi_copyout(zc, (void *)arg, sizeof (zfs_cmd_t), flag);
if (error == 0 && rc != 0)
error = SET_ERROR(EFAULT);
if (error == 0 && vec->zvec_allow_log) {
char *s = tsd_get(zfs_allow_log_key);
if (s != NULL)
strfree(s);
(void) tsd_set(zfs_allow_log_key, saved_poolname);
} else {
if (saved_poolname != NULL)
strfree(saved_poolname);
}
kmem_free(zc, sizeof (zfs_cmd_t));
return (-error);
}
#ifdef CONFIG_COMPAT
static long
zfsdev_compat_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
{
return (zfsdev_ioctl(filp, cmd, arg));
}
#else
#define zfsdev_compat_ioctl NULL
#endif
static const struct file_operations zfsdev_fops = {
.open = zfsdev_open,
.release = zfsdev_release,
.unlocked_ioctl = zfsdev_ioctl,
.compat_ioctl = zfsdev_compat_ioctl,
.owner = THIS_MODULE,
};
static struct miscdevice zfs_misc = {
.minor = ZFS_MINOR,
.name = ZFS_DRIVER,
.fops = &zfsdev_fops,
};
MODULE_ALIAS_MISCDEV(ZFS_MINOR);
MODULE_ALIAS("devname:zfs");
static int
zfs_attach(void)
{
int error;
mutex_init(&zfsdev_state_lock, NULL, MUTEX_DEFAULT, NULL);
zfsdev_state_list = kmem_zalloc(sizeof (zfsdev_state_t), KM_SLEEP);
zfsdev_state_list->zs_minor = -1;
error = misc_register(&zfs_misc);
if (error == -EBUSY) {
/*
* Fall back to dynamic minor allocation in the event of a
* collision with a reserved minor in linux/miscdevice.h.
* In this case the kernel modules must be manually loaded.
*/
printk(KERN_INFO "ZFS: misc_register() with static minor %d "
"failed %d, retrying with MISC_DYNAMIC_MINOR\n",
ZFS_MINOR, error);
zfs_misc.minor = MISC_DYNAMIC_MINOR;
error = misc_register(&zfs_misc);
}
if (error)
printk(KERN_INFO "ZFS: misc_register() failed %d\n", error);
return (error);
}
static void
zfs_detach(void)
{
zfsdev_state_t *zs, *zsprev = NULL;
misc_deregister(&zfs_misc);
mutex_destroy(&zfsdev_state_lock);
for (zs = zfsdev_state_list; zs != NULL; zs = zs->zs_next) {
if (zsprev)
kmem_free(zsprev, sizeof (zfsdev_state_t));
zsprev = zs;
}
if (zsprev)
kmem_free(zsprev, sizeof (zfsdev_state_t));
}
static void
zfs_allow_log_destroy(void *arg)
{
char *poolname = arg;
if (poolname != NULL)
strfree(poolname);
}
#ifdef DEBUG
#define ZFS_DEBUG_STR " (DEBUG mode)"
#else
#define ZFS_DEBUG_STR ""
#endif
static int __init
_init(void)
{
int error;
error = -vn_set_pwd("/");
if (error) {
printk(KERN_NOTICE
"ZFS: Warning unable to set pwd to '/': %d\n", error);
return (error);
}
if ((error = -zvol_init()) != 0)
return (error);
spa_init(FREAD | FWRITE);
zfs_init();
zfs_ioctl_init();
if ((error = zfs_attach()) != 0)
goto out;
tsd_create(&zfs_fsyncer_key, NULL);
tsd_create(&rrw_tsd_key, rrw_tsd_destroy);
tsd_create(&zfs_allow_log_key, zfs_allow_log_destroy);
printk(KERN_NOTICE "ZFS: Loaded module v%s-%s%s, "
"ZFS pool version %s, ZFS filesystem version %s\n",
ZFS_META_VERSION, ZFS_META_RELEASE, ZFS_DEBUG_STR,
SPA_VERSION_STRING, ZPL_VERSION_STRING);
#ifndef CONFIG_FS_POSIX_ACL
printk(KERN_NOTICE "ZFS: Posix ACLs disabled by kernel\n");
#endif /* CONFIG_FS_POSIX_ACL */
return (0);
out:
zfs_fini();
spa_fini();
(void) zvol_fini();
printk(KERN_NOTICE "ZFS: Failed to Load ZFS Filesystem v%s-%s%s"
", rc = %d\n", ZFS_META_VERSION, ZFS_META_RELEASE,
ZFS_DEBUG_STR, error);
return (error);
}
static void __exit
_fini(void)
{
zfs_detach();
zfs_fini();
spa_fini();
zvol_fini();
tsd_destroy(&zfs_fsyncer_key);
tsd_destroy(&rrw_tsd_key);
tsd_destroy(&zfs_allow_log_key);
printk(KERN_NOTICE "ZFS: Unloaded module v%s-%s%s\n",
ZFS_META_VERSION, ZFS_META_RELEASE, ZFS_DEBUG_STR);
}
#ifdef HAVE_SPL
module_init(_init);
module_exit(_fini);
MODULE_DESCRIPTION("ZFS");
MODULE_AUTHOR(ZFS_META_AUTHOR);
MODULE_LICENSE(ZFS_META_LICENSE);
MODULE_VERSION(ZFS_META_VERSION "-" ZFS_META_RELEASE);
#endif /* HAVE_SPL */
diff --git a/module/zfs/zfs_vnops.c b/module/zfs/zfs_vnops.c
index fd8debdcf382..f7f2f6167b03 100644
--- a/module/zfs/zfs_vnops.c
+++ b/module/zfs/zfs_vnops.c
@@ -1,5223 +1,5235 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2012, 2015 by Delphix. All rights reserved.
* Copyright (c) 2015 by Chunwei Chen. All rights reserved.
* Copyright 2017 Nexenta Systems, Inc.
*/
/* Portions Copyright 2007 Jeremy Teo */
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/types.h>
#include <sys/param.h>
#include <sys/time.h>
#include <sys/systm.h>
#include <sys/sysmacros.h>
#include <sys/resource.h>
#include <sys/vfs.h>
#include <sys/vfs_opreg.h>
#include <sys/file.h>
#include <sys/stat.h>
#include <sys/kmem.h>
#include <sys/taskq.h>
#include <sys/uio.h>
#include <sys/vmsystm.h>
#include <sys/atomic.h>
#include <vm/pvn.h>
#include <sys/pathname.h>
#include <sys/cmn_err.h>
#include <sys/errno.h>
#include <sys/unistd.h>
#include <sys/zfs_dir.h>
#include <sys/zfs_acl.h>
#include <sys/zfs_ioctl.h>
#include <sys/fs/zfs.h>
#include <sys/dmu.h>
#include <sys/dmu_objset.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/dbuf.h>
#include <sys/zap.h>
#include <sys/sa.h>
#include <sys/dirent.h>
#include <sys/policy.h>
#include <sys/sunddi.h>
#include <sys/sid.h>
#include <sys/mode.h>
#include "fs/fs_subr.h"
#include <sys/zfs_ctldir.h>
#include <sys/zfs_fuid.h>
#include <sys/zfs_sa.h>
#include <sys/zfs_vnops.h>
#include <sys/dnlc.h>
#include <sys/zfs_rlock.h>
#include <sys/extdirent.h>
#include <sys/kidmap.h>
#include <sys/cred.h>
#include <sys/attr.h>
#include <sys/zpl.h>
#include <sys/zil.h>
#include <sys/sa_impl.h>
/*
* Programming rules.
*
* Each vnode op performs some logical unit of work. To do this, the ZPL must
* properly lock its in-core state, create a DMU transaction, do the work,
* record this work in the intent log (ZIL), commit the DMU transaction,
* and wait for the intent log to commit if it is a synchronous operation.
* Moreover, the vnode ops must work in both normal and log replay context.
* The ordering of events is important to avoid deadlocks and references
* to freed memory. The example below illustrates the following Big Rules:
*
* (1) A check must be made in each zfs thread for a mounted file system.
* This is done, while avoiding races, by using ZFS_ENTER(zfsvfs).
* A ZFS_EXIT(zfsvfs) is needed before all returns. Any znodes
* must be checked with ZFS_VERIFY_ZP(zp). Both of these macros
* can return EIO from the calling function.
*
* (2) iput() should always be the last thing except for zil_commit()
* (if necessary) and ZFS_EXIT(). This is for 3 reasons:
* First, if it's the last reference, the vnode/znode
* can be freed, so the zp may point to freed memory. Second, the last
* reference will call zfs_zinactive(), which may induce a lot of work --
* pushing cached pages (which acquires range locks) and syncing out
* cached atime changes. Third, zfs_zinactive() may require a new tx,
* which could deadlock the system if you were already holding one.
* If you must call iput() within a tx then use zfs_iput_async().
*
* (3) All range locks must be grabbed before calling dmu_tx_assign(),
* as they can span dmu_tx_assign() calls.
*
* (4) If ZPL locks are held, pass TXG_NOWAIT as the second argument to
* dmu_tx_assign(). This is critical because we don't want to block
* while holding locks.
*
* If no ZPL locks are held (aside from ZFS_ENTER()), use TXG_WAIT. This
* reduces lock contention and CPU usage when we must wait (note that if
* throughput is constrained by the storage, nearly every transaction
* must wait).
*
* Note, in particular, that if a lock is sometimes acquired before
* the tx assigns, and sometimes after (e.g. z_lock), then failing
* to use a non-blocking assign can deadlock the system. The scenario:
*
* Thread A has grabbed a lock before calling dmu_tx_assign().
* Thread B is in an already-assigned tx, and blocks for this lock.
* Thread A calls dmu_tx_assign(TXG_WAIT) and blocks in txg_wait_open()
* forever, because the previous txg can't quiesce until B's tx commits.
*
* If dmu_tx_assign() returns ERESTART and zfsvfs->z_assign is TXG_NOWAIT,
* then drop all locks, call dmu_tx_wait(), and try again. On subsequent
* calls to dmu_tx_assign(), pass TXG_NOTHROTTLE in addition to TXG_NOWAIT,
* to indicate that this operation has already called dmu_tx_wait().
* This will ensure that we don't retry forever, waiting a short bit
* each time.
*
* (5) If the operation succeeded, generate the intent log entry for it
* before dropping locks. This ensures that the ordering of events
* in the intent log matches the order in which they actually occurred.
* During ZIL replay the zfs_log_* functions will update the sequence
* number to indicate the zil transaction has replayed.
*
* (6) At the end of each vnode op, the DMU tx must always commit,
* regardless of whether there were any errors.
*
* (7) After dropping all locks, invoke zil_commit(zilog, foid)
* to ensure that synchronous semantics are provided when necessary.
*
* In general, this is how things should be ordered in each vnode op:
*
* ZFS_ENTER(zfsvfs); // exit if unmounted
* top:
* zfs_dirent_lock(&dl, ...) // lock directory entry (may igrab())
* rw_enter(...); // grab any other locks you need
* tx = dmu_tx_create(...); // get DMU tx
* dmu_tx_hold_*(); // hold each object you might modify
* error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
* if (error) {
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* iput(...); // release held vnodes
* if (error == ERESTART) {
* waited = B_TRUE;
* dmu_tx_wait(tx);
* dmu_tx_abort(tx);
* goto top;
* }
* dmu_tx_abort(tx); // abort DMU tx
* ZFS_EXIT(zfsvfs); // finished in zfs
* return (error); // really out of space
* }
* error = do_real_work(); // do whatever this VOP does
* if (error == 0)
* zfs_log_*(...); // on success, make ZIL entry
* dmu_tx_commit(tx); // commit DMU tx -- error or not
* rw_exit(...); // drop locks
* zfs_dirent_unlock(dl); // unlock directory entry
* iput(...); // release held vnodes
* zil_commit(zilog, foid); // synchronous when necessary
* ZFS_EXIT(zfsvfs); // finished in zfs
* return (error); // done, report error
*/
/*
* Virus scanning is unsupported. It would be possible to add a hook
* here to perform the required virus scan. This could be done
* entirely in the kernel or potentially as an update to invoke a
* scanning utility.
*/
static int
zfs_vscan(struct inode *ip, cred_t *cr, int async)
{
return (0);
}
/* ARGSUSED */
int
zfs_open(struct inode *ip, int mode, int flag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/* Honor ZFS_APPENDONLY file attribute */
if ((mode & FMODE_WRITE) && (zp->z_pflags & ZFS_APPENDONLY) &&
((flag & O_APPEND) == 0)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
/* Virus scan eligible files on open */
if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
!(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0) {
if (zfs_vscan(ip, cr, 0) != 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EACCES));
}
}
/* Keep a count of the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_inc_32(&zp->z_sync_cnt);
ZFS_EXIT(zfsvfs);
return (0);
}
/* ARGSUSED */
int
zfs_close(struct inode *ip, int flag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/* Decrement the synchronous opens in the znode */
if (flag & O_SYNC)
atomic_dec_32(&zp->z_sync_cnt);
if (!zfs_has_ctldir(zp) && zfsvfs->z_vscan && S_ISREG(ip->i_mode) &&
!(zp->z_pflags & ZFS_AV_QUARANTINED) && zp->z_size > 0)
VERIFY(zfs_vscan(ip, cr, 1) == 0);
ZFS_EXIT(zfsvfs);
return (0);
}
#if defined(SEEK_HOLE) && defined(SEEK_DATA)
/*
* Lseek support for finding holes (cmd == SEEK_HOLE) and
* data (cmd == SEEK_DATA). "off" is an in/out parameter.
*/
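/*
 * For reference (hypothetical userspace usage, not part of this change),
 * the generic llseek path ends up here for calls such as:
 *
 *	off_t data = lseek(fd, 0, SEEK_DATA);	(first data at/after offset 0)
 *	off_t hole = lseek(fd, data, SEEK_HOLE);	(next hole at/after that data)
 */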
static int
zfs_holey_common(struct inode *ip, int cmd, loff_t *off)
{
znode_t *zp = ITOZ(ip);
uint64_t noff = (uint64_t)*off; /* new offset */
uint64_t file_sz;
int error;
boolean_t hole;
file_sz = zp->z_size;
if (noff >= file_sz) {
return (SET_ERROR(ENXIO));
}
if (cmd == SEEK_HOLE)
hole = B_TRUE;
else
hole = B_FALSE;
error = dmu_offset_next(ZTOZSB(zp)->z_os, zp->z_id, hole, &noff);
if (error == ESRCH)
return (SET_ERROR(ENXIO));
/* file was dirty, so fall back to using generic logic */
if (error == EBUSY) {
if (hole)
*off = file_sz;
return (0);
}
/*
* We could find a hole that begins after the logical end-of-file,
* because dmu_offset_next() only works on whole blocks. If the
* EOF falls mid-block, then indicate that the "virtual hole"
* at the end of the file begins at the logical EOF, rather than
* at the end of the last block.
*/
if (noff > file_sz) {
ASSERT(hole);
noff = file_sz;
}
if (noff < *off)
return (error);
*off = noff;
return (error);
}
int
zfs_holey(struct inode *ip, int cmd, loff_t *off)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
error = zfs_holey_common(ip, cmd, off);
ZFS_EXIT(zfsvfs);
return (error);
}
#endif /* SEEK_HOLE && SEEK_DATA */
#if defined(_KERNEL)
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Write: If we find a memory mapped page, we write to *both*
* the page and the dmu buffer.
*/
static void
update_pages(struct inode *ip, int64_t start, int len,
objset_t *os, uint64_t oid)
{
struct address_space *mp = ip->i_mapping;
struct page *pp;
uint64_t nbytes;
int64_t off;
void *pb;
off = start & (PAGE_SIZE-1);
for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
nbytes = MIN(PAGE_SIZE - off, len);
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
pb = kmap(pp);
(void) dmu_read(os, oid, start+off, nbytes, pb+off,
DMU_READ_PREFETCH);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
SetPageUptodate(pp);
ClearPageError(pp);
unlock_page(pp);
put_page(pp);
}
len -= nbytes;
off = 0;
}
}
/*
* When a file is memory mapped, we must keep the IO data synchronized
* between the DMU cache and the memory mapped pages. What this means:
*
* On Read: We "read" preferentially from memory mapped pages,
* otherwise we fall back to the dmu buffer.
*
* NOTE: We will always "break up" the IO into PAGESIZE uiomoves when
* the file is memory mapped.
*/
static int
mappedread(struct inode *ip, int nbytes, uio_t *uio)
{
struct address_space *mp = ip->i_mapping;
struct page *pp;
znode_t *zp = ITOZ(ip);
int64_t start, off;
uint64_t bytes;
int len = nbytes;
int error = 0;
void *pb;
start = uio->uio_loffset;
off = start & (PAGE_SIZE-1);
for (start &= PAGE_MASK; len > 0; start += PAGE_SIZE) {
bytes = MIN(PAGE_SIZE - off, len);
pp = find_lock_page(mp, start >> PAGE_SHIFT);
if (pp) {
ASSERT(PageUptodate(pp));
unlock_page(pp);
pb = kmap(pp);
error = uiomove(pb + off, bytes, UIO_READ, uio);
kunmap(pp);
if (mapping_writably_mapped(mp))
flush_dcache_page(pp);
mark_page_accessed(pp);
put_page(pp);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, bytes);
}
len -= bytes;
off = 0;
if (error)
break;
}
return (error);
}
#endif /* _KERNEL */
unsigned long zfs_read_chunk_size = 1024 * 1024; /* Tunable */
unsigned long zfs_delete_blocks = DMU_MAX_DELETEBLKCNT;
/*
* Read bytes from specified file into supplied buffer.
*
* IN: ip - inode of file to be read from.
* uio - structure supplying read location, range info,
* and return buffer.
* ioflag - FSYNC flags; used to provide FRSYNC semantics.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range, buffer filled.
*
* RETURN: 0 on success, error code on failure.
*
* Side Effects:
* inode - atime updated if byte count > 0
*/
/* ARGSUSED */
int
zfs_read(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
ssize_t n, nbytes;
int error = 0;
rl_t *rl;
#ifdef HAVE_UIO_ZEROCOPY
xuio_t *xuio = NULL;
#endif /* HAVE_UIO_ZEROCOPY */
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (zp->z_pflags & ZFS_AV_QUARANTINED) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EACCES));
}
/*
* Validate file offset
*/
if (uio->uio_loffset < (offset_t)0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Fasttrack empty reads
*/
if (uio->uio_resid == 0) {
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* If we're in FRSYNC mode, sync out this znode before reading it.
* Only do this for non-snapshots.
*/
if (zfsvfs->z_log &&
(ioflag & FRSYNC || zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS))
zil_commit(zfsvfs->z_log, zp->z_id);
/*
* Lock the range against changes.
*/
rl = zfs_range_lock(&zp->z_range_lock, uio->uio_loffset, uio->uio_resid,
RL_READER);
/*
* If we are reading past end-of-file we can skip
* to the end; but we might still need to set atime.
*/
if (uio->uio_loffset >= zp->z_size) {
error = 0;
goto out;
}
ASSERT(uio->uio_loffset < zp->z_size);
n = MIN(uio->uio_resid, zp->z_size - uio->uio_loffset);
#ifdef HAVE_UIO_ZEROCOPY
if ((uio->uio_extflg == UIO_XUIO) &&
(((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY)) {
int nblk;
int blksz = zp->z_blksz;
uint64_t offset = uio->uio_loffset;
xuio = (xuio_t *)uio;
if ((ISP2(blksz))) {
nblk = (P2ROUNDUP(offset + n, blksz) - P2ALIGN(offset,
blksz)) / blksz;
} else {
ASSERT(offset + n <= blksz);
nblk = 1;
}
(void) dmu_xuio_init(xuio, nblk);
if (vn_has_cached_data(ip)) {
/*
* For simplicity, we always allocate a full buffer
* even if we only expect to read a portion of a block.
*/
while (--nblk >= 0) {
(void) dmu_xuio_add(xuio,
dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
blksz), 0, blksz);
}
}
}
#endif /* HAVE_UIO_ZEROCOPY */
while (n > 0) {
nbytes = MIN(n, zfs_read_chunk_size -
P2PHASE(uio->uio_loffset, zfs_read_chunk_size));
if (zp->z_is_mapped && !(ioflag & O_DIRECT)) {
error = mappedread(ip, nbytes, uio);
} else {
error = dmu_read_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes);
}
if (error) {
/* convert checksum errors into IO errors */
if (error == ECKSUM)
error = SET_ERROR(EIO);
break;
}
n -= nbytes;
}
out:
zfs_range_unlock(rl);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Write the bytes to a file.
*
* IN: ip - inode of file to be written to.
* uio - structure supplying write location, range info,
* and data buffer.
* ioflag - FAPPEND flag set if in append mode.
* O_DIRECT flag; used to bypass page cache.
* cr - credentials of caller.
*
* OUT: uio - updated offset and range.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* ip - ctime|mtime updated if byte count > 0
*/
/* ARGSUSED */
int
zfs_write(struct inode *ip, uio_t *uio, int ioflag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
rlim64_t limit = uio->uio_limit;
ssize_t start_resid = uio->uio_resid;
ssize_t tx_bytes;
uint64_t end_size;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ZTOZSB(zp);
zilog_t *zilog;
offset_t woff;
ssize_t n, nbytes;
rl_t *rl;
int max_blksz = zfsvfs->z_max_blksz;
int error = 0;
arc_buf_t *abuf;
const iovec_t *aiov = NULL;
xuio_t *xuio = NULL;
int write_eof;
int count = 0;
sa_bulk_attr_t bulk[4];
uint64_t mtime[2], ctime[2];
uint32_t uid;
#ifdef HAVE_UIO_ZEROCOPY
int i_iov = 0;
const iovec_t *iovp = uio->uio_iov;
ASSERTV(int iovcnt = uio->uio_iovcnt);
#endif
/*
* Fasttrack empty write
*/
n = start_resid;
if (n == 0)
return (0);
if (limit == RLIM64_INFINITY || limit > MAXOFFSET_T)
limit = MAXOFFSET_T;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs), NULL,
&zp->z_size, 8);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
/*
* If immutable or not appending then return EPERM
*/
if ((zp->z_pflags & (ZFS_IMMUTABLE | ZFS_READONLY)) ||
((zp->z_pflags & ZFS_APPENDONLY) && !(ioflag & FAPPEND) &&
(uio->uio_loffset < zp->z_size))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
zilog = zfsvfs->z_log;
/*
* Validate file offset
*/
woff = ioflag & FAPPEND ? zp->z_size : uio->uio_loffset;
if (woff < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Pre-fault the pages to ensure that slow (e.g. NFS) pages
* don't hold up the txg.
* Skip this if uio contains loaned arc_buf.
*/
#ifdef HAVE_UIO_ZEROCOPY
if ((uio->uio_extflg == UIO_XUIO) &&
(((xuio_t *)uio)->xu_type == UIOTYPE_ZEROCOPY))
xuio = (xuio_t *)uio;
else
#endif
uio_prefaultpages(MIN(n, max_blksz), uio);
/*
* If in append mode, set the io offset pointer to eof.
*/
if (ioflag & FAPPEND) {
/*
* Obtain an appending range lock to guarantee file append
* semantics. We reset the write offset once we have the lock.
*/
rl = zfs_range_lock(&zp->z_range_lock, 0, n, RL_APPEND);
woff = rl->r_off;
if (rl->r_len == UINT64_MAX) {
/*
* We overlocked the file because this write will cause
* the file block size to increase.
* Note that zp_size cannot change with this lock held.
*/
woff = zp->z_size;
}
uio->uio_loffset = woff;
} else {
/*
* Note that if the file block size will change as a result of
* this write, then this range lock will lock the entire file
* so that we can re-write the block safely.
*/
rl = zfs_range_lock(&zp->z_range_lock, woff, n, RL_WRITER);
}
if (woff >= limit) {
zfs_range_unlock(rl);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EFBIG));
}
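/*
 * Trim the write so it does not extend past the resource limit;
 * the second comparison guards against overflow of woff + n.
 */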
if ((woff + n) > limit || woff > (limit - n))
n = limit - woff;
/* Will this write extend the file length? */
write_eof = (woff + n > zp->z_size);
end_size = MAX(zp->z_size, woff + n);
/*
* Write the file in reasonable size chunks. Each chunk is written
* in a separate transaction; this keeps the intent log records small
* and allows us to do more fine-grained space accounting.
*/
while (n > 0) {
abuf = NULL;
woff = uio->uio_loffset;
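/*
 * Stop writing if the owning user, group, or project has
 * exceeded its block quota.
 */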
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
KUID_TO_SUID(ip->i_uid)) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
KGID_TO_SGID(ip->i_gid)) ||
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
if (abuf != NULL)
dmu_return_arcbuf(abuf);
error = SET_ERROR(EDQUOT);
break;
}
if (xuio && abuf == NULL) {
#ifdef HAVE_UIO_ZEROCOPY
ASSERT(i_iov < iovcnt);
ASSERT3U(uio->uio_segflg, !=, UIO_BVEC);
aiov = &iovp[i_iov];
abuf = dmu_xuio_arcbuf(xuio, i_iov);
dmu_xuio_clear(xuio, i_iov);
ASSERT((aiov->iov_base == abuf->b_data) ||
((char *)aiov->iov_base - (char *)abuf->b_data +
aiov->iov_len == arc_buf_size(abuf)));
i_iov++;
#endif
} else if (abuf == NULL && n >= max_blksz &&
woff >= zp->z_size &&
P2PHASE(woff, max_blksz) == 0 &&
zp->z_blksz == max_blksz) {
/*
* This write covers a full block. "Borrow" a buffer
* from the dmu so that we can fill it before we enter
* a transaction. This avoids the possibility of
* holding up the transaction if the data copy hangs
* up on a pagefault (e.g., from an NFS server mapping).
*/
size_t cbytes;
abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
max_blksz);
ASSERT(abuf != NULL);
ASSERT(arc_buf_size(abuf) == max_blksz);
if ((error = uiocopy(abuf->b_data, max_blksz,
UIO_WRITE, uio, &cbytes))) {
dmu_return_arcbuf(abuf);
break;
}
ASSERT(cbytes == max_blksz);
}
/*
* Start a transaction.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_write(tx, zp->z_id, woff, MIN(n, max_blksz));
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
if (abuf != NULL)
dmu_return_arcbuf(abuf);
break;
}
/*
* If zfs_range_lock() over-locked we grow the blocksize
* and then reduce the lock range. This will only happen
* on the first iteration since zfs_range_reduce() will
* shrink down r_len to the appropriate size.
*/
if (rl->r_len == UINT64_MAX) {
uint64_t new_blksz;
if (zp->z_blksz > max_blksz) {
/*
* File's blocksize is already larger than the
* "recordsize" property. Only let it grow to
* the next power of 2.
*/
ASSERT(!ISP2(zp->z_blksz));
new_blksz = MIN(end_size,
1 << highbit64(zp->z_blksz));
} else {
new_blksz = MIN(end_size, max_blksz);
}
zfs_grow_blocksize(zp, new_blksz, tx);
zfs_range_reduce(rl, woff, n);
}
/*
* XXX - should we really limit each write to z_max_blksz?
* Perhaps we should use SPA_MAXBLOCKSIZE chunks?
*/
nbytes = MIN(n, max_blksz - P2PHASE(woff, max_blksz));
if (abuf == NULL) {
tx_bytes = uio->uio_resid;
error = dmu_write_uio_dbuf(sa_get_db(zp->z_sa_hdl),
uio, nbytes, tx);
tx_bytes -= uio->uio_resid;
} else {
tx_bytes = nbytes;
ASSERT(xuio == NULL || tx_bytes == aiov->iov_len);
/*
* If this is not a full block write, but we are
* extending the file past EOF and this data starts
* block-aligned, use assign_arcbuf(). Otherwise,
* write via dmu_write().
*/
if (tx_bytes < max_blksz && (!write_eof ||
aiov->iov_base != abuf->b_data)) {
ASSERT(xuio);
dmu_write(zfsvfs->z_os, zp->z_id, woff,
/* cppcheck-suppress nullPointer */
aiov->iov_len, aiov->iov_base, tx);
dmu_return_arcbuf(abuf);
xuio_stat_wbuf_copied();
} else {
ASSERT(xuio || tx_bytes == max_blksz);
dmu_assign_arcbuf_by_dbuf(
sa_get_db(zp->z_sa_hdl), woff, abuf, tx);
}
ASSERT(tx_bytes <= uio->uio_resid);
uioskip(uio, tx_bytes);
}
if (tx_bytes && zp->z_is_mapped && !(ioflag & O_DIRECT)) {
update_pages(ip, woff,
tx_bytes, zfsvfs->z_os, zp->z_id);
}
/*
* If we made no progress, we're done. If we made even
* partial progress, update the znode and ZIL accordingly.
*/
if (tx_bytes == 0) {
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
(void *)&zp->z_size, sizeof (uint64_t), tx);
dmu_tx_commit(tx);
ASSERT(error != 0);
break;
}
/*
* Clear Set-UID/Set-GID bits on successful write if not
* privileged and at least one of the execute bits is set.
*
* It would be nice to do this after all writes have
* been done, but that would still expose the ISUID/ISGID
* to another app after the partial write is committed.
*
* Note: we don't call zfs_fuid_map_id() here because
* user 0 is not an ephemeral uid.
*/
mutex_enter(&zp->z_acl_lock);
uid = KUID_TO_SUID(ip->i_uid);
if ((zp->z_mode & (S_IXUSR | (S_IXUSR >> 3) |
(S_IXUSR >> 6))) != 0 &&
(zp->z_mode & (S_ISUID | S_ISGID)) != 0 &&
secpolicy_vnode_setid_retain(cr,
((zp->z_mode & S_ISUID) != 0 && uid == 0)) != 0) {
uint64_t newmode;
zp->z_mode &= ~(S_ISUID | S_ISGID);
ip->i_mode = newmode = zp->z_mode;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_MODE(zfsvfs),
(void *)&newmode, sizeof (uint64_t), tx);
}
mutex_exit(&zp->z_acl_lock);
zfs_tstamp_update_setup(zp, CONTENT_MODIFIED, mtime, ctime);
/*
* Update the file size (zp_size) if it has changed;
* account for possible concurrent updates.
*/
while ((end_size = zp->z_size) < uio->uio_loffset) {
(void) atomic_cas_64(&zp->z_size, end_size,
uio->uio_loffset);
ASSERT(error == 0);
}
/*
* If we are replaying and eof is non zero then force
* the file size to the specified eof. Note, there's no
* concurrency during replay.
*/
if (zfsvfs->z_replay && zfsvfs->z_replay_eof != 0)
zp->z_size = zfsvfs->z_replay_eof;
error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
zfs_log_write(zilog, tx, TX_WRITE, zp, woff, tx_bytes, ioflag,
NULL, NULL);
dmu_tx_commit(tx);
if (error != 0)
break;
ASSERT(tx_bytes == nbytes);
n -= nbytes;
if (!xuio && n > 0)
uio_prefaultpages(MIN(n, max_blksz), uio);
}
zfs_inode_update(zp);
zfs_range_unlock(rl);
/*
* If we're in replay mode, or we made no progress, return error.
* Otherwise, it's at least a partial write, so it's successful.
*/
if (zfsvfs->z_replay || uio->uio_resid == start_resid) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (ioflag & (FSYNC | FDSYNC) ||
zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, zp->z_id);
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* Drop a reference on the passed inode asynchronously. This ensures
* that the caller will never drop the last reference on an inode in
* the current context. Doing so while holding open a tx could result
* in a deadlock if iput_final() re-enters the filesystem code.
*/
void
zfs_iput_async(struct inode *ip)
{
objset_t *os = ITOZSB(ip)->z_os;
ASSERT(atomic_read(&ip->i_count) > 0);
ASSERT(os != NULL);
if (atomic_read(&ip->i_count) == 1)
VERIFY(taskq_dispatch(dsl_pool_iput_taskq(dmu_objset_pool(os)),
(task_func_t *)iput, ip, TQ_SLEEP) != TASKQID_INVALID);
else
iput(ip);
}
void
zfs_get_done(zgd_t *zgd, int error)
{
znode_t *zp = zgd->zgd_private;
if (zgd->zgd_db)
dmu_buf_rele(zgd->zgd_db, zgd);
zfs_range_unlock(zgd->zgd_rl);
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_iput_async(ZTOI(zp));
if (error == 0 && zgd->zgd_bp)
zil_lwb_add_block(zgd->zgd_lwb, zgd->zgd_bp);
kmem_free(zgd, sizeof (zgd_t));
}
#ifdef DEBUG
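/*
 * When set non-zero, the next indirect write in zfs_get_data()
 * fails with EIO and the flag is cleared; this exercises the
 * ZIL error paths.
 */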
static int zil_fault_io = 0;
#endif
/*
* Get data to generate a TX_WRITE intent log record.
*/
int
zfs_get_data(void *arg, lr_write_t *lr, char *buf, struct lwb *lwb, zio_t *zio)
{
zfsvfs_t *zfsvfs = arg;
objset_t *os = zfsvfs->z_os;
znode_t *zp;
uint64_t object = lr->lr_foid;
uint64_t offset = lr->lr_offset;
uint64_t size = lr->lr_length;
dmu_buf_t *db;
zgd_t *zgd;
int error = 0;
ASSERT3P(lwb, !=, NULL);
ASSERT3P(zio, !=, NULL);
ASSERT3U(size, !=, 0);
/*
* Nothing to do if the file has been removed
*/
if (zfs_zget(zfsvfs, object, &zp) != 0)
return (SET_ERROR(ENOENT));
if (zp->z_unlinked) {
/*
* Release the vnode asynchronously as we currently have the
* txg stopped from syncing.
*/
zfs_iput_async(ZTOI(zp));
return (SET_ERROR(ENOENT));
}
zgd = (zgd_t *)kmem_zalloc(sizeof (zgd_t), KM_SLEEP);
zgd->zgd_lwb = lwb;
zgd->zgd_private = zp;
/*
* Write records come in two flavors: immediate and indirect.
* For small writes it's cheaper to store the data with the
* log record (immediate); for large writes it's cheaper to
* sync the data and get a pointer to it (indirect) so that
* we don't have to write the data twice.
*/
if (buf != NULL) { /* immediate write */
zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset, size,
RL_READER);
/* test for truncation needs to be done while range locked */
if (offset >= zp->z_size) {
error = SET_ERROR(ENOENT);
} else {
error = dmu_read(os, object, offset, size, buf,
DMU_READ_NO_PREFETCH);
}
ASSERT(error == 0 || error == ENOENT);
} else { /* indirect write */
/*
* Have to lock the whole block to ensure when it's
* written out and its checksum is being calculated
* that no one can change the data. We need to re-check
* blocksize after we get the lock in case it's changed!
*/
for (;;) {
uint64_t blkoff;
size = zp->z_blksz;
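/*
 * If the block size is a power of two, back the offset up to the
 * start of its block; otherwise the file consists of a single
 * block and the lock must start at offset zero.
 */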
blkoff = ISP2(size) ? P2PHASE(offset, size) : offset;
offset -= blkoff;
zgd->zgd_rl = zfs_range_lock(&zp->z_range_lock, offset,
size, RL_READER);
if (zp->z_blksz == size)
break;
offset += blkoff;
zfs_range_unlock(zgd->zgd_rl);
}
/* test for truncation needs to be done while range locked */
if (lr->lr_offset >= zp->z_size)
error = SET_ERROR(ENOENT);
#ifdef DEBUG
if (zil_fault_io) {
error = SET_ERROR(EIO);
zil_fault_io = 0;
}
#endif
if (error == 0)
error = dmu_buf_hold(os, object, offset, zgd, &db,
DMU_READ_NO_PREFETCH);
if (error == 0) {
blkptr_t *bp = &lr->lr_blkptr;
zgd->zgd_db = db;
zgd->zgd_bp = bp;
ASSERT(db->db_offset == offset);
ASSERT(db->db_size == size);
error = dmu_sync(zio, lr->lr_common.lrc_txg,
zfs_get_done, zgd);
ASSERT(error || lr->lr_length <= size);
/*
* On success, we need to wait for the write I/O
* initiated by dmu_sync() to complete before we can
* release this dbuf. We will finish everything up
* in the zfs_get_done() callback.
*/
if (error == 0)
return (0);
if (error == EALREADY) {
lr->lr_common.lrc_txtype = TX_WRITE2;
+ /*
+ * TX_WRITE2 relies on the data previously
+ * written by the TX_WRITE that caused
+ * EALREADY. We zero out the BP because
+ * it is the old, currently-on-disk BP,
+ * so there's no need to zio_flush() its
+ * vdevs (flushing would needlessly hurt
+ * performance, and doesn't work on
+ * indirect vdevs).
+ */
+ zgd->zgd_bp = NULL;
+ BP_ZERO(bp);
error = 0;
}
}
}
zfs_get_done(zgd, error);
return (error);
}
/*ARGSUSED*/
int
zfs_access(struct inode *ip, int mode, int flag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (flag & V_ACE_MASK)
error = zfs_zaccess(zp, mode, flag, B_FALSE, cr);
else
error = zfs_zaccess_rwx(zp, mode, flag, cr);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Lookup an entry in a directory, or an extended attribute directory.
* If it exists, return a held inode reference for it.
*
* IN: dip - inode of directory to search.
* nm - name of entry to lookup.
* flags - LOOKUP_XATTR set if looking for an attribute.
* cr - credentials of caller.
* direntflags - directory lookup flags
* realpnp - returned pathname.
*
* OUT: ipp - inode of located entry, NULL if not found.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* NA
*/
/* ARGSUSED */
int
zfs_lookup(struct inode *dip, char *nm, struct inode **ipp, int flags,
cred_t *cr, int *direntflags, pathname_t *realpnp)
{
znode_t *zdp = ITOZ(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
int error = 0;
/*
* Fast path lookup, however we must skip DNLC lookup
* for case folding or normalizing lookups because the
* DNLC code only stores the passed in name. This means
* creating 'a' and removing 'A' on a case insensitive
* file system would work, but DNLC still thinks 'a'
* exists and won't let you create it again on the next
* pass through fast path.
*/
if (!(flags & (LOOKUP_XATTR | FIGNORECASE))) {
if (!S_ISDIR(dip->i_mode)) {
return (SET_ERROR(ENOTDIR));
} else if (zdp->z_sa_hdl == NULL) {
return (SET_ERROR(EIO));
}
if (nm[0] == 0 || (nm[0] == '.' && nm[1] == '\0')) {
error = zfs_fastaccesschk_execute(zdp, cr);
if (!error) {
*ipp = dip;
igrab(*ipp);
return (0);
}
return (error);
#ifdef HAVE_DNLC
} else if (!zdp->z_zfsvfs->z_norm &&
(zdp->z_zfsvfs->z_case == ZFS_CASE_SENSITIVE)) {
vnode_t *tvp = dnlc_lookup(dvp, nm);
if (tvp) {
error = zfs_fastaccesschk_execute(zdp, cr);
if (error) {
iput(tvp);
return (error);
}
if (tvp == DNLC_NO_VNODE) {
iput(tvp);
return (SET_ERROR(ENOENT));
} else {
*vpp = tvp;
return (specvp_check(vpp, cr));
}
}
#endif /* HAVE_DNLC */
}
}
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zdp);
*ipp = NULL;
if (flags & LOOKUP_XATTR) {
/*
* We don't allow recursive attributes.
* Maybe someday we will.
*/
if (zdp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if ((error = zfs_get_xattrdir(zdp, ipp, cr, flags))) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Do we have permission to get into attribute directory?
*/
if ((error = zfs_zaccess(ITOZ(*ipp), ACE_EXECUTE, 0,
B_FALSE, cr))) {
iput(*ipp);
*ipp = NULL;
}
ZFS_EXIT(zfsvfs);
return (error);
}
if (!S_ISDIR(dip->i_mode)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOTDIR));
}
/*
* Check accessibility of directory.
*/
if ((error = zfs_zaccess(zdp, ACE_EXECUTE, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfsvfs->z_utf8 && u8_validate(nm, strlen(nm),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
error = zfs_dirlook(zdp, nm, ipp, flags, direntflags, realpnp);
if ((error == 0) && (*ipp))
zfs_inode_update(ITOZ(*ipp));
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Attempt to create a new entry in a directory. If the entry
* already exists, truncate the file if permissible, else return
* an error. Return the ip of the created or trunc'd file.
*
* IN: dip - inode of directory to put new file entry in.
* name - name of new file entry.
* vap - attributes of new file.
* excl - flag indicating exclusive or non-exclusive mode.
* mode - mode to open file with.
* cr - credentials of caller.
* flag - large file flag [UNUSED].
* vsecp - ACL to be set
*
* OUT: ipp - inode of created or trunc'd entry.
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dip - ctime|mtime updated if new entry created
* ip - ctime|mtime always, atime if new
*/
/* ARGSUSED */
int
zfs_create(struct inode *dip, char *name, vattr_t *vap, int excl,
int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
znode_t *zp, *dzp = ITOZ(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
zilog_t *zilog;
objset_t *os;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
boolean_t have_acl = B_FALSE;
boolean_t waited = B_FALSE;
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
gid = crgetgid(cr);
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
os = zfsvfs->z_os;
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
top:
*ipp = NULL;
if (*name == '\0') {
/*
* Null component name refers to the directory itself.
*/
igrab(dip);
zp = dzp;
dl = NULL;
error = 0;
} else {
/* possible igrab(zp) */
int zflg = 0;
if (flag & FIGNORECASE)
zflg |= ZCILOOK;
error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, NULL);
if (error) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
if (strcmp(name, "..") == 0)
error = SET_ERROR(EISDIR);
ZFS_EXIT(zfsvfs);
return (error);
}
}
if (zp == NULL) {
uint64_t txtype;
uint64_t projid = ZFS_DEFAULT_PROJID;
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
goto out;
}
/*
* We only support the creation of regular files in
* extended attribute directories.
*/
if ((dzp->z_pflags & ZFS_XATTR) && !S_ISREG(vap->va_mode)) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EINVAL);
goto out;
}
if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids)) != 0)
goto out;
have_acl = B_TRUE;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx,
(waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
(void) zfs_link_create(dl, zp, tx, ZNEW);
txtype = zfs_log_create_txtype(Z_FILE, vsecp, vap);
if (flag & FIGNORECASE)
txtype |= TX_CI;
zfs_log_create(zilog, tx, txtype, dzp, zp, name,
vsecp, acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
} else {
int aflags = (flag & FAPPEND) ? V_APPEND : 0;
if (have_acl)
zfs_acl_ids_free(&acl_ids);
have_acl = B_FALSE;
/*
* A directory entry already exists for this name.
*/
/*
* Can't truncate an existing file if in exclusive mode.
*/
if (excl) {
error = SET_ERROR(EEXIST);
goto out;
}
/*
* Can't open a directory for writing.
*/
if (S_ISDIR(ZTOI(zp)->i_mode)) {
error = SET_ERROR(EISDIR);
goto out;
}
/*
* Verify requested access to file.
*/
if (mode && (error = zfs_zaccess_rwx(zp, mode, aflags, cr))) {
goto out;
}
mutex_enter(&dzp->z_lock);
dzp->z_seq++;
mutex_exit(&dzp->z_lock);
/*
* Truncate regular files if requested.
*/
if (S_ISREG(ZTOI(zp)->i_mode) &&
(vap->va_mask & ATTR_SIZE) && (vap->va_size == 0)) {
/* we can't hold any locks when calling zfs_freesp() */
if (dl) {
zfs_dirent_unlock(dl);
dl = NULL;
}
error = zfs_freesp(zp, 0, 0, mode, TRUE);
}
}
out:
if (dl)
zfs_dirent_unlock(dl);
if (error) {
if (zp)
iput(ZTOI(zp));
} else {
zfs_inode_update(dzp);
zfs_inode_update(zp);
*ipp = ZTOI(zp);
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/* ARGSUSED */
int
zfs_tmpfile(struct inode *dip, vattr_t *vap, int excl,
int mode, struct inode **ipp, cred_t *cr, int flag, vsecattr_t *vsecp)
{
znode_t *zp = NULL, *dzp = ITOZ(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
objset_t *os;
dmu_tx_t *tx;
int error;
uid_t uid;
gid_t gid;
zfs_acl_ids_t acl_ids;
uint64_t projid = ZFS_DEFAULT_PROJID;
boolean_t fuid_dirtied;
boolean_t have_acl = B_FALSE;
boolean_t waited = B_FALSE;
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
gid = crgetgid(cr);
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
os = zfsvfs->z_os;
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
top:
*ipp = NULL;
/*
* Create a new file object and update the directory
* to reference it.
*/
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
if (have_acl)
zfs_acl_ids_free(&acl_ids);
goto out;
}
if (!have_acl && (error = zfs_acl_ids_create(dzp, 0, vap,
cr, vsecp, &acl_ids)) != 0)
goto out;
have_acl = B_TRUE;
if (S_ISREG(vap->va_mode) || S_ISDIR(vap->va_mode))
projid = zfs_inherit_projid(dzp);
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, projid)) {
zfs_acl_ids_free(&acl_ids);
error = SET_ERROR(EDQUOT);
goto out;
}
tx = dmu_tx_create(os);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa &&
acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, acl_ids.z_aclp->z_acl_bytes);
}
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
zfs_mknode(dzp, vap, tx, cr, IS_TMPFILE, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
/* Add to unlinked set */
zp->z_unlinked = 1;
zfs_unlinked_add(zp, tx);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
out:
if (error) {
if (zp)
iput(ZTOI(zp));
} else {
zfs_inode_update(dzp);
zfs_inode_update(zp);
*ipp = ZTOI(zp);
}
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Remove an entry from a directory.
*
* IN: dip - inode of directory to remove entry from.
* name - name of entry to remove.
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* dip - ctime|mtime
* ip - ctime (if nlink > 0)
*/
uint64_t null_xattr = 0;
/*ARGSUSED*/
int
zfs_remove(struct inode *dip, char *name, cred_t *cr, int flags)
{
znode_t *zp, *dzp = ITOZ(dip);
znode_t *xzp;
struct inode *ip;
zfsvfs_t *zfsvfs = ITOZSB(dip);
zilog_t *zilog;
uint64_t acl_obj, xattr_obj;
uint64_t xattr_obj_unlinked = 0;
uint64_t obj = 0;
uint64_t links;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
boolean_t may_delete_now, delete_now = FALSE;
boolean_t unlinked, toobig = FALSE;
uint64_t txtype;
pathname_t *realnmp = NULL;
pathname_t realnm;
int error;
int zflg = ZEXISTS;
boolean_t waited = B_FALSE;
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (flags & FIGNORECASE) {
zflg |= ZCILOOK;
pn_alloc(&realnm);
realnmp = &realnm;
}
top:
xattr_obj = 0;
xzp = NULL;
/*
* Attempt to lock directory; fail if entry doesn't exist.
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, realnmp))) {
if (realnmp)
pn_free(realnmp);
ZFS_EXIT(zfsvfs);
return (error);
}
ip = ZTOI(zp);
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
/*
* Need to use rmdir for removing directories.
*/
if (S_ISDIR(ip->i_mode)) {
error = SET_ERROR(EPERM);
goto out;
}
#ifdef HAVE_DNLC
if (realnmp)
dnlc_remove(dvp, realnmp->pn_buf);
else
dnlc_remove(dvp, name);
#endif /* HAVE_DNLC */
mutex_enter(&zp->z_lock);
may_delete_now = atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped);
mutex_exit(&zp->z_lock);
/*
* We may delete the znode now, or we may put it in the unlinked set;
* it depends on whether we're the last link, and on whether there are
* other holds on the inode. So we dmu_tx_hold() the right things to
* allow for either case.
*/
obj = zp->z_id;
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
if (may_delete_now) {
toobig = zp->z_size > zp->z_blksz * zfs_delete_blocks;
/* if the file is too big, only hold_free a token amount */
dmu_tx_hold_free(tx, zp->z_id, 0,
(toobig ? DMU_MAX_ACCESS : DMU_OBJECT_END));
}
/* are there any extended attributes? */
error = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (error == 0 && xattr_obj) {
error = zfs_zget(zfsvfs, xattr_obj, &xzp);
ASSERT0(error);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
dmu_tx_hold_sa(tx, xzp->z_sa_hdl, B_FALSE);
}
mutex_enter(&zp->z_lock);
if ((acl_obj = zfs_external_acl(zp)) != 0 && may_delete_now)
dmu_tx_hold_free(tx, acl_obj, 0, DMU_OBJECT_END);
mutex_exit(&zp->z_lock);
/* charge as an update -- would be nice not to charge at all */
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
/*
* Mark this transaction as typically resulting in a net free of space
*/
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
iput(ip);
if (xzp)
iput(ZTOI(xzp));
goto top;
}
if (realnmp)
pn_free(realnmp);
dmu_tx_abort(tx);
iput(ip);
if (xzp)
iput(ZTOI(xzp));
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Remove the directory entry.
*/
error = zfs_link_destroy(dl, zp, tx, zflg, &unlinked);
if (error) {
dmu_tx_commit(tx);
goto out;
}
if (unlinked) {
/*
* Hold z_lock so that we can make sure that the ACL obj
* hasn't changed. Could have been deleted due to
* zfs_sa_upgrade().
*/
mutex_enter(&zp->z_lock);
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj_unlinked, sizeof (xattr_obj_unlinked));
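/*
 * We may free the znode right away only if this was the last
 * link, the object is small enough to free in this transaction,
 * no one else holds the inode, it is not mmapped, and neither
 * the xattr object nor the external ACL object changed while we
 * were reacquiring z_lock.
 */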
delete_now = may_delete_now && !toobig &&
atomic_read(&ip->i_count) == 1 && !(zp->z_is_mapped) &&
xattr_obj == xattr_obj_unlinked && zfs_external_acl(zp) ==
acl_obj;
}
if (delete_now) {
if (xattr_obj_unlinked) {
ASSERT3U(ZTOI(xzp)->i_nlink, ==, 2);
mutex_enter(&xzp->z_lock);
xzp->z_unlinked = 1;
clear_nlink(ZTOI(xzp));
links = 0;
error = sa_update(xzp->z_sa_hdl, SA_ZPL_LINKS(zfsvfs),
&links, sizeof (links), tx);
ASSERT3U(error, ==, 0);
mutex_exit(&xzp->z_lock);
zfs_unlinked_add(xzp, tx);
if (zp->z_is_sa)
error = sa_remove(zp->z_sa_hdl,
SA_ZPL_XATTR(zfsvfs), tx);
else
error = sa_update(zp->z_sa_hdl,
SA_ZPL_XATTR(zfsvfs), &null_xattr,
sizeof (uint64_t), tx);
ASSERT0(error);
}
/*
* Add to the unlinked set because a new reference could be
* taken concurrently resulting in a deferred destruction.
*/
zfs_unlinked_add(zp, tx);
mutex_exit(&zp->z_lock);
} else if (unlinked) {
mutex_exit(&zp->z_lock);
zfs_unlinked_add(zp, tx);
}
txtype = TX_REMOVE;
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_remove(zilog, tx, txtype, dzp, name, obj);
dmu_tx_commit(tx);
out:
if (realnmp)
pn_free(realnmp);
zfs_dirent_unlock(dl);
zfs_inode_update(dzp);
zfs_inode_update(zp);
if (delete_now)
iput(ip);
else
zfs_iput_async(ip);
if (xzp) {
zfs_inode_update(xzp);
zfs_iput_async(ZTOI(xzp));
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Create a new directory and insert it into dip using the name
* provided. Return a pointer to the inserted directory.
*
* IN: dip - inode of directory to add subdir to.
* dirname - name of new directory.
* vap - attributes of new directory.
* cr - credentials of caller.
* vsecp - ACL to be set
*
* OUT: ipp - inode of created directory.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* dip - ctime|mtime updated
* ipp - ctime|mtime|atime updated
*/
/*ARGSUSED*/
int
zfs_mkdir(struct inode *dip, char *dirname, vattr_t *vap, struct inode **ipp,
cred_t *cr, int flags, vsecattr_t *vsecp)
{
znode_t *zp, *dzp = ITOZ(dip);
zfsvfs_t *zfsvfs = ITOZSB(dip);
zilog_t *zilog;
zfs_dirlock_t *dl;
uint64_t txtype;
dmu_tx_t *tx;
int error;
int zf = ZNEW;
uid_t uid;
gid_t gid = crgetgid(cr);
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
boolean_t waited = B_FALSE;
ASSERT(S_ISDIR(vap->va_mode));
/*
* If we have an ephemeral id, ACL, or XVATTR then
* make sure file system is at proper version
*/
uid = crgetuid(cr);
if (zfsvfs->z_use_fuids == B_FALSE &&
(vsecp || IS_EPHEMERAL(uid) || IS_EPHEMERAL(gid)))
return (SET_ERROR(EINVAL));
if (dirname == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (dzp->z_pflags & ZFS_XATTR) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (zfsvfs->z_utf8 && u8_validate(dirname,
strlen(dirname), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
if (vap->va_mask & ATTR_XVATTR) {
if ((error = secpolicy_xvattr((xvattr_t *)vap,
crgetuid(cr), cr, vap->va_mode)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
if ((error = zfs_acl_ids_create(dzp, 0, vap, cr,
vsecp, &acl_ids)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* First make sure the new directory doesn't exist.
*
* Existence is checked first to make sure we don't return
* EACCES instead of EEXIST which can cause some applications
* to fail.
*/
top:
*ipp = NULL;
if ((error = zfs_dirent_lock(&dl, dzp, dirname, &zp, zf,
NULL, NULL))) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_SUBDIRECTORY, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, zfs_inherit_projid(dzp))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EDQUOT));
}
/*
* Add a new entry to the directory.
*/
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, dirname);
dmu_tx_hold_zap(tx, DMU_NEW_OBJECT, FALSE, NULL);
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Create new node.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
/*
* Now put new name in parent dir.
*/
(void) zfs_link_create(dl, zp, tx, ZNEW);
*ipp = ZTOI(zp);
txtype = zfs_log_create_txtype(Z_DIR, vsecp, vap);
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_create(zilog, tx, txtype, dzp, zp, dirname, vsecp,
acl_ids.z_fuidp, vap);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
zfs_inode_update(dzp);
zfs_inode_update(zp);
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* Remove a directory subdir entry. If the current working
* directory is the same as the subdir to be removed, the
* remove will fail.
*
* IN: dip - inode of directory to remove from.
* name - name of directory to be removed.
* cwd - inode of current working directory.
* cr - credentials of caller.
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dip - ctime|mtime updated
*/
/*ARGSUSED*/
int
zfs_rmdir(struct inode *dip, char *name, struct inode *cwd, cred_t *cr,
int flags)
{
znode_t *dzp = ITOZ(dip);
znode_t *zp;
struct inode *ip;
zfsvfs_t *zfsvfs = ITOZSB(dip);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
int zflg = ZEXISTS;
boolean_t waited = B_FALSE;
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
top:
zp = NULL;
/*
* Attempt to lock directory; fail if entry doesn't exist.
*/
if ((error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg,
NULL, NULL))) {
ZFS_EXIT(zfsvfs);
return (error);
}
ip = ZTOI(zp);
if ((error = zfs_zaccess_delete(dzp, zp, cr))) {
goto out;
}
if (!S_ISDIR(ip->i_mode)) {
error = SET_ERROR(ENOTDIR);
goto out;
}
if (ip == cwd) {
error = SET_ERROR(EINVAL);
goto out;
}
/*
* Grab a lock on the directory to make sure that no one is
* trying to add (or lookup) entries while we are removing it.
*/
rw_enter(&zp->z_name_lock, RW_WRITER);
/*
* Grab a lock on the parent pointer to make sure we play well
* with the treewalk and directory rename code.
*/
rw_enter(&zp->z_parent_lock, RW_WRITER);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_zap(tx, dzp->z_id, FALSE, name);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, zp);
zfs_sa_upgrade_txholds(tx, dzp);
dmu_tx_mark_netfree(tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock);
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
iput(ip);
goto top;
}
dmu_tx_abort(tx);
iput(ip);
ZFS_EXIT(zfsvfs);
return (error);
}
error = zfs_link_destroy(dl, zp, tx, zflg, NULL);
if (error == 0) {
uint64_t txtype = TX_RMDIR;
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_remove(zilog, tx, txtype, dzp, name, ZFS_NO_OBJECT);
}
dmu_tx_commit(tx);
rw_exit(&zp->z_parent_lock);
rw_exit(&zp->z_name_lock);
out:
zfs_dirent_unlock(dl);
zfs_inode_update(dzp);
zfs_inode_update(zp);
iput(ip);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Read as many directory entries as will fit into the provided
* dirent buffer from the given directory cursor position.
*
* IN: ip - inode of directory to read.
* ctx - directory entry context: filler callback and cursor position.
*
* OUT: ctx - updated cursor position; entries are emitted via the filler.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - atime updated
*
* Note that the low 4 bits of the cookie returned by zap are always zero.
* This allows us to use the low range for "special" directory entries:
* We use 0 for '.', and 1 for '..'. If this is the root of the filesystem,
* we use the offset 2 for the '.zfs' directory.
*/
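/*
 * For example, listing the root of a filesystem with a visible
 * '.zfs' directory produces cookies in this order:
 *
 *   0 -> "."    (this directory)
 *   1 -> ".."   (parent directory)
 *   2 -> ".zfs" (control directory)
 *
 * Regular entries follow; for them ctx->pos is advanced to the
 * serialized ZAP cursor position returned by zap_cursor_serialize().
 */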
/* ARGSUSED */
int
zfs_readdir(struct inode *ip, struct dir_context *ctx, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
objset_t *os;
zap_cursor_t zc;
zap_attribute_t zap;
int error;
uint8_t prefetch;
uint8_t type;
int done = 0;
uint64_t parent;
uint64_t offset; /* must be unsigned; checks for < 1 */
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (parent))) != 0)
goto out;
/*
* Quit if directory has been removed (posix)
*/
if (zp->z_unlinked)
goto out;
error = 0;
os = zfsvfs->z_os;
offset = ctx->pos;
prefetch = zp->z_zn_prefetch;
/*
* Initialize the iterator cursor.
*/
if (offset <= 3) {
/*
* Start iteration from the beginning of the directory.
*/
zap_cursor_init(&zc, os, zp->z_id);
} else {
/*
* The offset is a serialized cursor.
*/
zap_cursor_init_serialized(&zc, os, zp->z_id, offset);
}
/*
* Transform to file-system independent format
*/
while (!done) {
uint64_t objnum;
/*
* Special case `.', `..', and `.zfs'.
*/
if (offset == 0) {
(void) strcpy(zap.za_name, ".");
zap.za_normalization_conflict = 0;
objnum = zp->z_id;
type = DT_DIR;
} else if (offset == 1) {
(void) strcpy(zap.za_name, "..");
zap.za_normalization_conflict = 0;
objnum = parent;
type = DT_DIR;
} else if (offset == 2 && zfs_show_ctldir(zp)) {
(void) strcpy(zap.za_name, ZFS_CTLDIR_NAME);
zap.za_normalization_conflict = 0;
objnum = ZFSCTL_INO_ROOT;
type = DT_DIR;
} else {
/*
* Grab next entry.
*/
if ((error = zap_cursor_retrieve(&zc, &zap))) {
if (error == ENOENT)
break;
else
goto update;
}
/*
* Allow multiple entries provided the first entry is
* the object id. Non-zpl consumers may safely make
* use of the additional space.
*
* XXX: This should be a feature flag for compatibility
*/
if (zap.za_integer_length != 8 ||
zap.za_num_integers == 0) {
cmn_err(CE_WARN, "zap_readdir: bad directory "
"entry, obj = %lld, offset = %lld, "
"length = %d, num = %lld\n",
(u_longlong_t)zp->z_id,
(u_longlong_t)offset,
zap.za_integer_length,
(u_longlong_t)zap.za_num_integers);
error = SET_ERROR(ENXIO);
goto update;
}
objnum = ZFS_DIRENT_OBJ(zap.za_first_integer);
type = ZFS_DIRENT_TYPE(zap.za_first_integer);
}
done = !dir_emit(ctx, zap.za_name, strlen(zap.za_name),
objnum, type);
if (done)
break;
/* Prefetch znode */
if (prefetch) {
dmu_prefetch(os, objnum, 0, 0, 0,
ZIO_PRIORITY_SYNC_READ);
}
/*
* Move to the next entry, fill in the previous offset.
*/
if (offset > 2 || (offset == 2 && !zfs_show_ctldir(zp))) {
zap_cursor_advance(&zc);
offset = zap_cursor_serialize(&zc);
} else {
offset += 1;
}
ctx->pos = offset;
}
zp->z_zn_prefetch = B_FALSE; /* a lookup will re-enable pre-fetching */
update:
zap_cursor_fini(&zc);
if (error == ENOENT)
error = 0;
out:
ZFS_EXIT(zfsvfs);
return (error);
}
ulong_t zfs_fsync_sync_cnt = 4;
int
zfs_fsync(struct inode *ip, int syncflag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
(void) tsd_set(zfs_fsyncer_key, (void *)zfs_fsync_sync_cnt);
if (zfsvfs->z_os->os_sync != ZFS_SYNC_DISABLED) {
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
zil_commit(zfsvfs->z_log, zp->z_id);
ZFS_EXIT(zfsvfs);
}
tsd_set(zfs_fsyncer_key, NULL);
return (0);
}
/*
* Get the requested file attributes and place them in the provided
* vattr structure.
*
* IN: ip - inode of file.
* vap - va_mask identifies requested attributes.
* If ATTR_XVATTR set, then optional attrs are requested
* flags - ATTR_NOACLCHECK (CIFS server context)
* cr - credentials of caller.
*
* OUT: vap - attribute values.
*
* RETURN: 0 (always succeeds)
*/
/* ARGSUSED */
int
zfs_getattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error = 0;
uint64_t links;
uint64_t atime[2], mtime[2], ctime[2];
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap = NULL;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
sa_bulk_attr_t bulk[3];
int count = 0;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
zfs_fuid_map_ids(zp, cr, &vap->va_uid, &vap->va_gid);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
if ((error = sa_bulk_lookup(zp->z_sa_hdl, bulk, count)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* If ACL is trivial don't bother looking for ACE_READ_ATTRIBUTES.
* Also, if we are the owner, don't bother, since the owner should
* always be allowed to read the basic attributes of the file.
*/
if (!(zp->z_pflags & ZFS_ACL_TRIVIAL) &&
(vap->va_uid != crgetuid(cr))) {
if ((error = zfs_zaccess(zp, ACE_READ_ATTRIBUTES, 0,
skipaclchk, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
}
/*
* Return all attributes. It's cheaper to provide the answer
* than to determine whether we were asked the question.
*/
mutex_enter(&zp->z_lock);
vap->va_type = vn_mode_to_vtype(zp->z_mode);
vap->va_mode = zp->z_mode;
vap->va_fsid = ZTOI(zp)->i_sb->s_dev;
vap->va_nodeid = zp->z_id;
if ((zp->z_id == zfsvfs->z_root) && zfs_show_ctldir(zp))
links = ZTOI(zp)->i_nlink + 1;
else
links = ZTOI(zp)->i_nlink;
vap->va_nlink = MIN(links, ZFS_LINK_MAX);
vap->va_size = i_size_read(ip);
vap->va_rdev = ip->i_rdev;
vap->va_seq = ip->i_generation;
/*
* Add in any requested optional attributes and the create time.
* Also set the corresponding bits in the returned attribute bitmap.
*/
if ((xoap = xva_getxoptattr(xvap)) != NULL && zfsvfs->z_use_fuids) {
if (XVA_ISSET_REQ(xvap, XAT_ARCHIVE)) {
xoap->xoa_archive =
((zp->z_pflags & ZFS_ARCHIVE) != 0);
XVA_SET_RTN(xvap, XAT_ARCHIVE);
}
if (XVA_ISSET_REQ(xvap, XAT_READONLY)) {
xoap->xoa_readonly =
((zp->z_pflags & ZFS_READONLY) != 0);
XVA_SET_RTN(xvap, XAT_READONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_SYSTEM)) {
xoap->xoa_system =
((zp->z_pflags & ZFS_SYSTEM) != 0);
XVA_SET_RTN(xvap, XAT_SYSTEM);
}
if (XVA_ISSET_REQ(xvap, XAT_HIDDEN)) {
xoap->xoa_hidden =
((zp->z_pflags & ZFS_HIDDEN) != 0);
XVA_SET_RTN(xvap, XAT_HIDDEN);
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
xoap->xoa_nounlink =
((zp->z_pflags & ZFS_NOUNLINK) != 0);
XVA_SET_RTN(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
xoap->xoa_immutable =
((zp->z_pflags & ZFS_IMMUTABLE) != 0);
XVA_SET_RTN(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
xoap->xoa_appendonly =
((zp->z_pflags & ZFS_APPENDONLY) != 0);
XVA_SET_RTN(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
xoap->xoa_nodump =
((zp->z_pflags & ZFS_NODUMP) != 0);
XVA_SET_RTN(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(xvap, XAT_OPAQUE)) {
xoap->xoa_opaque =
((zp->z_pflags & ZFS_OPAQUE) != 0);
XVA_SET_RTN(xvap, XAT_OPAQUE);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
xoap->xoa_av_quarantined =
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0);
XVA_SET_RTN(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
xoap->xoa_av_modified =
((zp->z_pflags & ZFS_AV_MODIFIED) != 0);
XVA_SET_RTN(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) &&
S_ISREG(ip->i_mode)) {
zfs_sa_get_scanstamp(zp, xvap);
}
if (XVA_ISSET_REQ(xvap, XAT_CREATETIME)) {
uint64_t times[2];
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_CRTIME(zfsvfs),
times, sizeof (times));
ZFS_TIME_DECODE(&xoap->xoa_createtime, times);
XVA_SET_RTN(xvap, XAT_CREATETIME);
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
xoap->xoa_reparse = ((zp->z_pflags & ZFS_REPARSE) != 0);
XVA_SET_RTN(xvap, XAT_REPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_GEN)) {
xoap->xoa_generation = ip->i_generation;
XVA_SET_RTN(xvap, XAT_GEN);
}
if (XVA_ISSET_REQ(xvap, XAT_OFFLINE)) {
xoap->xoa_offline =
((zp->z_pflags & ZFS_OFFLINE) != 0);
XVA_SET_RTN(xvap, XAT_OFFLINE);
}
if (XVA_ISSET_REQ(xvap, XAT_SPARSE)) {
xoap->xoa_sparse =
((zp->z_pflags & ZFS_SPARSE) != 0);
XVA_SET_RTN(xvap, XAT_SPARSE);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
xoap->xoa_projinherit =
((zp->z_pflags & ZFS_PROJINHERIT) != 0);
XVA_SET_RTN(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
xoap->xoa_projid = zp->z_projid;
XVA_SET_RTN(xvap, XAT_PROJID);
}
}
ZFS_TIME_DECODE(&vap->va_atime, atime);
ZFS_TIME_DECODE(&vap->va_mtime, mtime);
ZFS_TIME_DECODE(&vap->va_ctime, ctime);
mutex_exit(&zp->z_lock);
sa_object_size(zp->z_sa_hdl, &vap->va_blksize, &vap->va_nblocks);
if (zp->z_blksz == 0) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
vap->va_blksize = zfsvfs->z_max_blksz;
}
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* Get the basic file attributes and place them in the provided kstat
* structure. The inode is assumed to be the authoritative source
* for most of the attributes. However, the znode currently has the
* authoritative atime, blksize, and block count.
*
* IN: ip - inode of file.
*
* OUT: sp - kstat values.
*
* RETURN: 0 (always succeeds)
*/
/* ARGSUSED */
int
zfs_getattr_fast(struct inode *ip, struct kstat *sp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint32_t blksize;
u_longlong_t nblocks;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
mutex_enter(&zp->z_lock);
generic_fillattr(ip, sp);
sa_object_size(zp->z_sa_hdl, &blksize, &nblocks);
sp->blksize = blksize;
sp->blocks = nblocks;
if (unlikely(zp->z_blksz == 0)) {
/*
* Block size hasn't been set; suggest maximal I/O transfers.
*/
sp->blksize = zfsvfs->z_max_blksz;
}
mutex_exit(&zp->z_lock);
/*
* Required to prevent the NFS client from observing different inode
* numbers for the snapshot root dentry before and after the snapshot
* is mounted.
*/
if (zfsvfs->z_issnap) {
if (ip->i_sb->s_root->d_inode == ip)
sp->ino = ZFSCTL_INO_SNAPDIRS -
dmu_objset_id(zfsvfs->z_os);
}
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* When changing a file's user/group/project, we need to handle not only
* the main object that is assigned to the file directly, but also the
* objects that the file uses via its hidden xattr directory.
*
* Because the xattr directory may contain many EA entries, it may be
* impossible to change all of them within the single transaction that
* changes the main object's user/group/project attributes. Instead, we
* change them one by one via separate, independent transactions. This
* may not be an ideal solution, but we have no better idea yet.
*/
static int
zfs_setattr_dir(znode_t *dzp)
{
struct inode *dxip = ZTOI(dzp);
struct inode *xip = NULL;
zfsvfs_t *zfsvfs = ITOZSB(dxip);
objset_t *os = zfsvfs->z_os;
zap_cursor_t zc;
zap_attribute_t zap;
zfs_dirlock_t *dl;
znode_t *zp;
dmu_tx_t *tx = NULL;
uint64_t uid, gid;
sa_bulk_attr_t bulk[4];
int count = 0;
int err;
zap_cursor_init(&zc, os, dzp->z_id);
while ((err = zap_cursor_retrieve(&zc, &zap)) == 0) {
if (zap.za_integer_length != 8 || zap.za_num_integers != 1) {
err = ENXIO;
break;
}
err = zfs_dirent_lock(&dl, dzp, (char *)zap.za_name, &zp,
ZEXISTS, NULL, NULL);
if (err == ENOENT)
goto next;
if (err)
break;
xip = ZTOI(zp);
if (KUID_TO_SUID(xip->i_uid) == KUID_TO_SUID(dxip->i_uid) &&
KGID_TO_SGID(xip->i_gid) == KGID_TO_SGID(dxip->i_gid) &&
zp->z_projid == dzp->z_projid)
goto next;
tx = dmu_tx_create(os);
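/*
 * If the child has no project ID slot yet, its SA layout must be
 * able to grow when the attribute is added, so reserve room for
 * a larger SA.
 */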
if (!(zp->z_pflags & ZFS_PROJID))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
break;
mutex_enter(&dzp->z_lock);
if (KUID_TO_SUID(xip->i_uid) != KUID_TO_SUID(dxip->i_uid)) {
xip->i_uid = dxip->i_uid;
uid = zfs_uid_read(dxip);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&uid, sizeof (uid));
}
if (KGID_TO_SGID(xip->i_gid) != KGID_TO_SGID(dxip->i_gid)) {
xip->i_gid = dxip->i_gid;
gid = zfs_gid_read(dxip);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs), NULL,
&gid, sizeof (gid));
}
if (zp->z_projid != dzp->z_projid) {
if (!(zp->z_pflags & ZFS_PROJID)) {
zp->z_pflags |= ZFS_PROJID;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_FLAGS(zfsvfs), NULL, &zp->z_pflags,
sizeof (zp->z_pflags));
}
zp->z_projid = dzp->z_projid;
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_PROJID(zfsvfs),
NULL, &zp->z_projid, sizeof (zp->z_projid));
}
mutex_exit(&dzp->z_lock);
if (likely(count > 0)) {
err = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
} else {
dmu_tx_abort(tx);
}
tx = NULL;
if (err != 0 && err != ENOENT)
break;
next:
if (xip) {
iput(xip);
xip = NULL;
zfs_dirent_unlock(dl);
}
zap_cursor_advance(&zc);
}
if (tx)
dmu_tx_abort(tx);
if (xip) {
iput(xip);
zfs_dirent_unlock(dl);
}
zap_cursor_fini(&zc);
return (err == ENOENT ? 0 : err);
}
/*
* Set the file attributes to the values contained in the
* vattr structure.
*
* IN: ip - inode of file to be modified.
* vap - new attribute values.
* If ATTR_XVATTR set, then optional attrs are being set
* flags - ATTR_UTIME set if non-default time values provided.
* - ATTR_NOACLCHECK (CIFS context only).
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime updated, mtime updated if size changed.
*/
/* ARGSUSED */
int
zfs_setattr(struct inode *ip, vattr_t *vap, int flags, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
objset_t *os = zfsvfs->z_os;
zilog_t *zilog;
dmu_tx_t *tx;
vattr_t oldva;
xvattr_t *tmpxvattr;
uint_t mask = vap->va_mask;
uint_t saved_mask = 0;
int trim_mask = 0;
uint64_t new_mode;
uint64_t new_kuid = 0, new_kgid = 0, new_uid, new_gid;
uint64_t xattr_obj;
uint64_t mtime[2], ctime[2], atime[2];
uint64_t projid = ZFS_INVALID_PROJID;
znode_t *attrzp;
int need_policy = FALSE;
int err, err2 = 0;
zfs_fuid_info_t *fuidp = NULL;
xvattr_t *xvap = (xvattr_t *)vap; /* vap may be an xvattr_t * */
xoptattr_t *xoap;
zfs_acl_t *aclp;
boolean_t skipaclchk = (flags & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
boolean_t fuid_dirtied = B_FALSE;
boolean_t handle_eadir = B_FALSE;
sa_bulk_attr_t *bulk, *xattr_bulk;
int count = 0, xattr_count = 0, bulks = 8;
if (mask == 0)
return (0);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
/*
* If this is a xvattr_t, then get a pointer to the structure of
* optional attributes. If this is NULL, then we have a vattr_t.
*/
xoap = xva_getxoptattr(xvap);
if (xoap != NULL && (mask & ATTR_XVATTR)) {
if (XVA_ISSET_REQ(xvap, XAT_PROJID)) {
if (!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOTSUP));
}
projid = xoap->xoa_projid;
if (unlikely(projid == ZFS_INVALID_PROJID)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (projid == zp->z_projid && zp->z_pflags & ZFS_PROJID)
projid = ZFS_INVALID_PROJID;
else
need_policy = TRUE;
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT) &&
(xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) &&
(!dmu_objset_projectquota_enabled(os) ||
(!S_ISREG(ip->i_mode) && !S_ISDIR(ip->i_mode)))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENOTSUP));
}
}
zilog = zfsvfs->z_log;
/*
* Make sure that if we have ephemeral uid/gid or xvattr specified
* that file system is at proper version level
*/
if (zfsvfs->z_use_fuids == B_FALSE &&
(((mask & ATTR_UID) && IS_EPHEMERAL(vap->va_uid)) ||
((mask & ATTR_GID) && IS_EPHEMERAL(vap->va_gid)) ||
(mask & ATTR_XVATTR))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
if (mask & ATTR_SIZE && S_ISDIR(ip->i_mode)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EISDIR));
}
if (mask & ATTR_SIZE && !S_ISREG(ip->i_mode) && !S_ISFIFO(ip->i_mode)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
tmpxvattr = kmem_alloc(sizeof (xvattr_t), KM_SLEEP);
xva_init(tmpxvattr);
bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
xattr_bulk = kmem_alloc(sizeof (sa_bulk_attr_t) * bulks, KM_SLEEP);
/*
* On immutable files, only the immutable bit and atime may be altered
*/
if ((zp->z_pflags & ZFS_IMMUTABLE) &&
((mask & (ATTR_SIZE|ATTR_UID|ATTR_GID|ATTR_MTIME|ATTR_MODE)) ||
((mask & ATTR_XVATTR) && XVA_ISSET_REQ(xvap, XAT_CREATETIME)))) {
err = SET_ERROR(EPERM);
goto out3;
}
if ((mask & ATTR_SIZE) && (zp->z_pflags & ZFS_READONLY)) {
err = SET_ERROR(EPERM);
goto out3;
}
/*
* Verify that the timestamps don't overflow 32 bits.
* ZFS can handle large timestamps, but 32-bit syscalls can't
* handle times greater than 2039. This check should be removed
* once large timestamps are fully supported.
*/
if (mask & (ATTR_ATIME | ATTR_MTIME)) {
if (((mask & ATTR_ATIME) &&
TIMESPEC_OVERFLOW(&vap->va_atime)) ||
((mask & ATTR_MTIME) &&
TIMESPEC_OVERFLOW(&vap->va_mtime))) {
err = SET_ERROR(EOVERFLOW);
goto out3;
}
}
top:
attrzp = NULL;
aclp = NULL;
/* Can this be moved to before the top label? */
if (zfs_is_readonly(zfsvfs)) {
err = SET_ERROR(EROFS);
goto out3;
}
/*
* First validate permissions
*/
if (mask & ATTR_SIZE) {
err = zfs_zaccess(zp, ACE_WRITE_DATA, 0, skipaclchk, cr);
if (err)
goto out3;
/*
* XXX - Note, we are not providing any open
* mode flags here (like FNDELAY), so we may
* block if there are locks present... this
* should be addressed in openat().
*/
/* XXX - would it be OK to generate a log record here? */
err = zfs_freesp(zp, vap->va_size, 0, 0, FALSE);
if (err)
goto out3;
}
if (mask & (ATTR_ATIME|ATTR_MTIME) ||
((mask & ATTR_XVATTR) && (XVA_ISSET_REQ(xvap, XAT_HIDDEN) ||
XVA_ISSET_REQ(xvap, XAT_READONLY) ||
XVA_ISSET_REQ(xvap, XAT_ARCHIVE) ||
XVA_ISSET_REQ(xvap, XAT_OFFLINE) ||
XVA_ISSET_REQ(xvap, XAT_SPARSE) ||
XVA_ISSET_REQ(xvap, XAT_CREATETIME) ||
XVA_ISSET_REQ(xvap, XAT_SYSTEM)))) {
need_policy = zfs_zaccess(zp, ACE_WRITE_ATTRIBUTES, 0,
skipaclchk, cr);
}
if (mask & (ATTR_UID|ATTR_GID)) {
int idmask = (mask & (ATTR_UID|ATTR_GID));
int take_owner;
int take_group;
/*
* NOTE: even if a new mode is being set,
* we may clear S_ISUID/S_ISGID bits.
*/
if (!(mask & ATTR_MODE))
vap->va_mode = zp->z_mode;
/*
* Take ownership or chgrp to group we are a member of
*/
take_owner = (mask & ATTR_UID) && (vap->va_uid == crgetuid(cr));
take_group = (mask & ATTR_GID) &&
zfs_groupmember(zfsvfs, vap->va_gid, cr);
/*
* If both ATTR_UID and ATTR_GID are set then take_owner and
* take_group must both be set in order to allow taking
* ownership.
*
* Otherwise, send the check through secpolicy_vnode_setattr()
*
*/
if (((idmask == (ATTR_UID|ATTR_GID)) &&
take_owner && take_group) ||
((idmask == ATTR_UID) && take_owner) ||
((idmask == ATTR_GID) && take_group)) {
if (zfs_zaccess(zp, ACE_WRITE_OWNER, 0,
skipaclchk, cr) == 0) {
/*
* Remove setuid/setgid for non-privileged users
*/
(void) secpolicy_setid_clear(vap, cr);
trim_mask = (mask & (ATTR_UID|ATTR_GID));
} else {
need_policy = TRUE;
}
} else {
need_policy = TRUE;
}
}
mutex_enter(&zp->z_lock);
oldva.va_mode = zp->z_mode;
zfs_fuid_map_ids(zp, cr, &oldva.va_uid, &oldva.va_gid);
if (mask & ATTR_XVATTR) {
/*
* Update xvattr mask to include only those attributes
* that are actually changing.
*
* the bits will be restored prior to actually setting
* the attributes so the caller thinks they were set.
*/
if (XVA_ISSET_REQ(xvap, XAT_APPENDONLY)) {
if (xoap->xoa_appendonly !=
((zp->z_pflags & ZFS_APPENDONLY) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_APPENDONLY);
XVA_SET_REQ(tmpxvattr, XAT_APPENDONLY);
}
}
if (XVA_ISSET_REQ(xvap, XAT_PROJINHERIT)) {
if (xoap->xoa_projinherit !=
((zp->z_pflags & ZFS_PROJINHERIT) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_PROJINHERIT);
XVA_SET_REQ(tmpxvattr, XAT_PROJINHERIT);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NOUNLINK)) {
if (xoap->xoa_nounlink !=
((zp->z_pflags & ZFS_NOUNLINK) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NOUNLINK);
XVA_SET_REQ(tmpxvattr, XAT_NOUNLINK);
}
}
if (XVA_ISSET_REQ(xvap, XAT_IMMUTABLE)) {
if (xoap->xoa_immutable !=
((zp->z_pflags & ZFS_IMMUTABLE) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_IMMUTABLE);
XVA_SET_REQ(tmpxvattr, XAT_IMMUTABLE);
}
}
if (XVA_ISSET_REQ(xvap, XAT_NODUMP)) {
if (xoap->xoa_nodump !=
((zp->z_pflags & ZFS_NODUMP) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_NODUMP);
XVA_SET_REQ(tmpxvattr, XAT_NODUMP);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_MODIFIED)) {
if (xoap->xoa_av_modified !=
((zp->z_pflags & ZFS_AV_MODIFIED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_MODIFIED);
XVA_SET_REQ(tmpxvattr, XAT_AV_MODIFIED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_AV_QUARANTINED)) {
if ((!S_ISREG(ip->i_mode) &&
xoap->xoa_av_quarantined) ||
xoap->xoa_av_quarantined !=
((zp->z_pflags & ZFS_AV_QUARANTINED) != 0)) {
need_policy = TRUE;
} else {
XVA_CLR_REQ(xvap, XAT_AV_QUARANTINED);
XVA_SET_REQ(tmpxvattr, XAT_AV_QUARANTINED);
}
}
if (XVA_ISSET_REQ(xvap, XAT_REPARSE)) {
mutex_exit(&zp->z_lock);
err = SET_ERROR(EPERM);
goto out3;
}
if (need_policy == FALSE &&
(XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP) ||
XVA_ISSET_REQ(xvap, XAT_OPAQUE))) {
need_policy = TRUE;
}
}
mutex_exit(&zp->z_lock);
if (mask & ATTR_MODE) {
if (zfs_zaccess(zp, ACE_WRITE_ACL, 0, skipaclchk, cr) == 0) {
err = secpolicy_setid_setsticky_clear(ip, vap,
&oldva, cr);
if (err)
goto out3;
trim_mask |= ATTR_MODE;
} else {
need_policy = TRUE;
}
}
if (need_policy) {
/*
* If trim_mask is set then take ownership
* has been granted or write_acl is present and user
* has the ability to modify mode. In that case remove
* UID|GID and/or MODE from mask so that
* secpolicy_vnode_setattr() doesn't revoke it.
*/
if (trim_mask) {
saved_mask = vap->va_mask;
vap->va_mask &= ~trim_mask;
}
err = secpolicy_vnode_setattr(cr, ip, vap, &oldva, flags,
(int (*)(void *, int, cred_t *))zfs_zaccess_unix, zp);
if (err)
goto out3;
if (trim_mask)
vap->va_mask |= saved_mask;
}
/*
* secpolicy_vnode_setattr, or take ownership may have
* changed va_mask
*/
mask = vap->va_mask;
if ((mask & (ATTR_UID | ATTR_GID)) || projid != ZFS_INVALID_PROJID) {
handle_eadir = B_TRUE;
err = sa_lookup(zp->z_sa_hdl, SA_ZPL_XATTR(zfsvfs),
&xattr_obj, sizeof (xattr_obj));
if (err == 0 && xattr_obj) {
err = zfs_zget(ZTOZSB(zp), xattr_obj, &attrzp);
if (err)
goto out2;
}
if (mask & ATTR_UID) {
new_kuid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_uid, cr, ZFS_OWNER, &fuidp);
if (new_kuid != KUID_TO_SUID(ZTOI(zp)->i_uid) &&
zfs_id_overquota(zfsvfs, DMU_USERUSED_OBJECT,
new_kuid)) {
if (attrzp)
iput(ZTOI(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (mask & ATTR_GID) {
new_kgid = zfs_fuid_create(zfsvfs,
(uint64_t)vap->va_gid, cr, ZFS_GROUP, &fuidp);
if (new_kgid != KGID_TO_SGID(ZTOI(zp)->i_gid) &&
zfs_id_overquota(zfsvfs, DMU_GROUPUSED_OBJECT,
new_kgid)) {
if (attrzp)
iput(ZTOI(attrzp));
err = SET_ERROR(EDQUOT);
goto out2;
}
}
if (projid != ZFS_INVALID_PROJID &&
zfs_id_overquota(zfsvfs, DMU_PROJECTUSED_OBJECT, projid)) {
if (attrzp)
iput(ZTOI(attrzp));
err = EDQUOT;
goto out2;
}
}
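/*
* Stage the on-disk update: create a transaction and reserve holds
* for the SA and ACL objects that may be modified below.
*/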
tx = dmu_tx_create(os);
if (mask & ATTR_MODE) {
uint64_t pmode = zp->z_mode;
uint64_t acl_obj;
new_mode = (pmode & S_IFMT) | (vap->va_mode & ~S_IFMT);
zfs_acl_chmod_setattr(zp, &aclp, new_mode);
mutex_enter(&zp->z_lock);
if (!zp->z_is_sa && ((acl_obj = zfs_external_acl(zp)) != 0)) {
/*
* Are we upgrading ACL from old V0 format
* to V1 format?
*/
if (zfsvfs->z_version >= ZPL_VERSION_FUID &&
zfs_znode_acl_version(zp) ==
ZFS_ACL_VERSION_INITIAL) {
dmu_tx_hold_free(tx, acl_obj, 0,
DMU_OBJECT_END);
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
} else {
dmu_tx_hold_write(tx, acl_obj, 0,
aclp->z_acl_bytes);
}
} else if (!zp->z_is_sa && aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT,
0, aclp->z_acl_bytes);
}
mutex_exit(&zp->z_lock);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
} else {
if (((mask & ATTR_XVATTR) &&
XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP)) ||
(projid != ZFS_INVALID_PROJID &&
!(zp->z_pflags & ZFS_PROJID)))
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_TRUE);
else
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
}
if (attrzp) {
dmu_tx_hold_sa(tx, attrzp->z_sa_hdl, B_FALSE);
}
fuid_dirtied = zfsvfs->z_fuid_dirty;
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_WAIT);
if (err)
goto out;
count = 0;
/*
* Set each attribute requested.
* We group settings according to the locks they need to acquire.
*
* Note: you cannot set ctime directly, although it will be
* updated as a side-effect of calling this function.
*/
if (projid != ZFS_INVALID_PROJID && !(zp->z_pflags & ZFS_PROJID)) {
/*
* An existing object upgraded from an older system has no slot in
* its on-disk layout for the project ID attribute, but the quota
* accounting logic accesses that slot by offset directly. Adjust
* the old object's layout so the project ID lands at a unified,
* fixed offset.
*/
if (attrzp)
err = sa_add_projid(attrzp->z_sa_hdl, tx, projid);
if (err == 0)
err = sa_add_projid(zp->z_sa_hdl, tx, projid);
if (unlikely(err == EEXIST))
err = 0;
else if (err != 0)
goto out;
else
projid = ZFS_INVALID_PROJID;
}
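/*
* Take the ACL lock before the znode lock when owner, group, or mode
* is changing; both locks are dropped, in reverse order, once the
* bulk attribute updates have been staged.
*/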
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&zp->z_acl_lock);
mutex_enter(&zp->z_lock);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, sizeof (zp->z_pflags));
if (attrzp) {
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_enter(&attrzp->z_acl_lock);
mutex_enter(&attrzp->z_lock);
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_FLAGS(zfsvfs), NULL, &attrzp->z_pflags,
sizeof (attrzp->z_pflags));
if (projid != ZFS_INVALID_PROJID) {
attrzp->z_projid = projid;
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_PROJID(zfsvfs), NULL, &attrzp->z_projid,
sizeof (attrzp->z_projid));
}
}
if (mask & (ATTR_UID|ATTR_GID)) {
if (mask & ATTR_UID) {
ZTOI(zp)->i_uid = SUID_TO_KUID(new_kuid);
new_uid = zfs_uid_read(ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_UID(zfsvfs), NULL,
&new_uid, sizeof (new_uid));
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_UID(zfsvfs), NULL, &new_uid,
sizeof (new_uid));
ZTOI(attrzp)->i_uid = SUID_TO_KUID(new_uid);
}
}
if (mask & ATTR_GID) {
ZTOI(zp)->i_gid = SGID_TO_KGID(new_kgid);
new_gid = zfs_gid_read(ZTOI(zp));
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_GID(zfsvfs),
NULL, &new_gid, sizeof (new_gid));
if (attrzp) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_GID(zfsvfs), NULL, &new_gid,
sizeof (new_gid));
ZTOI(attrzp)->i_gid = SGID_TO_KGID(new_kgid);
}
}
if (!(mask & ATTR_MODE)) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs),
NULL, &new_mode, sizeof (new_mode));
new_mode = zp->z_mode;
}
err = zfs_acl_chown_setattr(zp);
ASSERT(err == 0);
if (attrzp) {
err = zfs_acl_chown_setattr(attrzp);
ASSERT(err == 0);
}
}
if (mask & ATTR_MODE) {
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MODE(zfsvfs), NULL,
&new_mode, sizeof (new_mode));
zp->z_mode = ZTOI(zp)->i_mode = new_mode;
ASSERT3P(aclp, !=, NULL);
err = zfs_aclset_common(zp, aclp, cr, tx);
ASSERT0(err);
if (zp->z_acl_cached)
zfs_acl_free(zp->z_acl_cached);
zp->z_acl_cached = aclp;
aclp = NULL;
}
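/*
* Write out the atime if the caller asked for it or if an in-core
* atime update is still pending (z_atime_dirty).
*/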
if ((mask & ATTR_ATIME) || zp->z_atime_dirty) {
zp->z_atime_dirty = 0;
ZFS_TIME_ENCODE(&ip->i_atime, atime);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_ATIME(zfsvfs), NULL,
&atime, sizeof (atime));
}
if (mask & (ATTR_MTIME | ATTR_SIZE)) {
ZFS_TIME_ENCODE(&vap->va_mtime, mtime);
ZTOI(zp)->i_mtime = timespec_trunc(vap->va_mtime,
ZTOI(zp)->i_sb->s_time_gran);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs), NULL,
mtime, sizeof (mtime));
}
if (mask & (ATTR_CTIME | ATTR_SIZE)) {
ZFS_TIME_ENCODE(&vap->va_ctime, ctime);
ZTOI(zp)->i_ctime = timespec_trunc(vap->va_ctime,
ZTOI(zp)->i_sb->s_time_gran);
SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs), NULL,
ctime, sizeof (ctime));
}
if (projid != ZFS_INVALID_PROJID) {
zp->z_projid = projid;
SA_ADD_BULK_ATTR(bulk, count,
SA_ZPL_PROJID(zfsvfs), NULL, &zp->z_projid,
sizeof (zp->z_projid));
}
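/* Mirror the ctime update into the extended attribute directory. */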
if (attrzp && mask) {
SA_ADD_BULK_ATTR(xattr_bulk, xattr_count,
SA_ZPL_CTIME(zfsvfs), NULL, &ctime,
sizeof (ctime));
}
/*
* Do this after setting timestamps to prevent timestamp
* update from toggling bit
*/
if (xoap && (mask & ATTR_XVATTR)) {
/*
* Restore the masks that were trimmed off above so that the
* return masks can be set for the caller.
*/
if (XVA_ISSET_REQ(tmpxvattr, XAT_APPENDONLY)) {
XVA_SET_REQ(xvap, XAT_APPENDONLY);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_NOUNLINK)) {
XVA_SET_REQ(xvap, XAT_NOUNLINK);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_IMMUTABLE)) {
XVA_SET_REQ(xvap, XAT_IMMUTABLE);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_NODUMP)) {
XVA_SET_REQ(xvap, XAT_NODUMP);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_MODIFIED)) {
XVA_SET_REQ(xvap, XAT_AV_MODIFIED);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_AV_QUARANTINED)) {
XVA_SET_REQ(xvap, XAT_AV_QUARANTINED);
}
if (XVA_ISSET_REQ(tmpxvattr, XAT_PROJINHERIT)) {
XVA_SET_REQ(xvap, XAT_PROJINHERIT);
}
if (XVA_ISSET_REQ(xvap, XAT_AV_SCANSTAMP))
ASSERT(S_ISREG(ip->i_mode));
zfs_xvattr_set(zp, xvap, tx);
}
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
if (mask != 0)
zfs_log_setattr(zilog, tx, TX_SETATTR, zp, vap, mask, fuidp);
mutex_exit(&zp->z_lock);
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&zp->z_acl_lock);
if (attrzp) {
if (mask & (ATTR_UID|ATTR_GID|ATTR_MODE))
mutex_exit(&attrzp->z_acl_lock);
mutex_exit(&attrzp->z_lock);
}
out:
if (err == 0 && xattr_count > 0) {
err2 = sa_bulk_update(attrzp->z_sa_hdl, xattr_bulk,
xattr_count, tx);
ASSERT(err2 == 0);
}
if (aclp)
zfs_acl_free(aclp);
if (fuidp) {
zfs_fuid_info_free(fuidp);
fuidp = NULL;
}
if (err) {
dmu_tx_abort(tx);
if (attrzp)
iput(ZTOI(attrzp));
if (err == ERESTART)
goto top;
} else {
if (count > 0)
err2 = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
dmu_tx_commit(tx);
if (attrzp) {
if (err2 == 0 && handle_eadir)
err2 = zfs_setattr_dir(attrzp);
iput(ZTOI(attrzp));
}
zfs_inode_update(zp);
}
out2:
if (os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
out3:
kmem_free(xattr_bulk, sizeof (sa_bulk_attr_t) * bulks);
kmem_free(bulk, sizeof (sa_bulk_attr_t) * bulks);
kmem_free(tmpxvattr, sizeof (xvattr_t));
ZFS_EXIT(zfsvfs);
return (err);
}
typedef struct zfs_zlock {
krwlock_t *zl_rwlock; /* lock we acquired */
znode_t *zl_znode; /* znode we held */
struct zfs_zlock *zl_next; /* next in list */
} zfs_zlock_t;
/*
* Drop locks and release vnodes that were held by zfs_rename_lock().
*/
static void
zfs_rename_unlock(zfs_zlock_t **zlpp)
{
zfs_zlock_t *zl;
while ((zl = *zlpp) != NULL) {
if (zl->zl_znode != NULL)
zfs_iput_async(ZTOI(zl->zl_znode));
rw_exit(zl->zl_rwlock);
*zlpp = zl->zl_next;
kmem_free(zl, sizeof (*zl));
}
}
/*
* Search back through the directory tree, using the ".." entries.
* Lock each directory in the chain to prevent concurrent renames.
* Fail any attempt to move a directory into one of its own descendants.
* XXX - z_parent_lock can overlap with map or grow locks
*/
static int
zfs_rename_lock(znode_t *szp, znode_t *tdzp, znode_t *sdzp, zfs_zlock_t **zlpp)
{
zfs_zlock_t *zl;
znode_t *zp = tdzp;
uint64_t rootid = ZTOZSB(zp)->z_root;
uint64_t oidp = zp->z_id;
krwlock_t *rwlp = &szp->z_parent_lock;
krw_t rw = RW_WRITER;
/*
* First pass write-locks szp and compares to zp->z_id.
* Later passes read-lock zp and compare to zp->z_parent.
*/
do {
if (!rw_tryenter(rwlp, rw)) {
/*
* Another thread is renaming in this path.
* Note that if we are a WRITER, we don't have any
* parent_locks held yet.
*/
if (rw == RW_READER && zp->z_id > szp->z_id) {
/*
* Drop our locks and restart
*/
zfs_rename_unlock(&zl);
*zlpp = NULL;
zp = tdzp;
oidp = zp->z_id;
rwlp = &szp->z_parent_lock;
rw = RW_WRITER;
continue;
} else {
/*
* Wait for other thread to drop its locks
*/
rw_enter(rwlp, rw);
}
}
zl = kmem_alloc(sizeof (*zl), KM_SLEEP);
zl->zl_rwlock = rwlp;
zl->zl_znode = NULL;
zl->zl_next = *zlpp;
*zlpp = zl;
if (oidp == szp->z_id) /* We're a descendant of szp */
return (SET_ERROR(EINVAL));
if (oidp == rootid) /* We've hit the top */
return (0);
if (rw == RW_READER) { /* i.e. not the first pass */
int error = zfs_zget(ZTOZSB(zp), oidp, &zp);
if (error)
return (error);
zl->zl_znode = zp;
}
(void) sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(ZTOZSB(zp)),
&oidp, sizeof (oidp));
rwlp = &zp->z_parent_lock;
rw = RW_READER;
} while (zp->z_id != sdzp->z_id);
return (0);
}
/*
* Move an entry from the provided source directory to the target
* directory. Change the entry name as indicated.
*
* IN: sdip - Source directory containing the "old entry".
* snm - Old entry name.
* tdip - Target directory to contain the "new entry".
* tnm - New entry name.
* cr - credentials of caller.
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* sdip,tdip - ctime|mtime updated
*/
/*ARGSUSED*/
int
zfs_rename(struct inode *sdip, char *snm, struct inode *tdip, char *tnm,
cred_t *cr, int flags)
{
znode_t *tdzp, *szp, *tzp;
znode_t *sdzp = ITOZ(sdip);
zfsvfs_t *zfsvfs = ITOZSB(sdip);
zilog_t *zilog;
zfs_dirlock_t *sdl, *tdl;
dmu_tx_t *tx;
zfs_zlock_t *zl;
int cmp, serr, terr;
int error = 0;
int zflg = 0;
boolean_t waited = B_FALSE;
if (snm == NULL || tnm == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(sdzp);
zilog = zfsvfs->z_log;
tdzp = ITOZ(tdip);
ZFS_VERIFY_ZP(tdzp);
/*
* We check i_sb because snapshots and the ctldir must have different
* super blocks.
*/
if (tdip->i_sb != sdip->i_sb || zfsctl_is_node(tdip)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EXDEV));
}
if (zfsvfs->z_utf8 && u8_validate(tnm,
strlen(tnm), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
top:
szp = NULL;
tzp = NULL;
zl = NULL;
/*
* This is to prevent the creation of links into attribute space
* by renaming a linked file into or out of an attribute directory.
* See the comment in zfs_link() for why this is considered bad.
*/
if ((tdzp->z_pflags & ZFS_XATTR) != (sdzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Lock source and target directory entries. To prevent deadlock,
* a lock ordering must be defined. We lock the directory with
* the smallest object id first, or if it's a tie, the one with
* the lexically first name.
*/
if (sdzp->z_id < tdzp->z_id) {
cmp = -1;
} else if (sdzp->z_id > tdzp->z_id) {
cmp = 1;
} else {
/*
* First compare the two name arguments without
* considering any case folding.
*/
int nofold = (zfsvfs->z_norm & ~U8_TEXTPREP_TOUPPER);
cmp = u8_strcmp(snm, tnm, 0, nofold, U8_UNICODE_LATEST, &error);
ASSERT(error == 0 || !zfsvfs->z_utf8);
if (cmp == 0) {
/*
* POSIX: "If the old argument and the new argument
* both refer to links to the same existing file,
* the rename() function shall return successfully
* and perform no other action."
*/
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* If the file system is case-folding, then we may
* have some more checking to do. A case-folding file
* system is either supporting mixed case sensitivity
* access or is completely case-insensitive. Note
* that the file system is always case preserving.
*
* In mixed sensitivity mode case sensitive behavior
* is the default. FIGNORECASE must be used to
* explicitly request case insensitive behavior.
*
* If the source and target names provided differ only
* by case (e.g., a request to rename 'tim' to 'Tim'),
* we will treat this as a special case in the
* case-insensitive mode: as long as the source name
* is an exact match, we will allow this to proceed as
* a name-change request.
*/
if ((zfsvfs->z_case == ZFS_CASE_INSENSITIVE ||
(zfsvfs->z_case == ZFS_CASE_MIXED &&
flags & FIGNORECASE)) &&
u8_strcmp(snm, tnm, 0, zfsvfs->z_norm, U8_UNICODE_LATEST,
&error) == 0) {
/*
* case preserving rename request, require exact
* name matches
*/
zflg |= ZCIEXACT;
zflg &= ~ZCILOOK;
}
}
/*
* If the source and destination directories are the same, we should
* grab the z_name_lock of that directory only once.
*/
if (sdzp == tdzp) {
zflg |= ZHAVELOCK;
rw_enter(&sdzp->z_name_lock, RW_READER);
}
if (cmp < 0) {
serr = zfs_dirent_lock(&sdl, sdzp, snm, &szp,
ZEXISTS | zflg, NULL, NULL);
terr = zfs_dirent_lock(&tdl,
tdzp, tnm, &tzp, ZRENAMING | zflg, NULL, NULL);
} else {
terr = zfs_dirent_lock(&tdl,
tdzp, tnm, &tzp, zflg, NULL, NULL);
serr = zfs_dirent_lock(&sdl,
sdzp, snm, &szp, ZEXISTS | ZRENAMING | zflg,
NULL, NULL);
}
if (serr) {
/*
* Source entry invalid or not there.
*/
if (!terr) {
zfs_dirent_unlock(tdl);
if (tzp)
iput(ZTOI(tzp));
}
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (strcmp(snm, "..") == 0)
serr = EINVAL;
ZFS_EXIT(zfsvfs);
return (serr);
}
if (terr) {
zfs_dirent_unlock(sdl);
iput(ZTOI(szp));
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (strcmp(tnm, "..") == 0)
terr = EINVAL;
ZFS_EXIT(zfsvfs);
return (terr);
}
/*
* If project inheritance is in use (i.e. the directory has
* ZFS_PROJINHERIT set), its descendant directories inherit not only
* the project ID but also the ZFS_PROJINHERIT flag. In that case we
* only allow renames into our tree when the project IDs match.
*/
if (tdzp->z_pflags & ZFS_PROJINHERIT &&
tdzp->z_projid != szp->z_projid) {
error = SET_ERROR(EXDEV);
goto out;
}
/*
* Must have write access at the source to remove the old entry
* and write access at the target to create the new entry.
* Note that if target and source are the same, this can be
* done in a single check.
*/
if ((error = zfs_zaccess_rename(sdzp, szp, tdzp, tzp, cr)))
goto out;
if (S_ISDIR(ZTOI(szp)->i_mode)) {
/*
* Check to make sure rename is valid.
* Can't do a move like this: /usr/a/b to /usr/a/b/c/d
*/
if ((error = zfs_rename_lock(szp, tdzp, sdzp, &zl)))
goto out;
}
/*
* Does target exist?
*/
if (tzp) {
/*
* Source and target must be the same type.
*/
if (S_ISDIR(ZTOI(szp)->i_mode)) {
if (!S_ISDIR(ZTOI(tzp)->i_mode)) {
error = SET_ERROR(ENOTDIR);
goto out;
}
} else {
if (S_ISDIR(ZTOI(tzp)->i_mode)) {
error = SET_ERROR(EISDIR);
goto out;
}
}
/*
* POSIX dictates that when the source and target
* entries refer to the same file object, rename
* must do nothing and exit without error.
*/
if (szp->z_id == tzp->z_id) {
error = 0;
goto out;
}
}
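/*
* Reserve transaction holds for both directory ZAPs, the SAs of every
* znode involved, and the unlinked set in case the target is removed.
*/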
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_sa(tx, sdzp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, sdzp->z_id, FALSE, snm);
dmu_tx_hold_zap(tx, tdzp->z_id, TRUE, tnm);
if (sdzp != tdzp) {
dmu_tx_hold_sa(tx, tdzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tdzp);
}
if (tzp) {
dmu_tx_hold_sa(tx, tzp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, tzp);
}
zfs_sa_upgrade_txholds(tx, szp);
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
if (zl != NULL)
zfs_rename_unlock(&zl);
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
iput(ZTOI(szp));
if (tzp)
iput(ZTOI(tzp));
goto top;
}
dmu_tx_abort(tx);
iput(ZTOI(szp));
if (tzp)
iput(ZTOI(tzp));
ZFS_EXIT(zfsvfs);
return (error);
}
if (tzp) /* Attempt to remove the existing target */
error = zfs_link_destroy(tdl, tzp, tx, zflg, NULL);
if (error == 0) {
error = zfs_link_create(tdl, szp, tx, ZRENAMING);
if (error == 0) {
szp->z_pflags |= ZFS_AV_MODIFIED;
if (tdzp->z_pflags & ZFS_PROJINHERIT)
szp->z_pflags |= ZFS_PROJINHERIT;
error = sa_update(szp->z_sa_hdl, SA_ZPL_FLAGS(zfsvfs),
(void *)&szp->z_pflags, sizeof (uint64_t), tx);
ASSERT0(error);
error = zfs_link_destroy(sdl, szp, tx, ZRENAMING, NULL);
if (error == 0) {
zfs_log_rename(zilog, tx, TX_RENAME |
(flags & FIGNORECASE ? TX_CI : 0), sdzp,
sdl->dl_name, tdzp, tdl->dl_name, szp);
} else {
/*
* At this point, we have successfully created
* the target name, but have failed to remove
* the source name. Since the create was done
* with the ZRENAMING flag, there are
* complications; for one, the link count is
* wrong. The easiest way to deal with this
* is to remove the newly created target, and
* return the original error. This must
* succeed; fortunately, it is very unlikely to
* fail, since we just created it.
*/
VERIFY3U(zfs_link_destroy(tdl, szp, tx,
ZRENAMING, NULL), ==, 0);
}
}
}
dmu_tx_commit(tx);
out:
if (zl != NULL)
zfs_rename_unlock(&zl);
zfs_dirent_unlock(sdl);
zfs_dirent_unlock(tdl);
zfs_inode_update(sdzp);
if (sdzp == tdzp)
rw_exit(&sdzp->z_name_lock);
if (sdzp != tdzp)
zfs_inode_update(tdzp);
zfs_inode_update(szp);
iput(ZTOI(szp));
if (tzp) {
zfs_inode_update(tzp);
iput(ZTOI(tzp));
}
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Insert the indicated symbolic reference entry into the directory.
*
* IN: dip - Directory to contain new symbolic link.
* name - Name of the new directory entry for the symlink.
* vap - Attributes of new entry.
* link - Target path of new symlink.
*
* cr - credentials of caller.
* flags - case flags
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* dip - ctime|mtime updated
*/
/*ARGSUSED*/
int
zfs_symlink(struct inode *dip, char *name, vattr_t *vap, char *link,
struct inode **ipp, cred_t *cr, int flags)
{
znode_t *zp, *dzp = ITOZ(dip);
zfs_dirlock_t *dl;
dmu_tx_t *tx;
zfsvfs_t *zfsvfs = ITOZSB(dip);
zilog_t *zilog;
uint64_t len = strlen(link);
int error;
int zflg = ZNEW;
zfs_acl_ids_t acl_ids;
boolean_t fuid_dirtied;
uint64_t txtype = TX_SYMLINK;
boolean_t waited = B_FALSE;
ASSERT(S_ISLNK(vap->va_mode));
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
if (zfsvfs->z_utf8 && u8_validate(name, strlen(name),
NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zflg |= ZCILOOK;
if (len > MAXPATHLEN) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENAMETOOLONG));
}
if ((error = zfs_acl_ids_create(dzp, 0,
vap, cr, NULL, &acl_ids)) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
top:
*ipp = NULL;
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lock(&dl, dzp, name, &zp, zflg, NULL, NULL);
if (error) {
zfs_acl_ids_free(&acl_ids);
ZFS_EXIT(zfsvfs);
return (error);
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (error);
}
if (zfs_acl_ids_overquota(zfsvfs, &acl_ids, ZFS_DEFAULT_PROJID)) {
zfs_acl_ids_free(&acl_ids);
zfs_dirent_unlock(dl);
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EDQUOT));
}
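/*
* Reserve space for the symlink target, the new directory entry, the
* new znode's system attributes, and an external ACL if one is needed.
*/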
tx = dmu_tx_create(zfsvfs->z_os);
fuid_dirtied = zfsvfs->z_fuid_dirty;
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0, MAX(1, len));
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
dmu_tx_hold_sa_create(tx, acl_ids.z_aclp->z_acl_bytes +
ZFS_SA_BASE_ATTR_SIZE + len);
dmu_tx_hold_sa(tx, dzp->z_sa_hdl, B_FALSE);
if (!zfsvfs->z_use_sa && acl_ids.z_aclp->z_acl_bytes > ZFS_ACE_SPACE) {
dmu_tx_hold_write(tx, DMU_NEW_OBJECT, 0,
acl_ids.z_aclp->z_acl_bytes);
}
if (fuid_dirtied)
zfs_fuid_txhold(zfsvfs, tx);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
zfs_acl_ids_free(&acl_ids);
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Create a new object for the symlink.
* For version 4 ZPL datasets the symlink will be an SA attribute.
*/
zfs_mknode(dzp, vap, tx, cr, 0, &zp, &acl_ids);
if (fuid_dirtied)
zfs_fuid_sync(zfsvfs, tx);
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
error = sa_update(zp->z_sa_hdl, SA_ZPL_SYMLINK(zfsvfs),
link, len, tx);
else
zfs_sa_symlink(zp, link, len, tx);
mutex_exit(&zp->z_lock);
zp->z_size = len;
(void) sa_update(zp->z_sa_hdl, SA_ZPL_SIZE(zfsvfs),
&zp->z_size, sizeof (zp->z_size), tx);
/*
* Insert the new object into the directory.
*/
(void) zfs_link_create(dl, zp, tx, ZNEW);
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_symlink(zilog, tx, txtype, dzp, zp, name, link);
zfs_inode_update(dzp);
zfs_inode_update(zp);
zfs_acl_ids_free(&acl_ids);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
*ipp = ZTOI(zp);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Return, in the buffer contained in the provided uio structure,
* the symbolic path referred to by ip.
*
* IN: ip - inode of symbolic link
* uio - structure to contain the link path.
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - atime updated
*/
/* ARGSUSED */
int
zfs_readlink(struct inode *ip, uio_t *uio, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
mutex_enter(&zp->z_lock);
if (zp->z_is_sa)
error = sa_lookup_uio(zp->z_sa_hdl,
SA_ZPL_SYMLINK(zfsvfs), uio);
else
error = zfs_sa_readlink(zp, uio);
mutex_exit(&zp->z_lock);
ZFS_EXIT(zfsvfs);
return (error);
}
/*
* Insert a new entry into directory tdip referencing sip.
*
* IN: tdip - Directory to contain new entry.
* sip - inode of new entry.
* name - name of new entry.
* cr - credentials of caller.
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* tdip - ctime|mtime updated
* sip - ctime updated
*/
/* ARGSUSED */
int
zfs_link(struct inode *tdip, struct inode *sip, char *name, cred_t *cr,
int flags)
{
znode_t *dzp = ITOZ(tdip);
znode_t *tzp, *szp;
zfsvfs_t *zfsvfs = ITOZSB(tdip);
zilog_t *zilog;
zfs_dirlock_t *dl;
dmu_tx_t *tx;
int error;
int zf = ZNEW;
uint64_t parent;
uid_t owner;
boolean_t waited = B_FALSE;
boolean_t is_tmpfile = 0;
uint64_t txg;
#ifdef HAVE_TMPFILE
is_tmpfile = (sip->i_nlink == 0 && (sip->i_state & I_LINKABLE));
#endif
ASSERT(S_ISDIR(tdip->i_mode));
if (name == NULL)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(dzp);
zilog = zfsvfs->z_log;
/*
* POSIX dictates that we return EPERM here.
* Better choices include ENOTSUP or EISDIR.
*/
if (S_ISDIR(sip->i_mode)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
szp = ITOZ(sip);
ZFS_VERIFY_ZP(szp);
/*
* If project inheritance is in use (i.e. the directory has
* ZFS_PROJINHERIT set), its descendant directories inherit not only
* the project ID but also the ZFS_PROJINHERIT flag. In that case we
* only allow hard link creation in our tree when the project IDs
* match.
*/
if (dzp->z_pflags & ZFS_PROJINHERIT && dzp->z_projid != szp->z_projid) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EXDEV));
}
/*
* We check i_sb because snapshots and the ctldir must have different
* super blocks.
*/
if (sip->i_sb != tdip->i_sb || zfsctl_is_node(sip)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EXDEV));
}
/* Prevent links to .zfs/shares files */
if ((error = sa_lookup(szp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
&parent, sizeof (uint64_t))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (parent == zfsvfs->z_shares_dir) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if (zfsvfs->z_utf8 && u8_validate(name,
strlen(name), NULL, U8_VALIDATE_ENTIRE, &error) < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EILSEQ));
}
if (flags & FIGNORECASE)
zf |= ZCILOOK;
/*
* We do not support links between attributes and non-attributes
* because of the potential security risk of creating links
* into "normal" file space in order to circumvent restrictions
* imposed in attribute space.
*/
if ((szp->z_pflags & ZFS_XATTR) != (dzp->z_pflags & ZFS_XATTR)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
owner = zfs_fuid_map_id(zfsvfs, KUID_TO_SUID(sip->i_uid),
cr, ZFS_OWNER);
if (owner != crgetuid(cr) && secpolicy_basic_link(cr) != 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if ((error = zfs_zaccess(dzp, ACE_ADD_FILE, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
top:
/*
* Attempt to lock directory; fail if entry already exists.
*/
error = zfs_dirent_lock(&dl, dzp, name, &tzp, zf, NULL, NULL);
if (error) {
ZFS_EXIT(zfsvfs);
return (error);
}
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, szp->z_sa_hdl, B_FALSE);
dmu_tx_hold_zap(tx, dzp->z_id, TRUE, name);
if (is_tmpfile)
dmu_tx_hold_zap(tx, zfsvfs->z_unlinkedobj, FALSE, NULL);
zfs_sa_upgrade_txholds(tx, szp);
zfs_sa_upgrade_txholds(tx, dzp);
error = dmu_tx_assign(tx, (waited ? TXG_NOTHROTTLE : 0) | TXG_NOWAIT);
if (error) {
zfs_dirent_unlock(dl);
if (error == ERESTART) {
waited = B_TRUE;
dmu_tx_wait(tx);
dmu_tx_abort(tx);
goto top;
}
dmu_tx_abort(tx);
ZFS_EXIT(zfsvfs);
return (error);
}
/* unmark z_unlinked so zfs_link_create will not reject */
if (is_tmpfile)
szp->z_unlinked = 0;
error = zfs_link_create(dl, szp, tx, 0);
if (error == 0) {
uint64_t txtype = TX_LINK;
/*
* A tmpfile is created in z_unlinkedobj, so remove it from there.
* Also, we don't log to the ZIL, because all previous file
* operations on the tmpfile are ignored by the ZIL. Instead we
* always wait for the txg to sync to make sure all previous
* operations are sync-safe.
*/
if (is_tmpfile) {
VERIFY(zap_remove_int(zfsvfs->z_os,
zfsvfs->z_unlinkedobj, szp->z_id, tx) == 0);
} else {
if (flags & FIGNORECASE)
txtype |= TX_CI;
zfs_log_link(zilog, tx, txtype, dzp, szp, name);
}
} else if (is_tmpfile) {
/* restore z_unlinked since linking failed */
szp->z_unlinked = 1;
}
txg = dmu_tx_get_txg(tx);
dmu_tx_commit(tx);
zfs_dirent_unlock(dl);
if (!is_tmpfile && zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
if (is_tmpfile)
txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), txg);
zfs_inode_update(dzp);
zfs_inode_update(szp);
ZFS_EXIT(zfsvfs);
return (error);
}
static void
zfs_putpage_commit_cb(void *arg)
{
struct page *pp = arg;
ClearPageError(pp);
end_page_writeback(pp);
}
/*
* Push a page out to disk, once the page is on stable storage the
* registered commit callback will be run as notification of completion.
*
* IN: ip - inode the page is mapped for.
* pp - page to push (page is locked)
* wbc - writeback control data
*
* RETURN: 0 if success
* error code if failure
*
* Timestamps:
* ip - ctime|mtime updated
*/
/* ARGSUSED */
int
zfs_putpage(struct inode *ip, struct page *pp, struct writeback_control *wbc)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
loff_t offset;
loff_t pgoff;
unsigned int pglen;
rl_t *rl;
dmu_tx_t *tx;
caddr_t va;
int err = 0;
uint64_t mtime[2], ctime[2];
sa_bulk_attr_t bulk[3];
int cnt = 0;
struct address_space *mapping;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
ASSERT(PageLocked(pp));
pgoff = page_offset(pp); /* Page byte-offset in file */
offset = i_size_read(ip); /* File length in bytes */
pglen = MIN(PAGE_SIZE, /* Page length in bytes */
P2ROUNDUP(offset, PAGE_SIZE)-pgoff);
/* Page is beyond end of file */
if (pgoff >= offset) {
unlock_page(pp);
ZFS_EXIT(zfsvfs);
return (0);
}
/* Truncate page length to end of file */
if (pgoff + pglen > offset)
pglen = offset - pgoff;
#if 0
/*
* FIXME: Allow mmap writes past its quota. The correct fix
* is to register a page_mkwrite() handler to count the page
* against its quota when it is about to be dirtied.
*/
if (zfs_id_overblockquota(zfsvfs, DMU_USERUSED_OBJECT,
KUID_TO_SUID(ip->i_uid)) ||
zfs_id_overblockquota(zfsvfs, DMU_GROUPUSED_OBJECT,
KGID_TO_SGID(ip->i_gid)) ||
(zp->z_projid != ZFS_DEFAULT_PROJID &&
zfs_id_overblockquota(zfsvfs, DMU_PROJECTUSED_OBJECT,
zp->z_projid))) {
err = EDQUOT;
}
#endif
/*
* The ordering here is critical and must adhere to the following
* rules in order to avoid deadlocking in either zfs_read() or
* zfs_free_range() due to a lock inversion.
*
* 1) The page must be unlocked prior to acquiring the range lock.
* This is critical because zfs_read() calls find_lock_page()
* which may block on the page lock while holding the range lock.
*
* 2) Before setting or clearing write back on a page the range lock
* must be held in order to prevent a lock inversion with the
* zfs_free_range() function.
*
* This presents a problem because upon entering this function the
* page lock is already held. To safely acquire the range lock the
* page lock must be dropped. This creates a window where another
* process could truncate, invalidate, dirty, or write out the page.
*
* Therefore, after successfully reacquiring the range and page locks
* the current page state is checked. In the common case everything
* will be as is expected and it can be written out. However, if
* the page state has changed it must be handled accordingly.
*/
mapping = pp->mapping;
redirty_page_for_writepage(wbc, pp);
unlock_page(pp);
rl = zfs_range_lock(&zp->z_range_lock, pgoff, pglen, RL_WRITER);
lock_page(pp);
/* Page mapping changed or it was no longer dirty, we're done */
if (unlikely((mapping != pp->mapping) || !PageDirty(pp))) {
unlock_page(pp);
zfs_range_unlock(rl);
ZFS_EXIT(zfsvfs);
return (0);
}
/* Another process started writeback on this page; block on it if required */
if (PageWriteback(pp)) {
unlock_page(pp);
zfs_range_unlock(rl);
if (wbc->sync_mode != WB_SYNC_NONE)
wait_on_page_writeback(pp);
ZFS_EXIT(zfsvfs);
return (0);
}
/* Clear the dirty flag now that the required locks are held */
if (!clear_page_dirty_for_io(pp)) {
unlock_page(pp);
zfs_range_unlock(rl);
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* Counterpart for redirty_page_for_writepage() above. This page
* was in fact not skipped and should not be counted as if it were.
*/
wbc->pages_skipped--;
set_page_writeback(pp);
unlock_page(pp);
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_write(tx, zp->z_id, pgoff, pglen);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
err = dmu_tx_assign(tx, TXG_NOWAIT);
if (err != 0) {
if (err == ERESTART)
dmu_tx_wait(tx);
dmu_tx_abort(tx);
__set_page_dirty_nobuffers(pp);
ClearPageError(pp);
end_page_writeback(pp);
zfs_range_unlock(rl);
ZFS_EXIT(zfsvfs);
return (err);
}
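/* Copy the page contents into the DMU within the assigned transaction. */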
va = kmap(pp);
ASSERT3U(pglen, <=, PAGE_SIZE);
dmu_write(zfsvfs->z_os, zp->z_id, pgoff, pglen, va, tx);
kunmap(pp);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_FLAGS(zfsvfs), NULL,
&zp->z_pflags, 8);
/* Preserve the mtime and ctime provided by the inode */
ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
zp->z_atime_dirty = 0;
zp->z_seq++;
err = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
zfs_log_write(zfsvfs->z_log, tx, TX_WRITE, zp, pgoff, pglen, 0,
zfs_putpage_commit_cb, pp);
dmu_tx_commit(tx);
zfs_range_unlock(rl);
if (wbc->sync_mode != WB_SYNC_NONE) {
/*
* Note that this is rarely called under writepages(), because
* writepages() normally handles the entire commit for
* performance reasons.
*/
zil_commit(zfsvfs->z_log, zp->z_id);
}
ZFS_EXIT(zfsvfs);
return (err);
}
/*
* Update the system attributes when the inode has been dirtied. For the
* moment we only update the mode, atime, mtime, and ctime.
*/
int
zfs_dirty_inode(struct inode *ip, int flags)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
dmu_tx_t *tx;
uint64_t mode, atime[2], mtime[2], ctime[2];
sa_bulk_attr_t bulk[4];
int error = 0;
int cnt = 0;
if (zfs_is_readonly(zfsvfs) || dmu_objset_is_snapshot(zfsvfs->z_os))
return (0);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
#ifdef I_DIRTY_TIME
/*
* This is the lazytime semantic introduced in Linux 4.0.
* This flag is only passed in from update_time() when lazytime is set.
* (Note: I_DIRTY_SYNC will also be set if lazytime is not in use.)
* Fortunately mtime and ctime are managed within ZFS itself, so we
* only need to dirty atime.
*/
if (flags == I_DIRTY_TIME) {
zp->z_atime_dirty = 1;
goto out;
}
#endif
tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
goto out;
}
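/*
* Capture the current in-core times and mode under the znode lock and
* push them to the SA layer in a single bulk update.
*/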
mutex_enter(&zp->z_lock);
zp->z_atime_dirty = 0;
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MODE(zfsvfs), NULL, &mode, 8);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_ATIME(zfsvfs), NULL, &atime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_MTIME(zfsvfs), NULL, &mtime, 16);
SA_ADD_BULK_ATTR(bulk, cnt, SA_ZPL_CTIME(zfsvfs), NULL, &ctime, 16);
/* Preserve the mode, mtime and ctime provided by the inode */
ZFS_TIME_ENCODE(&ip->i_atime, atime);
ZFS_TIME_ENCODE(&ip->i_mtime, mtime);
ZFS_TIME_ENCODE(&ip->i_ctime, ctime);
mode = ip->i_mode;
zp->z_mode = mode;
error = sa_bulk_update(zp->z_sa_hdl, bulk, cnt, tx);
mutex_exit(&zp->z_lock);
dmu_tx_commit(tx);
out:
ZFS_EXIT(zfsvfs);
return (error);
}
/*ARGSUSED*/
void
zfs_inactive(struct inode *ip)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t atime[2];
int error;
int need_unlock = 0;
/* Only read lock if we haven't already write locked, e.g. rollback */
if (!RW_WRITE_HELD(&zfsvfs->z_teardown_inactive_lock)) {
need_unlock = 1;
rw_enter(&zfsvfs->z_teardown_inactive_lock, RW_READER);
}
if (zp->z_sa_hdl == NULL) {
if (need_unlock)
rw_exit(&zfsvfs->z_teardown_inactive_lock);
return;
}
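/*
* Flush any pending atime update before tearing down the znode, but
* only if the file has not been unlinked.
*/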
if (zp->z_atime_dirty && zp->z_unlinked == 0) {
dmu_tx_t *tx = dmu_tx_create(zfsvfs->z_os);
dmu_tx_hold_sa(tx, zp->z_sa_hdl, B_FALSE);
zfs_sa_upgrade_txholds(tx, zp);
error = dmu_tx_assign(tx, TXG_WAIT);
if (error) {
dmu_tx_abort(tx);
} else {
ZFS_TIME_ENCODE(&ip->i_atime, atime);
mutex_enter(&zp->z_lock);
(void) sa_update(zp->z_sa_hdl, SA_ZPL_ATIME(zfsvfs),
(void *)&atime, sizeof (atime), tx);
zp->z_atime_dirty = 0;
mutex_exit(&zp->z_lock);
dmu_tx_commit(tx);
}
}
zfs_zinactive(zp);
if (need_unlock)
rw_exit(&zfsvfs->z_teardown_inactive_lock);
}
/*
* Bounds-check the seek operation.
*
* IN: ip - inode seeking within
* ooff - old file offset
* noffp - pointer to new file offset
* ct - caller context
*
* RETURN: 0 if success
* EINVAL if new offset invalid
*/
/* ARGSUSED */
int
zfs_seek(struct inode *ip, offset_t ooff, offset_t *noffp)
{
if (S_ISDIR(ip->i_mode))
return (0);
return ((*noffp < 0 || *noffp > MAXOFFSET_T) ? EINVAL : 0);
}
/*
* Fill pages with data from the disk.
*/
static int
zfs_fillpage(struct inode *ip, struct page *pl[], int nr_pages)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
objset_t *os;
struct page *cur_pp;
u_offset_t io_off, total;
size_t io_len;
loff_t i_size;
unsigned page_idx;
int err;
os = zfsvfs->z_os;
io_len = nr_pages << PAGE_SHIFT;
i_size = i_size_read(ip);
io_off = page_offset(pl[0]);
if (io_off + io_len > i_size)
io_len = i_size - io_off;
/*
* Iterate over list of pages and read each page individually.
*/
page_idx = 0;
for (total = io_off + io_len; io_off < total; io_off += PAGESIZE) {
caddr_t va;
cur_pp = pl[page_idx++];
va = kmap(cur_pp);
err = dmu_read(os, zp->z_id, io_off, PAGESIZE, va,
DMU_READ_PREFETCH);
kunmap(cur_pp);
if (err) {
/* convert checksum errors into IO errors */
if (err == ECKSUM)
err = SET_ERROR(EIO);
return (err);
}
}
return (0);
}
/*
* Uses zfs_fillpage to read data from the file and fill the pages.
*
* IN: ip - inode of file to get data from.
* pl - list of pages to read
* nr_pages - number of pages to read
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* ip - atime updated
*/
/* ARGSUSED */
int
zfs_getpage(struct inode *ip, struct page *pl[], int nr_pages)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int err;
if (pl == NULL)
return (0);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
err = zfs_fillpage(ip, pl, nr_pages);
ZFS_EXIT(zfsvfs);
return (err);
}
/*
* Check ZFS specific permissions to memory map a section of a file.
*
* IN: ip - inode of the file to mmap
* off - file offset
* addrp - start address in memory region
* len - length of memory region
* vm_flags- address flags
*
* RETURN: 0 if success
* error code if failure
*/
/*ARGSUSED*/
int
zfs_map(struct inode *ip, offset_t off, caddr_t *addrp, size_t len,
unsigned long vm_flags)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((vm_flags & VM_WRITE) && (zp->z_pflags &
(ZFS_IMMUTABLE | ZFS_READONLY | ZFS_APPENDONLY))) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EPERM));
}
if ((vm_flags & (VM_READ | VM_EXEC)) &&
(zp->z_pflags & ZFS_AV_QUARANTINED)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EACCES));
}
if (off < 0 || len > MAXOFFSET_T - off) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(ENXIO));
}
ZFS_EXIT(zfsvfs);
return (0);
}
/*
* convoff - converts the given data (start, whence) to the
* given whence.
*/
int
convoff(struct inode *ip, flock64_t *lckdat, int whence, offset_t offset)
{
vattr_t vap;
int error;
if ((lckdat->l_whence == 2) || (whence == 2)) {
if ((error = zfs_getattr(ip, &vap, 0, CRED())))
return (error);
}
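/* l_whence/whence: 0 == SEEK_SET, 1 == SEEK_CUR, 2 == SEEK_END */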
switch (lckdat->l_whence) {
case 1:
lckdat->l_start += offset;
break;
case 2:
lckdat->l_start += vap.va_size;
/* FALLTHRU */
case 0:
break;
default:
return (SET_ERROR(EINVAL));
}
if (lckdat->l_start < 0)
return (SET_ERROR(EINVAL));
switch (whence) {
case 1:
lckdat->l_start -= offset;
break;
case 2:
lckdat->l_start -= vap.va_size;
/* FALLTHRU */
case 0:
break;
default:
return (SET_ERROR(EINVAL));
}
lckdat->l_whence = (short)whence;
return (0);
}
/*
* Free or allocate space in a file. Currently, this function only
* supports the `F_FREESP' command. However, this command is somewhat
* misnamed, as its functionality includes the ability to allocate as
* well as free space.
*
* IN: ip - inode of file to free data in.
* cmd - action to take (only F_FREESP supported).
* bfp - section of file to free/alloc.
* flag - current file open mode flags.
* offset - current file offset.
* cr - credentials of caller [UNUSED].
*
* RETURN: 0 on success, error code on failure.
*
* Timestamps:
* ip - ctime|mtime updated
*/
/* ARGSUSED */
int
zfs_space(struct inode *ip, int cmd, flock64_t *bfp, int flag,
offset_t offset, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint64_t off, len;
int error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if (cmd != F_FREESP) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Callers might not be able to detect properly that we are read-only,
* so check it explicitly here.
*/
if (zfs_is_readonly(zfsvfs)) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EROFS));
}
if ((error = convoff(ip, bfp, 0, offset))) {
ZFS_EXIT(zfsvfs);
return (error);
}
if (bfp->l_len < 0) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Permissions aren't checked on Solaris because on this OS
* zfs_space() can only be called with an opened file handle.
* On Linux we can get here through truncate_range() which
* operates directly on inodes, so we need to check access rights.
*/
if ((error = zfs_zaccess(zp, ACE_WRITE_DATA, 0, B_FALSE, cr))) {
ZFS_EXIT(zfsvfs);
return (error);
}
off = bfp->l_start;
len = bfp->l_len; /* 0 means from off to end of file */
error = zfs_freesp(zp, off, len, flag, TRUE);
ZFS_EXIT(zfsvfs);
return (error);
}
/*ARGSUSED*/
int
zfs_fid(struct inode *ip, fid_t *fidp)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
uint32_t gen;
uint64_t gen64;
uint64_t object = zp->z_id;
zfid_short_t *zfid;
int size, i, error;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
if ((error = sa_lookup(zp->z_sa_hdl, SA_ZPL_GEN(zfsvfs),
&gen64, sizeof (uint64_t))) != 0) {
ZFS_EXIT(zfsvfs);
return (error);
}
gen = (uint32_t)gen64;
size = SHORT_FID_LEN;
zfid = (zfid_short_t *)fidp;
zfid->zf_len = size;
for (i = 0; i < sizeof (zfid->zf_object); i++)
zfid->zf_object[i] = (uint8_t)(object >> (8 * i));
/* Must have a non-zero generation number to distinguish from .zfs */
if (gen == 0)
gen = 1;
for (i = 0; i < sizeof (zfid->zf_gen); i++)
zfid->zf_gen[i] = (uint8_t)(gen >> (8 * i));
ZFS_EXIT(zfsvfs);
return (0);
}
/*ARGSUSED*/
int
zfs_getsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
error = zfs_getacl(zp, vsecp, skipaclchk, cr);
ZFS_EXIT(zfsvfs);
return (error);
}
/*ARGSUSED*/
int
zfs_setsecattr(struct inode *ip, vsecattr_t *vsecp, int flag, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int error;
boolean_t skipaclchk = (flag & ATTR_NOACLCHECK) ? B_TRUE : B_FALSE;
zilog_t *zilog = zfsvfs->z_log;
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
error = zfs_setacl(zp, vsecp, skipaclchk, cr);
if (zfsvfs->z_os->os_sync == ZFS_SYNC_ALWAYS)
zil_commit(zilog, 0);
ZFS_EXIT(zfsvfs);
return (error);
}
#ifdef HAVE_UIO_ZEROCOPY
/*
* Tunable, both must be a power of 2.
*
* zcr_blksz_min: the smallest read we may consider to loan out an arcbuf
* zcr_blksz_max: if set to less than the file block size, allow loaning out of
* an arcbuf for a partial block read
*/
int zcr_blksz_min = (1 << 10); /* 1K */
int zcr_blksz_max = (1 << 17); /* 128K */
/*ARGSUSED*/
static int
zfs_reqzcbuf(struct inode *ip, enum uio_rw ioflag, xuio_t *xuio, cred_t *cr)
{
znode_t *zp = ITOZ(ip);
zfsvfs_t *zfsvfs = ITOZSB(ip);
int max_blksz = zfsvfs->z_max_blksz;
uio_t *uio = &xuio->xu_uio;
ssize_t size = uio->uio_resid;
offset_t offset = uio->uio_loffset;
int blksz;
int fullblk, i;
arc_buf_t *abuf;
ssize_t maxsize;
int preamble, postamble;
if (xuio->xu_type != UIOTYPE_ZEROCOPY)
return (SET_ERROR(EINVAL));
ZFS_ENTER(zfsvfs);
ZFS_VERIFY_ZP(zp);
switch (ioflag) {
case UIO_WRITE:
/*
* Loan out an arc_buf for write if write size is bigger than
* max_blksz, and the file's block size is also max_blksz.
*/
blksz = max_blksz;
if (size < blksz || zp->z_blksz != blksz) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
/*
* Caller requests buffers for write before knowing where the
* write offset might be (e.g. NFS TCP write).
*/
if (offset == -1) {
preamble = 0;
} else {
preamble = P2PHASE(offset, blksz);
if (preamble) {
preamble = blksz - preamble;
size -= preamble;
}
}
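/*
* preamble and postamble are the partially filled leading and trailing
* buffers; everything in between is loaned out as full-block arc_bufs.
*/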
postamble = P2PHASE(size, blksz);
size -= postamble;
fullblk = size / blksz;
(void) dmu_xuio_init(xuio,
(preamble != 0) + fullblk + (postamble != 0));
/*
* Have to fix iov base/len for partial buffers. They
* currently represent full arc_buf's.
*/
if (preamble) {
/* data begins in the middle of the arc_buf */
abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
blksz);
ASSERT(abuf);
(void) dmu_xuio_add(xuio, abuf,
blksz - preamble, preamble);
}
for (i = 0; i < fullblk; i++) {
abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
blksz);
ASSERT(abuf);
(void) dmu_xuio_add(xuio, abuf, 0, blksz);
}
if (postamble) {
/* data ends in the middle of the arc_buf */
abuf = dmu_request_arcbuf(sa_get_db(zp->z_sa_hdl),
blksz);
ASSERT(abuf);
(void) dmu_xuio_add(xuio, abuf, 0, postamble);
}
break;
case UIO_READ:
/*
* Loan out an arc_buf for read if the read size is larger than
* the current file block size. Block alignment is not
* considered. A partial arc_buf will be loaned out for the read.
*/
blksz = zp->z_blksz;
if (blksz < zcr_blksz_min)
blksz = zcr_blksz_min;
if (blksz > zcr_blksz_max)
blksz = zcr_blksz_max;
/* avoid potential complexity of dealing with it */
if (blksz > max_blksz) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
maxsize = zp->z_size - uio->uio_loffset;
if (size > maxsize)
size = maxsize;
if (size < blksz) {
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
break;
default:
ZFS_EXIT(zfsvfs);
return (SET_ERROR(EINVAL));
}
uio->uio_extflg = UIO_XUIO;
XUIO_XUZC_RW(xuio) = ioflag;
ZFS_EXIT(zfsvfs);
return (0);
}
/*ARGSUSED*/
static int
zfs_retzcbuf(struct inode *ip, xuio_t *xuio, cred_t *cr)
{
int i;
arc_buf_t *abuf;
int ioflag = XUIO_XUZC_RW(xuio);
ASSERT(xuio->xu_type == UIOTYPE_ZEROCOPY);
i = dmu_xuio_cnt(xuio);
while (i-- > 0) {
abuf = dmu_xuio_arcbuf(xuio, i);
/*
* if abuf == NULL, it must be a write buffer
* that has been returned in zfs_write().
*/
if (abuf)
dmu_return_arcbuf(abuf);
ASSERT(abuf || ioflag == UIO_WRITE);
}
dmu_xuio_fini(xuio);
return (0);
}
#endif /* HAVE_UIO_ZEROCOPY */
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zfs_open);
EXPORT_SYMBOL(zfs_close);
EXPORT_SYMBOL(zfs_read);
EXPORT_SYMBOL(zfs_write);
EXPORT_SYMBOL(zfs_access);
EXPORT_SYMBOL(zfs_lookup);
EXPORT_SYMBOL(zfs_create);
EXPORT_SYMBOL(zfs_tmpfile);
EXPORT_SYMBOL(zfs_remove);
EXPORT_SYMBOL(zfs_mkdir);
EXPORT_SYMBOL(zfs_rmdir);
EXPORT_SYMBOL(zfs_readdir);
EXPORT_SYMBOL(zfs_fsync);
EXPORT_SYMBOL(zfs_getattr);
EXPORT_SYMBOL(zfs_getattr_fast);
EXPORT_SYMBOL(zfs_setattr);
EXPORT_SYMBOL(zfs_rename);
EXPORT_SYMBOL(zfs_symlink);
EXPORT_SYMBOL(zfs_readlink);
EXPORT_SYMBOL(zfs_link);
EXPORT_SYMBOL(zfs_inactive);
EXPORT_SYMBOL(zfs_space);
EXPORT_SYMBOL(zfs_fid);
EXPORT_SYMBOL(zfs_getsecattr);
EXPORT_SYMBOL(zfs_setsecattr);
EXPORT_SYMBOL(zfs_getpage);
EXPORT_SYMBOL(zfs_putpage);
EXPORT_SYMBOL(zfs_dirty_inode);
EXPORT_SYMBOL(zfs_map);
/* CSTYLED */
module_param(zfs_delete_blocks, ulong, 0644);
MODULE_PARM_DESC(zfs_delete_blocks, "Delete files larger than N blocks async");
module_param(zfs_read_chunk_size, long, 0644);
MODULE_PARM_DESC(zfs_read_chunk_size, "Bytes to read per chunk");
#endif
diff --git a/module/zfs/zil.c b/module/zfs/zil.c
index d9ae1f413e94..d0a1002525d2 100644
--- a/module/zfs/zil.c
+++ b/module/zfs/zil.c
@@ -1,3447 +1,3446 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2014 Integros [integros.com]
*/
/* Portions Copyright 2010 Robert Milkowski */
#include <sys/zfs_context.h>
#include <sys/spa.h>
#include <sys/dmu.h>
#include <sys/zap.h>
#include <sys/arc.h>
#include <sys/stat.h>
#include <sys/resource.h>
#include <sys/zil.h>
#include <sys/zil_impl.h>
#include <sys/dsl_dataset.h>
#include <sys/vdev_impl.h>
#include <sys/dmu_tx.h>
#include <sys/dsl_pool.h>
#include <sys/metaslab.h>
#include <sys/trace_zil.h>
#include <sys/abd.h>
/*
* The ZFS Intent Log (ZIL) saves "transaction records" (itxs) of system
* calls that change the file system. Each itx has enough information to
* be able to replay them after a system crash, power loss, or
* equivalent failure mode. These are stored in memory until either:
*
* 1. they are committed to the pool by the DMU transaction group
* (txg), at which point they can be discarded; or
* 2. they are committed to the on-disk ZIL for the dataset being
* modified (e.g. due to an fsync, O_DSYNC, or other synchronous
* requirement).
*
* In the event of a crash or power loss, the itxs contained by each
* dataset's on-disk ZIL will be replayed when that dataset is first
* instantiated (e.g. if the dataset is a normal filesystem, when it is
* first mounted).
*
* As hinted at above, there is one ZIL per dataset (both the in-memory
* representation, and the on-disk representation). The on-disk format
* consists of 3 parts:
*
* - a single, per-dataset, ZIL header; which points to a chain of
* - zero or more ZIL blocks; each of which contains
* - zero or more ZIL records
*
* A ZIL record holds the information necessary to replay a single
* system call transaction. A ZIL block can hold many ZIL records, and
* the blocks are chained together, similarly to a singly linked list.
*
* Each ZIL block contains a block pointer (blkptr_t) to the next ZIL
* block in the chain, and the ZIL header points to the first block in
* the chain.
*
* Note, there is not a fixed place in the pool to hold these ZIL
* blocks; they are dynamically allocated and freed as needed from the
* blocks available on the pool, though they can be preferentially
* allocated from a dedicated "log" vdev.
*/
/*
* This controls the amount of time that a ZIL block (lwb) will remain
* "open" when it isn't "full", and it has a thread waiting for it to be
* committed to stable storage. Please refer to the zil_commit_waiter()
* function (and the comments within it) for more details.
*/
int zfs_commit_timeout_pct = 5;
/*
* See zil.h for more information about these fields.
*/
zil_stats_t zil_stats = {
{ "zil_commit_count", KSTAT_DATA_UINT64 },
{ "zil_commit_writer_count", KSTAT_DATA_UINT64 },
{ "zil_itx_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_count", KSTAT_DATA_UINT64 },
{ "zil_itx_indirect_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_count", KSTAT_DATA_UINT64 },
{ "zil_itx_copied_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_count", KSTAT_DATA_UINT64 },
{ "zil_itx_needcopy_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_normal_bytes", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_count", KSTAT_DATA_UINT64 },
{ "zil_itx_metaslab_slog_bytes", KSTAT_DATA_UINT64 },
};
static kstat_t *zil_ksp;
/*
* Disable intent logging replay. This global ZIL switch affects all pools.
*/
int zil_replay_disable = 0;
/*
* Tunable parameter for debugging or performance analysis. Setting
* zfs_nocacheflush will cause corruption on power loss if a volatile
* out-of-order write cache is enabled.
*/
int zfs_nocacheflush = 0;
/*
* Limit SLOG write size per commit executed with synchronous priority.
* Any writes above that will be executed with lower (asynchronous) priority
* to limit potential SLOG device abuse by a single active ZIL writer.
*/
unsigned long zil_slog_bulk = 768 * 1024;
static kmem_cache_t *zil_lwb_cache;
static kmem_cache_t *zil_zcw_cache;
static void zil_async_to_sync(zilog_t *zilog, uint64_t foid);
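/*
* An lwb is "empty" when nothing beyond the zil_chain_t has been used,
* i.e. no log records have been written into it yet.
*/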
#define LWB_EMPTY(lwb) ((BP_GET_LSIZE(&lwb->lwb_blk) - \
sizeof (zil_chain_t)) == (lwb->lwb_sz - lwb->lwb_nused))
static int
zil_bp_compare(const void *x1, const void *x2)
{
const dva_t *dva1 = &((zil_bp_node_t *)x1)->zn_dva;
const dva_t *dva2 = &((zil_bp_node_t *)x2)->zn_dva;
int cmp = AVL_CMP(DVA_GET_VDEV(dva1), DVA_GET_VDEV(dva2));
if (likely(cmp))
return (cmp);
return (AVL_CMP(DVA_GET_OFFSET(dva1), DVA_GET_OFFSET(dva2)));
}
static void
zil_bp_tree_init(zilog_t *zilog)
{
avl_create(&zilog->zl_bp_tree, zil_bp_compare,
sizeof (zil_bp_node_t), offsetof(zil_bp_node_t, zn_node));
}
static void
zil_bp_tree_fini(zilog_t *zilog)
{
avl_tree_t *t = &zilog->zl_bp_tree;
zil_bp_node_t *zn;
void *cookie = NULL;
while ((zn = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zn, sizeof (zil_bp_node_t));
avl_destroy(t);
}
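/*
* Record the block's DVA in the per-zilog AVL tree so each block is
* visited at most once; returns EEXIST if it has been seen before.
*/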
int
zil_bp_tree_add(zilog_t *zilog, const blkptr_t *bp)
{
avl_tree_t *t = &zilog->zl_bp_tree;
const dva_t *dva;
zil_bp_node_t *zn;
avl_index_t where;
if (BP_IS_EMBEDDED(bp))
return (0);
dva = BP_IDENTITY(bp);
if (avl_find(t, dva, &where) != NULL)
return (SET_ERROR(EEXIST));
zn = kmem_alloc(sizeof (zil_bp_node_t), KM_SLEEP);
zn->zn_dva = *dva;
avl_insert(t, zn, where);
return (0);
}
static zil_header_t *
zil_header_in_syncing_context(zilog_t *zilog)
{
return ((zil_header_t *)zilog->zl_header);
}
static void
zil_init_log_chain(zilog_t *zilog, blkptr_t *bp)
{
zio_cksum_t *zc = &bp->blk_cksum;
zc->zc_word[ZIL_ZC_GUID_0] = spa_get_random(-1ULL);
zc->zc_word[ZIL_ZC_GUID_1] = spa_get_random(-1ULL);
zc->zc_word[ZIL_ZC_OBJSET] = dmu_objset_id(zilog->zl_os);
zc->zc_word[ZIL_ZC_SEQ] = 1ULL;
}
/*
* Read a log block and make sure it's valid.
*/
static int
zil_read_log_block(zilog_t *zilog, boolean_t decrypt, const blkptr_t *bp,
blkptr_t *nbp, void *dst, char **end)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
if (!(zilog->zl_header->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
zio_flags |= ZIO_FLAG_SPECULATIVE;
if (!decrypt)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, bp->blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL, bp->blk_cksum.zc_word[ZIL_ZC_SEQ]);
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func,
&abuf, ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
zio_cksum_t cksum = bp->blk_cksum;
/*
* Validate the checksummed log block.
*
* Sequence numbers should be... sequential. The checksum
* verifier for the next block should be bp's checksum plus 1.
*
* Also check the log chain linkage and size used.
*/
cksum.zc_word[ZIL_ZC_SEQ]++;
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
zil_chain_t *zilc = abuf->b_data;
char *lr = (char *)(zilc + 1);
uint64_t len = zilc->zc_nused - sizeof (zil_chain_t);
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk)) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(len, <=, SPA_OLD_MAXBLOCKSIZE);
bcopy(lr, dst, len);
*end = (char *)dst + len;
*nbp = zilc->zc_next_blk;
}
} else {
char *lr = abuf->b_data;
uint64_t size = BP_GET_LSIZE(bp);
zil_chain_t *zilc = (zil_chain_t *)(lr + size) - 1;
if (bcmp(&cksum, &zilc->zc_next_blk.blk_cksum,
sizeof (cksum)) || BP_IS_HOLE(&zilc->zc_next_blk) ||
(zilc->zc_nused > (size - sizeof (*zilc)))) {
error = SET_ERROR(ECKSUM);
} else {
ASSERT3U(zilc->zc_nused, <=,
SPA_OLD_MAXBLOCKSIZE);
bcopy(lr, dst, zilc->zc_nused);
*end = (char *)dst + zilc->zc_nused;
*nbp = zilc->zc_next_blk;
}
}
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
/*
* Read a TX_WRITE log data block.
*/
static int
zil_read_log_data(zilog_t *zilog, const lr_write_t *lr, void *wbuf)
{
enum zio_flag zio_flags = ZIO_FLAG_CANFAIL;
const blkptr_t *bp = &lr->lr_blkptr;
arc_flags_t aflags = ARC_FLAG_WAIT;
arc_buf_t *abuf = NULL;
zbookmark_phys_t zb;
int error;
if (BP_IS_HOLE(bp)) {
if (wbuf != NULL)
bzero(wbuf, MAX(BP_GET_LSIZE(bp), lr->lr_length));
return (0);
}
if (zilog->zl_header->zh_claim_txg == 0)
zio_flags |= ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB;
/*
* If we are not using the resulting data, we are just checking that
* it hasn't been corrupted so we don't need to waste CPU time
* decompressing and decrypting it.
*/
if (wbuf == NULL)
zio_flags |= ZIO_FLAG_RAW;
SET_BOOKMARK(&zb, dmu_objset_id(zilog->zl_os), lr->lr_foid,
ZB_ZIL_LEVEL, lr->lr_offset / BP_GET_LSIZE(bp));
error = arc_read(NULL, zilog->zl_spa, bp, arc_getbuf_func, &abuf,
ZIO_PRIORITY_SYNC_READ, zio_flags, &aflags, &zb);
if (error == 0) {
if (wbuf != NULL)
bcopy(abuf->b_data, wbuf, arc_buf_size(abuf));
arc_buf_destroy(abuf, &abuf);
}
return (error);
}
/*
* Parse the intent log, and call parse_func for each valid record within.
*/
int
zil_parse(zilog_t *zilog, zil_parse_blk_func_t *parse_blk_func,
zil_parse_lr_func_t *parse_lr_func, void *arg, uint64_t txg,
boolean_t decrypt)
{
const zil_header_t *zh = zilog->zl_header;
boolean_t claimed = !!zh->zh_claim_txg;
uint64_t claim_blk_seq = claimed ? zh->zh_claim_blk_seq : UINT64_MAX;
uint64_t claim_lr_seq = claimed ? zh->zh_claim_lr_seq : UINT64_MAX;
uint64_t max_blk_seq = 0;
uint64_t max_lr_seq = 0;
uint64_t blk_count = 0;
uint64_t lr_count = 0;
blkptr_t blk, next_blk;
char *lrbuf, *lrp;
int error = 0;
bzero(&next_blk, sizeof (blkptr_t));
/*
* Old logs didn't record the maximum zh_claim_lr_seq.
*/
if (!(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID))
claim_lr_seq = UINT64_MAX;
/*
* Starting at the block pointed to by zh_log we read the log chain.
* For each block in the chain we strongly check that block to
* ensure its validity. We stop when an invalid block is found.
* For each block pointer in the chain we call parse_blk_func().
* For each record in each valid block we call parse_lr_func().
* If the log has been claimed, stop if we encounter a sequence
* number greater than the highest claimed sequence number.
*/
lrbuf = zio_buf_alloc(SPA_OLD_MAXBLOCKSIZE);
zil_bp_tree_init(zilog);
for (blk = zh->zh_log; !BP_IS_HOLE(&blk); blk = next_blk) {
uint64_t blk_seq = blk.blk_cksum.zc_word[ZIL_ZC_SEQ];
int reclen;
char *end = NULL;
if (blk_seq > claim_blk_seq)
break;
error = parse_blk_func(zilog, &blk, arg, txg);
if (error != 0)
break;
ASSERT3U(max_blk_seq, <, blk_seq);
max_blk_seq = blk_seq;
blk_count++;
if (max_lr_seq == claim_lr_seq && max_blk_seq == claim_blk_seq)
break;
error = zil_read_log_block(zilog, decrypt, &blk, &next_blk,
lrbuf, &end);
if (error != 0)
break;
for (lrp = lrbuf; lrp < end; lrp += reclen) {
lr_t *lr = (lr_t *)lrp;
reclen = lr->lrc_reclen;
ASSERT3U(reclen, >=, sizeof (lr_t));
if (lr->lrc_seq > claim_lr_seq)
goto done;
error = parse_lr_func(zilog, lr, arg, txg);
if (error != 0)
goto done;
ASSERT3U(max_lr_seq, <, lr->lrc_seq);
max_lr_seq = lr->lrc_seq;
lr_count++;
}
}
done:
zilog->zl_parse_error = error;
zilog->zl_parse_blk_seq = max_blk_seq;
zilog->zl_parse_lr_seq = max_lr_seq;
zilog->zl_parse_blk_count = blk_count;
zilog->zl_parse_lr_count = lr_count;
ASSERT(!claimed || !(zh->zh_flags & ZIL_CLAIM_LR_SEQ_VALID) ||
(max_blk_seq == claim_blk_seq && max_lr_seq == claim_lr_seq) ||
(decrypt && error == EIO));
zil_bp_tree_fini(zilog);
zio_buf_free(lrbuf, SPA_OLD_MAXBLOCKSIZE);
return (error);
}
static int
zil_claim_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t first_txg)
{
/*
* Claim log block if not already committed and not already claimed.
* If tx == NULL, just verify that the block is claimable.
*/
if (BP_IS_HOLE(bp) || bp->blk_birth < first_txg ||
zil_bp_tree_add(zilog, bp) != 0)
return (0);
return (zio_wait(zio_claim(NULL, zilog->zl_spa,
tx == NULL ? 0 : first_txg, bp, spa_claim_notify, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE | ZIO_FLAG_SCRUB)));
}
static int
zil_claim_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t first_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
int error;
if (lrc->lrc_txtype != TX_WRITE)
return (0);
/*
* If the block is not readable, don't claim it. This can happen
* in normal operation when a log block is written to disk before
* some of the dmu_sync() blocks it points to. In this case, the
* transaction cannot have been committed to anyone (we would have
* waited for all writes to be stable first), so it is semantically
* correct to declare this the end of the log.
*/
if (lr->lr_blkptr.blk_birth >= first_txg) {
error = zil_read_log_data(zilog, lr, NULL);
if (error != 0)
return (error);
}
return (zil_claim_log_block(zilog, &lr->lr_blkptr, tx, first_txg));
}
/* ARGSUSED */
static int
zil_free_log_block(zilog_t *zilog, blkptr_t *bp, void *tx, uint64_t claim_txg)
{
zio_free_zil(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
static int
zil_free_log_record(zilog_t *zilog, lr_t *lrc, void *tx, uint64_t claim_txg)
{
lr_write_t *lr = (lr_write_t *)lrc;
blkptr_t *bp = &lr->lr_blkptr;
/*
* If we previously claimed it, we need to free it.
*/
if (claim_txg != 0 && lrc->lrc_txtype == TX_WRITE &&
bp->blk_birth >= claim_txg && zil_bp_tree_add(zilog, bp) == 0 &&
!BP_IS_HOLE(bp))
zio_free(zilog->zl_spa, dmu_tx_get_txg(tx), bp);
return (0);
}
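/*
 * AVL comparator for the per-lwb vdev tree: nodes are ordered by vdev
 * id, so each vdev touched by an lwb is tracked (and later flushed)
 * at most once.
 */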
static int
zil_lwb_vdev_compare(const void *x1, const void *x2)
{
const uint64_t v1 = ((zil_vdev_node_t *)x1)->zv_vdev;
const uint64_t v2 = ((zil_vdev_node_t *)x2)->zv_vdev;
return (AVL_CMP(v1, v2));
}
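/*
 * Allocate an in-memory log write block (lwb) describing the on-disk
 * block at *bp and append it to the zilog's zl_lwb_list. For Slim ZIL
 * blocks (ZIO_CHECKSUM_ZILOG2) the chain header occupies the start of
 * the buffer; otherwise it is carved out of the end.
 */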
static lwb_t *
zil_alloc_lwb(zilog_t *zilog, blkptr_t *bp, boolean_t slog, uint64_t txg,
boolean_t fastwrite)
{
lwb_t *lwb;
lwb = kmem_cache_alloc(zil_lwb_cache, KM_SLEEP);
lwb->lwb_zilog = zilog;
lwb->lwb_blk = *bp;
lwb->lwb_fastwrite = fastwrite;
lwb->lwb_slog = slog;
lwb->lwb_state = LWB_STATE_CLOSED;
lwb->lwb_buf = zio_buf_alloc(BP_GET_LSIZE(bp));
lwb->lwb_max_txg = txg;
lwb->lwb_write_zio = NULL;
lwb->lwb_root_zio = NULL;
lwb->lwb_tx = NULL;
lwb->lwb_issued_timestamp = 0;
if (BP_GET_CHECKSUM(bp) == ZIO_CHECKSUM_ZILOG2) {
lwb->lwb_nused = sizeof (zil_chain_t);
lwb->lwb_sz = BP_GET_LSIZE(bp);
} else {
lwb->lwb_nused = 0;
lwb->lwb_sz = BP_GET_LSIZE(bp) - sizeof (zil_chain_t);
}
mutex_enter(&zilog->zl_lock);
list_insert_tail(&zilog->zl_lwb_list, lwb);
mutex_exit(&zilog->zl_lock);
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
VERIFY(list_is_empty(&lwb->lwb_waiters));
VERIFY(list_is_empty(&lwb->lwb_itxs));
return (lwb);
}
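/*
 * Free an lwb once its waiter and itx lists are empty and its zios are
 * no longer outstanding; also clears zl_last_lwb_opened if it still
 * points at this lwb.
 */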
static void
zil_free_lwb(zilog_t *zilog, lwb_t *lwb)
{
ASSERT(MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&lwb->lwb_vdev_lock));
VERIFY(list_is_empty(&lwb->lwb_waiters));
VERIFY(list_is_empty(&lwb->lwb_itxs));
ASSERT(avl_is_empty(&lwb->lwb_vdev_tree));
ASSERT3P(lwb->lwb_write_zio, ==, NULL);
ASSERT3P(lwb->lwb_root_zio, ==, NULL);
ASSERT3U(lwb->lwb_max_txg, <=, spa_syncing_txg(zilog->zl_spa));
ASSERT(lwb->lwb_state == LWB_STATE_CLOSED ||
lwb->lwb_state == LWB_STATE_DONE);
/*
* Clear the zilog's field to indicate this lwb is no longer
* valid, and prevent use-after-free errors.
*/
if (zilog->zl_last_lwb_opened == lwb)
zilog->zl_last_lwb_opened = NULL;
kmem_cache_free(zil_lwb_cache, lwb);
}
/*
* Called when we create in-memory log transactions so that we know
* to clean up the itxs at the end of spa_sync().
*/
void
zilog_dirty(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
dsl_dataset_t *ds = dmu_objset_ds(zilog->zl_os);
ASSERT(spa_writeable(zilog->zl_spa));
if (ds->ds_is_snapshot)
panic("dirtying snapshot!");
if (txg_list_add(&dp->dp_dirty_zilogs, zilog, txg)) {
/* up the hold count until we can be written out */
dmu_buf_add_ref(ds->ds_dbuf, zilog);
zilog->zl_dirty_max_txg = MAX(txg, zilog->zl_dirty_max_txg);
}
}
/*
* Determine if the zil is dirty in the specified txg. Callers wanting to
* ensure that the dirty state does not change must hold the itxg_lock for
* the specified txg. Holding the lock will ensure that the zil cannot be
* dirtied (zil_itx_assign) or cleaned (zil_clean) while we check its current
* state.
*/
boolean_t
zilog_is_dirty_in_txg(zilog_t *zilog, uint64_t txg)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, txg & TXG_MASK))
return (B_TRUE);
return (B_FALSE);
}
/*
* Determine if the zil is dirty. The zil is considered dirty if it has
* any pending itx records that have not been cleaned by zil_clean().
*/
boolean_t
zilog_is_dirty(zilog_t *zilog)
{
dsl_pool_t *dp = zilog->zl_dmu_pool;
for (int t = 0; t < TXG_SIZE; t++) {
if (txg_list_member(&dp->dp_dirty_zilogs, zilog, t))
return (B_TRUE);
}
return (B_FALSE);
}
/*
* Create an on-disk intent log.
*/
static lwb_t *
zil_create(zilog_t *zilog)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb = NULL;
uint64_t txg = 0;
dmu_tx_t *tx = NULL;
blkptr_t blk;
int error = 0;
boolean_t fastwrite = FALSE;
boolean_t slog = FALSE;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
ASSERT(zh->zh_claim_txg == 0);
ASSERT(zh->zh_replay_seq == 0);
blk = zh->zh_log;
/*
* Allocate an initial log block if:
* - there isn't one already
* - the existing block is the wrong endianness
*/
if (BP_IS_HOLE(&blk) || BP_SHOULD_BYTESWAP(&blk)) {
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
if (!BP_IS_HOLE(&blk)) {
zio_free_zil(zilog->zl_spa, txg, &blk);
BP_ZERO(&blk);
}
error = zio_alloc_zil(zilog->zl_spa, zilog->zl_os, txg, &blk,
ZIL_MIN_BLKSZ, &slog);
fastwrite = TRUE;
if (error == 0)
zil_init_log_chain(zilog, &blk);
}
/*
* Allocate a log write block (lwb) for the first log block.
*/
if (error == 0)
lwb = zil_alloc_lwb(zilog, &blk, slog, txg, fastwrite);
/*
* If we just allocated the first log block, commit our transaction
* and wait for zil_sync() to stuff the block pointer into zh_log.
* (zh is part of the MOS, so we cannot modify it in open context.)
*/
if (tx != NULL) {
dmu_tx_commit(tx);
txg_wait_synced(zilog->zl_dmu_pool, txg);
}
ASSERT(bcmp(&blk, &zh->zh_log, sizeof (blk)) == 0);
return (lwb);
}
/*
* In one tx, free all log blocks and clear the log header. If keep_first
* is set, then we're replaying a log with no content. We want to keep the
* first block, however, so that the first synchronous transaction doesn't
* require a txg_wait_synced() in zil_create(). We don't need to
* txg_wait_synced() here either when keep_first is set, because both
* zil_create() and zil_destroy() will wait for any in-progress destroys
* to complete.
*/
void
zil_destroy(zilog_t *zilog, boolean_t keep_first)
{
const zil_header_t *zh = zilog->zl_header;
lwb_t *lwb;
dmu_tx_t *tx;
uint64_t txg;
/*
* Wait for any previous destroy to complete.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_old_header = *zh; /* debugging aid */
if (BP_IS_HOLE(&zh->zh_log))
return;
tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
mutex_enter(&zilog->zl_lock);
ASSERT3U(zilog->zl_destroy_txg, <, txg);
zilog->zl_destroy_txg = txg;
zilog->zl_keep_first = keep_first;
if (!list_is_empty(&zilog->zl_lwb_list)) {
ASSERT(zh->zh_claim_txg == 0);
VERIFY(!keep_first);
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa,
&lwb->lwb_blk);
list_remove(&zilog->zl_lwb_list, lwb);
if (lwb->lwb_buf != NULL)
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zio_free(zilog->zl_spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
}
} else if (!keep_first) {
zil_destroy_sync(zilog, tx);
}
mutex_exit(&zilog->zl_lock);
dmu_tx_commit(tx);
}
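/*
 * Free every block in the on-disk log chain by re-parsing it with the
 * "free" callbacks, charging the frees to the given tx.
 */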
void
zil_destroy_sync(zilog_t *zilog, dmu_tx_t *tx)
{
ASSERT(list_is_empty(&zilog->zl_lwb_list));
(void) zil_parse(zilog, zil_free_log_block,
zil_free_log_record, tx, zilog->zl_header->zh_claim_txg, B_FALSE);
}
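/*
 * Claim the intent log for a dataset, typically while the pool is being
 * loaded: walk the chain, claim each readable block so it won't be
 * reused, and record the highest claimed block/record sequence numbers
 * in the ZIL header. If the pool's log state is SPA_LOG_CLEAR, the log
 * is freed and zeroed instead.
 */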
int
zil_claim(dsl_pool_t *dp, dsl_dataset_t *ds, void *txarg)
{
dmu_tx_t *tx = txarg;
uint64_t first_txg = dmu_tx_get_txg(tx);
zilog_t *zilog;
zil_header_t *zh;
objset_t *os;
int error;
error = dmu_objset_own_obj(dp, ds->ds_object,
DMU_OST_ANY, B_FALSE, B_FALSE, FTAG, &os);
if (error != 0) {
/*
* EBUSY indicates that the objset is inconsistent, in which
* case it cannot have a ZIL.
*/
if (error != EBUSY) {
cmn_err(CE_WARN, "can't open objset for %llu, error %u",
(unsigned long long)ds->ds_object, error);
}
return (0);
}
zilog = dmu_objset_zil(os);
zh = zil_header_in_syncing_context(zilog);
if (spa_get_log_state(zilog->zl_spa) == SPA_LOG_CLEAR) {
if (!BP_IS_HOLE(&zh->zh_log))
zio_free_zil(zilog->zl_spa, first_txg, &zh->zh_log);
BP_ZERO(&zh->zh_log);
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* Claim all log blocks if we haven't already done so, and remember
* the highest claimed sequence number. This ensures that if we can
* read only part of the log now (e.g. due to a missing device),
* but we can read the entire log later, we will not try to replay
* or destroy beyond the last block we successfully claimed.
*/
ASSERT3U(zh->zh_claim_txg, <=, first_txg);
if (zh->zh_claim_txg == 0 && !BP_IS_HOLE(&zh->zh_log)) {
(void) zil_parse(zilog, zil_claim_log_block,
zil_claim_log_record, tx, first_txg, B_FALSE);
zh->zh_claim_txg = first_txg;
zh->zh_claim_blk_seq = zilog->zl_parse_blk_seq;
zh->zh_claim_lr_seq = zilog->zl_parse_lr_seq;
if (zilog->zl_parse_lr_count || zilog->zl_parse_blk_count > 1)
zh->zh_flags |= ZIL_REPLAY_NEEDED;
zh->zh_flags |= ZIL_CLAIM_LR_SEQ_VALID;
if (os->os_encrypted)
os->os_next_write_raw[tx->tx_txg & TXG_MASK] = B_TRUE;
dsl_dataset_dirty(dmu_objset_ds(os), tx);
}
ASSERT3U(first_txg, ==, (spa_last_synced_txg(zilog->zl_spa) + 1));
dmu_objset_disown(os, B_FALSE, FTAG);
return (0);
}
/*
* Check the log by walking the log chain.
* Checksum errors are ok as they indicate the end of the chain.
* Any other error (no device or read failure) returns an error.
*/
/* ARGSUSED */
int
zil_check_log_chain(dsl_pool_t *dp, dsl_dataset_t *ds, void *tx)
{
zilog_t *zilog;
objset_t *os;
blkptr_t *bp;
int error;
ASSERT(tx == NULL);
error = dmu_objset_from_ds(ds, &os);
if (error != 0) {
cmn_err(CE_WARN, "can't open objset %llu, error %d",
(unsigned long long)ds->ds_object, error);
return (0);
}
zilog = dmu_objset_zil(os);
bp = (blkptr_t *)&zilog->zl_header->zh_log;
/*
* Check the first block and determine if it's on a log device
* which may have been removed or faulted prior to loading this
* pool. If so, there's no point in checking the rest of the log
* as its content should have already been synced to the pool.
*/
if (!BP_IS_HOLE(bp)) {
vdev_t *vd;
boolean_t valid = B_TRUE;
spa_config_enter(os->os_spa, SCL_STATE, FTAG, RW_READER);
vd = vdev_lookup_top(os->os_spa, DVA_GET_VDEV(&bp->blk_dva[0]));
if (vd->vdev_islog && vdev_is_dead(vd))
valid = vdev_log_state_valid(vd);
spa_config_exit(os->os_spa, SCL_STATE, FTAG);
if (!valid)
return (0);
}
/*
* Because tx == NULL, zil_claim_log_block() will not actually claim
* any blocks, but just determine whether it is possible to do so.
* In addition to checking the log chain, zil_claim_log_block()
* will invoke zio_claim() with a done func of spa_claim_notify(),
* which will update spa_max_claim_txg. See spa_load() for details.
*/
error = zil_parse(zilog, zil_claim_log_block, zil_claim_log_record, tx,
zilog->zl_header->zh_claim_txg ? -1ULL : spa_first_txg(os->os_spa),
B_FALSE);
return ((error == ECKSUM || error == ENOENT) ? 0 : error);
}
/*
* When an itx is "skipped", this function is used to properly mark the
* waiter as "done", and signal any thread(s) waiting on it. An itx can
* be skipped (and not committed to an lwb) for a variety of reasons,
* one of them being that the itx was committed via spa_sync(), prior to
* it being committed to an lwb; this can happen if a thread calling
* zil_commit() is racing with spa_sync().
*/
static void
zil_commit_waiter_skip(zil_commit_waiter_t *zcw)
{
mutex_enter(&zcw->zcw_lock);
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when the given waiter is to be linked into an
* lwb's "lwb_waiters" list; i.e. when the itx is committed to the lwb.
* At this point, the waiter will no longer be referenced by the itx,
* and instead, will be referenced by the lwb.
*/
static void
zil_commit_waiter_link_lwb(zil_commit_waiter_t *zcw, lwb_t *lwb)
{
/*
* The lwb_waiters field of the lwb is protected by the zilog's
* zl_lock, thus it must be held when calling this function.
*/
ASSERT(MUTEX_HELD(&lwb->lwb_zilog->zl_lock));
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3P(lwb, !=, NULL);
ASSERT(lwb->lwb_state == LWB_STATE_OPENED ||
lwb->lwb_state == LWB_STATE_ISSUED);
list_insert_tail(&lwb->lwb_waiters, zcw);
zcw->zcw_lwb = lwb;
mutex_exit(&zcw->zcw_lock);
}
/*
* This function is used when zio_alloc_zil() fails to allocate a ZIL
* block, and the given waiter must be linked to the "nolwb waiters"
* list inside of zil_process_commit_list().
*/
static void
zil_commit_waiter_link_nolwb(zil_commit_waiter_t *zcw, list_t *nolwb)
{
mutex_enter(&zcw->zcw_lock);
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
list_insert_tail(nolwb, zcw);
mutex_exit(&zcw->zcw_lock);
}
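/*
 * Record each vdev referenced by this block pointer in the lwb's vdev
 * tree so the vdevs can be flushed once the lwb write completes; this
 * is skipped entirely when zfs_nocacheflush is set.
 */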
void
zil_lwb_add_block(lwb_t *lwb, const blkptr_t *bp)
{
avl_tree_t *t = &lwb->lwb_vdev_tree;
avl_index_t where;
zil_vdev_node_t *zv, zvsearch;
int ndvas = BP_GET_NDVAS(bp);
int i;
if (zfs_nocacheflush)
return;
mutex_enter(&lwb->lwb_vdev_lock);
for (i = 0; i < ndvas; i++) {
zvsearch.zv_vdev = DVA_GET_VDEV(&bp->blk_dva[i]);
if (avl_find(t, &zvsearch, &where) == NULL) {
zv = kmem_alloc(sizeof (*zv), KM_SLEEP);
zv->zv_vdev = zvsearch.zv_vdev;
avl_insert(t, zv, where);
}
}
mutex_exit(&lwb->lwb_vdev_lock);
}
void
zil_lwb_add_txg(lwb_t *lwb, uint64_t txg)
{
lwb->lwb_max_txg = MAX(lwb->lwb_max_txg, txg);
}
/*
* This function is called after all VDEVs associated with a given lwb
* write have completed their DKIOCFLUSHWRITECACHE command; or as soon
* as the lwb write completes, if "zfs_nocacheflush" is set.
*
* The intention is for this function to be called as soon as the
* contents of an lwb are considered "stable" on disk, and will survive
* any sudden loss of power. At this point, any threads waiting for the
* lwb to reach this state are signalled, and the "waiter" structures
* are marked "done".
*/
static void
zil_lwb_flush_vdevs_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
zilog_t *zilog = lwb->lwb_zilog;
dmu_tx_t *tx = lwb->lwb_tx;
zil_commit_waiter_t *zcw;
itx_t *itx;
spa_config_exit(zilog->zl_spa, SCL_STATE, lwb);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
mutex_enter(&zilog->zl_lock);
/*
* Ensure the lwb buffer pointer is cleared before releasing the
* txg. If we have had an allocation failure and the txg is
* waiting to sync then we want zil_sync() to remove the lwb so
* that it's not picked up as the next new one in
* zil_process_commit_list(). zil_sync() will only remove the
* lwb if lwb_buf is null.
*/
lwb->lwb_buf = NULL;
lwb->lwb_tx = NULL;
ASSERT3U(lwb->lwb_issued_timestamp, >, 0);
zilog->zl_last_lwb_latency = gethrtime() - lwb->lwb_issued_timestamp;
lwb->lwb_root_zio = NULL;
lwb->lwb_state = LWB_STATE_DONE;
if (zilog->zl_last_lwb_opened == lwb) {
/*
* Remember the highest committed log sequence number
* for ztest. We only update this value when all the log
* writes succeeded, because ztest wants to ASSERT that
* it got the whole log chain.
*/
zilog->zl_commit_lr_seq = zilog->zl_lr_seq;
}
while ((itx = list_head(&lwb->lwb_itxs)) != NULL) {
list_remove(&lwb->lwb_itxs, itx);
zil_itx_destroy(itx);
}
while ((zcw = list_head(&lwb->lwb_waiters)) != NULL) {
mutex_enter(&zcw->zcw_lock);
ASSERT(list_link_active(&zcw->zcw_node));
list_remove(&lwb->lwb_waiters, zcw);
ASSERT3P(zcw->zcw_lwb, ==, lwb);
zcw->zcw_lwb = NULL;
zcw->zcw_zio_error = zio->io_error;
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
zcw->zcw_done = B_TRUE;
cv_broadcast(&zcw->zcw_cv);
mutex_exit(&zcw->zcw_lock);
}
mutex_exit(&zilog->zl_lock);
/*
* Now that we've written this log block, we have a stable pointer
* to the next block in the chain, so it's OK to let the txg in
* which we allocated the next block sync.
*/
dmu_tx_commit(tx);
}
/*
* This is called when an lwb write completes. This means this specific
* lwb was written to disk, and all dependent lwbs have also been
* written to disk.
*
* At this point, a DKIOCFLUSHWRITECACHE command hasn't been issued to
* the VDEVs involved in writing out this specific lwb. The lwb will be
* "done" once zil_lwb_flush_vdevs_done() is called, which occurs in the
* zio completion callback for the lwb's root zio.
*/
static void
zil_lwb_write_done(zio_t *zio)
{
lwb_t *lwb = zio->io_private;
spa_t *spa = zio->io_spa;
zilog_t *zilog = lwb->lwb_zilog;
avl_tree_t *t = &lwb->lwb_vdev_tree;
void *cookie = NULL;
zil_vdev_node_t *zv;
ASSERT3S(spa_config_held(spa, SCL_STATE, RW_READER), !=, 0);
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
ASSERT(BP_GET_TYPE(zio->io_bp) == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(zio->io_bp) == 0);
ASSERT(BP_GET_BYTEORDER(zio->io_bp) == ZFS_HOST_BYTEORDER);
ASSERT(!BP_IS_GANG(zio->io_bp));
ASSERT(!BP_IS_HOLE(zio->io_bp));
ASSERT(BP_GET_FILL(zio->io_bp) == 0);
abd_put(zio->io_abd);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_ISSUED);
mutex_enter(&zilog->zl_lock);
lwb->lwb_write_zio = NULL;
lwb->lwb_fastwrite = FALSE;
mutex_exit(&zilog->zl_lock);
if (avl_numnodes(t) == 0)
return;
/*
* If there was an IO error, we're not going to call zio_flush()
* on these vdevs, so we simply empty the tree and free the
* nodes. We avoid calling zio_flush() since there isn't any
* good reason for doing so, after the lwb block failed to be
* written out.
*/
if (zio->io_error != 0) {
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL)
kmem_free(zv, sizeof (*zv));
return;
}
while ((zv = avl_destroy_nodes(t, &cookie)) != NULL) {
vdev_t *vd = vdev_lookup_top(spa, zv->zv_vdev);
if (vd != NULL)
zio_flush(lwb->lwb_root_zio, vd);
kmem_free(zv, sizeof (*zv));
}
}
/*
* This function's purpose is to "open" an lwb such that it is ready to
* accept new itxs being committed to it. To do this, the lwb's zio
* structures are created, and linked to the lwb. This function is
* idempotent; if the passed in lwb has already been opened, this
* function is essentially a no-op.
*/
static void
zil_lwb_write_open(zilog_t *zilog, lwb_t *lwb)
{
zbookmark_phys_t zb;
zio_priority_t prio;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
EQUIV(lwb->lwb_root_zio == NULL, lwb->lwb_state == LWB_STATE_CLOSED);
EQUIV(lwb->lwb_root_zio != NULL, lwb->lwb_state == LWB_STATE_OPENED);
SET_BOOKMARK(&zb, lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_OBJSET],
ZB_ZIL_OBJECT, ZB_ZIL_LEVEL,
lwb->lwb_blk.blk_cksum.zc_word[ZIL_ZC_SEQ]);
/* Lock so zil_sync() doesn't fastwrite_unmark after zio is created */
mutex_enter(&zilog->zl_lock);
if (lwb->lwb_root_zio == NULL) {
abd_t *lwb_abd = abd_get_from_buf(lwb->lwb_buf,
BP_GET_LSIZE(&lwb->lwb_blk));
if (!lwb->lwb_fastwrite) {
metaslab_fastwrite_mark(zilog->zl_spa, &lwb->lwb_blk);
lwb->lwb_fastwrite = 1;
}
if (!lwb->lwb_slog || zilog->zl_cur_used <= zil_slog_bulk)
prio = ZIO_PRIORITY_SYNC_WRITE;
else
prio = ZIO_PRIORITY_ASYNC_WRITE;
lwb->lwb_root_zio = zio_root(zilog->zl_spa,
zil_lwb_flush_vdevs_done, lwb, ZIO_FLAG_CANFAIL);
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
lwb->lwb_write_zio = zio_rewrite(lwb->lwb_root_zio,
zilog->zl_spa, 0, &lwb->lwb_blk, lwb_abd,
BP_GET_LSIZE(&lwb->lwb_blk), zil_lwb_write_done, lwb,
prio, ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE |
ZIO_FLAG_FASTWRITE, &zb);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
lwb->lwb_state = LWB_STATE_OPENED;
/*
* The zilog's "zl_last_lwb_opened" field is used to
* build the lwb/zio dependency chain, which is used to
* preserve the ordering of lwb completions that is
* required by the semantics of the ZIL. Each new lwb
* zio becomes a parent of the "previous" lwb zio, such
* that the new lwb's zio cannot complete until the
* "previous" lwb's zio completes.
*
* This is required by the semantics of zil_commit();
* the commit waiters attached to the lwbs will be woken
* in the lwb zio's completion callback, so this zio
* dependency graph ensures the waiters are woken in the
* correct order (the same order the lwbs were created).
*/
lwb_t *last_lwb_opened = zilog->zl_last_lwb_opened;
if (last_lwb_opened != NULL &&
last_lwb_opened->lwb_state != LWB_STATE_DONE) {
ASSERT(last_lwb_opened->lwb_state == LWB_STATE_OPENED ||
last_lwb_opened->lwb_state == LWB_STATE_ISSUED);
ASSERT3P(last_lwb_opened->lwb_root_zio, !=, NULL);
zio_add_child(lwb->lwb_root_zio,
last_lwb_opened->lwb_root_zio);
}
zilog->zl_last_lwb_opened = lwb;
}
mutex_exit(&zilog->zl_lock);
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
}
/*
* Define a limited set of intent log block sizes.
*
* These must be a multiple of 4KB. Note only the amount used (again
* aligned to 4KB) actually gets written. However, we can't always just
* allocate SPA_OLD_MAXBLOCKSIZE as the slog space could be exhausted.
*/
uint64_t zil_block_buckets[] = {
4096, /* non TX_WRITE */
8192+4096, /* data base */
32*1024 + 4096, /* NFS writes */
UINT64_MAX
};
/*
* Start a log block write and advance to the next log block.
* Calls are serialized.
*/
static lwb_t *
zil_lwb_write_issue(zilog_t *zilog, lwb_t *lwb)
{
lwb_t *nlwb = NULL;
zil_chain_t *zilc;
spa_t *spa = zilog->zl_spa;
blkptr_t *bp;
dmu_tx_t *tx;
uint64_t txg;
uint64_t zil_blksz, wsz;
int i, error;
boolean_t slog;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb->lwb_root_zio, !=, NULL);
ASSERT3P(lwb->lwb_write_zio, !=, NULL);
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
zilc = (zil_chain_t *)lwb->lwb_buf;
bp = &zilc->zc_next_blk;
} else {
zilc = (zil_chain_t *)(lwb->lwb_buf + lwb->lwb_sz);
bp = &zilc->zc_next_blk;
}
ASSERT(lwb->lwb_nused <= lwb->lwb_sz);
/*
* Allocate the next block and save its address in this block
* before writing it in order to establish the log chain.
* Note that if the allocation of nlwb synced before we wrote
* the block that points at it (lwb), we'd leak it if we crashed.
* Therefore, we don't do dmu_tx_commit() until zil_lwb_write_done().
* We dirty the dataset to ensure that zil_sync() will be called
* to clean up in the event of allocation failure or I/O failure.
*/
tx = dmu_tx_create(zilog->zl_os);
/*
* Since we are not going to create any new dirty data, and we
* can even help with clearing the existing dirty data, we
* should not be subject to the dirty data based delays. We
* use TXG_NOTHROTTLE to bypass the delay mechanism.
*/
VERIFY0(dmu_tx_assign(tx, TXG_WAIT | TXG_NOTHROTTLE));
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
txg = dmu_tx_get_txg(tx);
lwb->lwb_tx = tx;
/*
* Log blocks are pre-allocated. Here we select the size of the next
* block, based on size used in the last block.
* - first find the smallest bucket that will fit the block from a
* limited set of block sizes. This is because it's faster to write
* blocks allocated from the same metaslab as they are adjacent or
* close.
* - next find the maximum from the new suggested size and an array of
* previous sizes. This lessens a picket fence effect of wrongly
* guessing the size if we have a stream of say 2k, 64k, 2k, 64k
* requests.
*
* Note we only write what is used, but we can't just allocate
* the maximum block size because we can exhaust the available
* pool log space.
*/
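/*
 * Illustrative walk-through of the sizing below (numbers assumed, not
 * measured): with roughly 20K of zl_cur_used, the smallest bucket that
 * fits is 32K+4K; if zl_prev_blks still remembers a recent block of
 * SPA_OLD_MAXBLOCKSIZE, the MAX() pass bumps the estimate back up,
 * smoothing out alternating small/large commit bursts.
 */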
zil_blksz = zilog->zl_cur_used + sizeof (zil_chain_t);
for (i = 0; zil_blksz > zil_block_buckets[i]; i++)
continue;
zil_blksz = zil_block_buckets[i];
if (zil_blksz == UINT64_MAX)
zil_blksz = SPA_OLD_MAXBLOCKSIZE;
zilog->zl_prev_blks[zilog->zl_prev_rotor] = zil_blksz;
for (i = 0; i < ZIL_PREV_BLKS; i++)
zil_blksz = MAX(zil_blksz, zilog->zl_prev_blks[i]);
zilog->zl_prev_rotor = (zilog->zl_prev_rotor + 1) & (ZIL_PREV_BLKS - 1);
BP_ZERO(bp);
error = zio_alloc_zil(spa, zilog->zl_os, txg, bp, zil_blksz, &slog);
if (slog) {
ZIL_STAT_BUMP(zil_itx_metaslab_slog_count);
ZIL_STAT_INCR(zil_itx_metaslab_slog_bytes, lwb->lwb_nused);
} else {
ZIL_STAT_BUMP(zil_itx_metaslab_normal_count);
ZIL_STAT_INCR(zil_itx_metaslab_normal_bytes, lwb->lwb_nused);
}
if (error == 0) {
ASSERT3U(bp->blk_birth, ==, txg);
bp->blk_cksum = lwb->lwb_blk.blk_cksum;
bp->blk_cksum.zc_word[ZIL_ZC_SEQ]++;
/*
* Allocate a new log write block (lwb).
*/
nlwb = zil_alloc_lwb(zilog, bp, slog, txg, TRUE);
}
if (BP_GET_CHECKSUM(&lwb->lwb_blk) == ZIO_CHECKSUM_ZILOG2) {
/* For Slim ZIL only write what is used. */
wsz = P2ROUNDUP_TYPED(lwb->lwb_nused, ZIL_MIN_BLKSZ, uint64_t);
ASSERT3U(wsz, <=, lwb->lwb_sz);
zio_shrink(lwb->lwb_write_zio, wsz);
} else {
wsz = lwb->lwb_sz;
}
zilc->zc_pad = 0;
zilc->zc_nused = lwb->lwb_nused;
zilc->zc_eck.zec_cksum = lwb->lwb_blk.blk_cksum;
/*
* clear unused data for security
*/
bzero(lwb->lwb_buf + lwb->lwb_nused, wsz - lwb->lwb_nused);
spa_config_enter(zilog->zl_spa, SCL_STATE, lwb, RW_READER);
zil_lwb_add_block(lwb, &lwb->lwb_blk);
lwb->lwb_issued_timestamp = gethrtime();
lwb->lwb_state = LWB_STATE_ISSUED;
zio_nowait(lwb->lwb_root_zio);
zio_nowait(lwb->lwb_write_zio);
/*
* If there was an allocation failure then nlwb will be null which
* forces a txg_wait_synced().
*/
return (nlwb);
}
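/*
 * Commit a single itx to the given lwb: copy its log record (and, for
 * TX_WRITE, fetch the data or block pointer via zl_get_data) into the
 * lwb's buffer, issuing the lwb and continuing in a new one when the
 * record doesn't fit. TX_COMMIT itxs carry no record; their waiter is
 * simply linked onto the lwb.
 */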
static lwb_t *
zil_lwb_commit(zilog_t *zilog, itx_t *itx, lwb_t *lwb)
{
lr_t *lrcb, *lrc;
lr_write_t *lrwb, *lrw;
char *lr_buf;
uint64_t dlen, dnow, lwb_sp, reclen, txg;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT3P(lwb, !=, NULL);
ASSERT3P(lwb->lwb_buf, !=, NULL);
zil_lwb_write_open(zilog, lwb);
lrc = &itx->itx_lr;
lrw = (lr_write_t *)lrc;
/*
* A commit itx doesn't represent any on-disk state; instead
* it's simply used as a place holder on the commit list, and
* provides a mechanism for attaching a "commit waiter" onto the
* correct lwb (such that the waiter can be signalled upon
* completion of that lwb). Thus, we don't process this itx's
* log record if it's a commit itx (these itx's don't have log
* records), and instead link the itx's waiter onto the lwb's
* list of waiters.
*
* For more details, see the comment above zil_commit().
*/
if (lrc->lrc_txtype == TX_COMMIT) {
mutex_enter(&zilog->zl_lock);
zil_commit_waiter_link_lwb(itx->itx_private, lwb);
itx->itx_private = NULL;
mutex_exit(&zilog->zl_lock);
return (lwb);
}
if (lrc->lrc_txtype == TX_WRITE && itx->itx_wr_state == WR_NEED_COPY) {
dlen = P2ROUNDUP_TYPED(
lrw->lr_length, sizeof (uint64_t), uint64_t);
} else {
dlen = 0;
}
reclen = lrc->lrc_reclen;
zilog->zl_cur_used += (reclen + dlen);
txg = lrc->lrc_txg;
ASSERT3U(zilog->zl_cur_used, <, UINT64_MAX - (reclen + dlen));
cont:
/*
* If this record won't fit in the current log block, start a new one.
* For WR_NEED_COPY optimize layout for minimal number of chunks.
*/
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
if (reclen > lwb_sp || (reclen + dlen > lwb_sp &&
lwb_sp < ZIL_MAX_WASTE_SPACE && (dlen % ZIL_MAX_LOG_DATA == 0 ||
lwb_sp < reclen + dlen % ZIL_MAX_LOG_DATA))) {
lwb = zil_lwb_write_issue(zilog, lwb);
if (lwb == NULL)
return (NULL);
zil_lwb_write_open(zilog, lwb);
ASSERT(LWB_EMPTY(lwb));
lwb_sp = lwb->lwb_sz - lwb->lwb_nused;
ASSERT3U(reclen + MIN(dlen, sizeof (uint64_t)), <=, lwb_sp);
}
dnow = MIN(dlen, lwb_sp - reclen);
lr_buf = lwb->lwb_buf + lwb->lwb_nused;
bcopy(lrc, lr_buf, reclen);
lrcb = (lr_t *)lr_buf; /* Like lrc, but inside lwb. */
lrwb = (lr_write_t *)lrcb; /* Like lrw, but inside lwb. */
ZIL_STAT_BUMP(zil_itx_count);
/*
* If it's a write, fetch the data or get its blkptr as appropriate.
*/
if (lrc->lrc_txtype == TX_WRITE) {
if (txg > spa_freeze_txg(zilog->zl_spa))
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (itx->itx_wr_state == WR_COPIED) {
ZIL_STAT_BUMP(zil_itx_copied_count);
ZIL_STAT_INCR(zil_itx_copied_bytes, lrw->lr_length);
} else {
char *dbuf;
int error;
if (itx->itx_wr_state == WR_NEED_COPY) {
dbuf = lr_buf + reclen;
lrcb->lrc_reclen += dnow;
if (lrwb->lr_length > dnow)
lrwb->lr_length = dnow;
lrw->lr_offset += dnow;
lrw->lr_length -= dnow;
ZIL_STAT_BUMP(zil_itx_needcopy_count);
ZIL_STAT_INCR(zil_itx_needcopy_bytes, dnow);
} else {
ASSERT3S(itx->itx_wr_state, ==, WR_INDIRECT);
dbuf = NULL;
ZIL_STAT_BUMP(zil_itx_indirect_count);
ZIL_STAT_INCR(zil_itx_indirect_bytes,
lrw->lr_length);
}
/*
* We pass in the "lwb_write_zio" rather than
* "lwb_root_zio" so that the "lwb_write_zio"
* becomes the parent of any zio's created by
* the "zl_get_data" callback. The vdevs are
* flushed after the "lwb_write_zio" completes,
* so we want to make sure that completion
* callback waits for these additional zio's,
* such that the vdevs used by those zio's will
* be included in the lwb's vdev tree, and those
* vdevs will be properly flushed. If we passed
* in "lwb_root_zio" here, then these additional
* vdevs may not be flushed; e.g. if these zio's
* completed after "lwb_write_zio" completed.
*/
error = zilog->zl_get_data(itx->itx_private,
lrwb, dbuf, lwb, lwb->lwb_write_zio);
if (error == EIO) {
txg_wait_synced(zilog->zl_dmu_pool, txg);
return (lwb);
}
if (error != 0) {
ASSERT(error == ENOENT || error == EEXIST ||
error == EALREADY);
return (lwb);
}
}
}
/*
* We're actually making an entry, so update lrc_seq to be the
* log record sequence number. Note that this is generally not
* equal to the itx sequence number because not all transactions
* are synchronous, and sometimes spa_sync() gets there first.
*/
lrcb->lrc_seq = ++zilog->zl_lr_seq;
lwb->lwb_nused += reclen + dnow;
zil_lwb_add_txg(lwb, txg);
ASSERT3U(lwb->lwb_nused, <=, lwb->lwb_sz);
ASSERT0(P2PHASE(lwb->lwb_nused, sizeof (uint64_t)));
dlen -= dnow;
if (dlen > 0) {
zilog->zl_cur_used += reclen;
goto cont;
}
return (lwb);
}
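/*
 * Allocate an in-memory intent log transaction (itx) with room for a
 * log record of lrsize bytes (rounded up to 8 bytes); itxs default to
 * synchronous.
 */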
itx_t *
zil_itx_create(uint64_t txtype, size_t lrsize)
{
size_t itxsize;
itx_t *itx;
lrsize = P2ROUNDUP_TYPED(lrsize, sizeof (uint64_t), size_t);
itxsize = offsetof(itx_t, itx_lr) + lrsize;
itx = zio_data_buf_alloc(itxsize);
itx->itx_lr.lrc_txtype = txtype;
itx->itx_lr.lrc_reclen = lrsize;
itx->itx_lr.lrc_seq = 0; /* defensive */
itx->itx_sync = B_TRUE; /* default is synchronous */
itx->itx_callback = NULL;
itx->itx_callback_data = NULL;
itx->itx_size = itxsize;
return (itx);
}
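/*
 * Run the itx's callback, if one was registered, then free the itx.
 */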
void
zil_itx_destroy(itx_t *itx)
{
IMPLY(itx->itx_lr.lrc_txtype == TX_COMMIT, itx->itx_callback == NULL);
IMPLY(itx->itx_callback != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
if (itx->itx_callback != NULL)
itx->itx_callback(itx->itx_callback_data);
zio_data_buf_free(itx, itx->itx_size);
}
/*
* Free up the sync and async itxs. The itxs_t has already been detached
* so no locks are needed.
*/
static void
zil_itxg_clean(itxs_t *itxs)
{
itx_t *itx;
list_t *list;
avl_tree_t *t;
void *cookie;
itx_async_node_t *ian;
list = &itxs->i_sync_list;
while ((itx = list_head(list)) != NULL) {
/*
* In the general case, commit itxs will not be found
* here, as they'll be committed to an lwb via
* zil_lwb_commit(), and free'd in that function. Having
* said that, it is still possible for commit itxs to be
* found here, due to the following race:
*
* - a thread calls zil_commit() which assigns the
* commit itx to a per-txg i_sync_list
* - zil_itxg_clean() is called (e.g. via spa_sync())
* while the waiter is still on the i_sync_list
*
* There's nothing to prevent syncing the txg while the
* waiter is on the i_sync_list. This normally doesn't
* happen because spa_sync() is slower than zil_commit(),
* but if zil_commit() calls txg_wait_synced() (e.g.
* because zil_create() or zil_commit_writer_stall() is
* called) we will hit this case.
*/
if (itx->itx_lr.lrc_txtype == TX_COMMIT)
zil_commit_waiter_skip(itx->itx_private);
list_remove(list, itx);
zil_itx_destroy(itx);
}
cookie = NULL;
t = &itxs->i_async_tree;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list = &ian->ia_list;
while ((itx = list_head(list)) != NULL) {
list_remove(list, itx);
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(list);
kmem_free(ian, sizeof (itx_async_node_t));
}
avl_destroy(t);
kmem_free(itxs, sizeof (itxs_t));
}
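/*
 * AVL comparator for the per-txg async itx tree; nodes are ordered by
 * object id (ia_foid).
 */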
static int
zil_aitx_compare(const void *x1, const void *x2)
{
const uint64_t o1 = ((itx_async_node_t *)x1)->ia_foid;
const uint64_t o2 = ((itx_async_node_t *)x2)->ia_foid;
return (AVL_CMP(o1, o2));
}
/*
* Remove all async itxs with the given oid.
*/
static void
zil_remove_async(zilog_t *zilog, uint64_t oid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
list_t clean_list;
itx_t *itx;
ASSERT(oid != 0);
list_create(&clean_list, sizeof (itx_t), offsetof(itx_t, itx_node));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* Locate the object node and append its list.
*/
t = &itxg->itxg_itxs->i_async_tree;
ian = avl_find(t, &oid, &where);
if (ian != NULL)
list_move_tail(&clean_list, &ian->ia_list);
mutex_exit(&itxg->itxg_lock);
}
while ((itx = list_head(&clean_list)) != NULL) {
list_remove(&clean_list, itx);
/* commit itxs should never be on the async lists. */
ASSERT3U(itx->itx_lr.lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
list_destroy(&clean_list);
}
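/*
 * Assign an itx to the in-memory itx lists for its txg: synchronous
 * itxs go on the i_sync_list, async itxs are bucketed by object id in
 * the i_async_tree. TX_REMOVE and TX_RENAME additionally prune or
 * promote any pending async itxs for the object, and the zilog is
 * marked dirty so the itxs get cleaned after the txg syncs.
 */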
void
zil_itx_assign(zilog_t *zilog, itx_t *itx, dmu_tx_t *tx)
{
uint64_t txg;
itxg_t *itxg;
itxs_t *itxs, *clean = NULL;
/*
* Object ids can be re-instantiated in the next txg so
* remove any async transactions to avoid future leaks.
* This can happen if a fsync occurs on the re-instantiated
* object for a WR_INDIRECT or WR_NEED_COPY write, which gets
* the new file data and flushes a write record for the old object.
*/
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_REMOVE)
zil_remove_async(zilog, itx->itx_oid);
/*
* Ensure the data of a renamed file is committed before the rename.
*/
if ((itx->itx_lr.lrc_txtype & ~TX_CI) == TX_RENAME)
zil_async_to_sync(zilog, itx->itx_oid);
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX)
txg = ZILTEST_TXG;
else
txg = dmu_tx_get_txg(tx);
itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
itxs = itxg->itxg_itxs;
if (itxg->itxg_txg != txg) {
if (itxs != NULL) {
/*
* The zil_clean callback hasn't got around to cleaning
* this itxg. Save the itxs for release below.
* This should be rare.
*/
zfs_dbgmsg("zil_itx_assign: missed itx cleanup for "
"txg %llu", itxg->itxg_txg);
clean = itxg->itxg_itxs;
}
itxg->itxg_txg = txg;
itxs = itxg->itxg_itxs = kmem_zalloc(sizeof (itxs_t),
KM_SLEEP);
list_create(&itxs->i_sync_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
avl_create(&itxs->i_async_tree, zil_aitx_compare,
sizeof (itx_async_node_t),
offsetof(itx_async_node_t, ia_node));
}
if (itx->itx_sync) {
list_insert_tail(&itxs->i_sync_list, itx);
} else {
avl_tree_t *t = &itxs->i_async_tree;
uint64_t foid =
LR_FOID_GET_OBJ(((lr_ooo_t *)&itx->itx_lr)->lr_foid);
itx_async_node_t *ian;
avl_index_t where;
ian = avl_find(t, &foid, &where);
if (ian == NULL) {
ian = kmem_alloc(sizeof (itx_async_node_t),
KM_SLEEP);
list_create(&ian->ia_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
ian->ia_foid = foid;
avl_insert(t, ian, where);
}
list_insert_tail(&ian->ia_list, itx);
}
itx->itx_lr.lrc_txg = dmu_tx_get_txg(tx);
/*
* We don't want to dirty the ZIL using ZILTEST_TXG, because
* zil_clean() will never be called using ZILTEST_TXG. Thus, we
* need to be careful to always dirty the ZIL using the "real"
* TXG (not itxg_txg) even when the SPA is frozen.
*/
zilog_dirty(zilog, dmu_tx_get_txg(tx));
mutex_exit(&itxg->itxg_lock);
/* Release the old itxs now we've dropped the lock */
if (clean != NULL)
zil_itxg_clean(clean);
}
/*
* If there are any in-memory intent log transactions which have now been
* synced then start up a taskq to free them. We should only do this after we
* have written out the uberblocks (i.e. txg has been committed) so that we
* don't inadvertently clean out in-memory log records that would be required
* by zil_commit().
*/
void
zil_clean(zilog_t *zilog, uint64_t synced_txg)
{
itxg_t *itxg = &zilog->zl_itxg[synced_txg & TXG_MASK];
itxs_t *clean_me;
ASSERT3U(synced_txg, <, ZILTEST_TXG);
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_itxs == NULL || itxg->itxg_txg == ZILTEST_TXG) {
mutex_exit(&itxg->itxg_lock);
return;
}
ASSERT3U(itxg->itxg_txg, <=, synced_txg);
ASSERT3U(itxg->itxg_txg, !=, 0);
clean_me = itxg->itxg_itxs;
itxg->itxg_itxs = NULL;
itxg->itxg_txg = 0;
mutex_exit(&itxg->itxg_lock);
/*
* Preferably start a task queue to free up the old itxs but
* if taskq_dispatch can't allocate resources to do that then
* free it in-line. This should be rare. Note, using TQ_SLEEP
* created a bad performance problem.
*/
ASSERT3P(zilog->zl_dmu_pool, !=, NULL);
ASSERT3P(zilog->zl_dmu_pool->dp_zil_clean_taskq, !=, NULL);
taskqid_t id = taskq_dispatch(zilog->zl_dmu_pool->dp_zil_clean_taskq,
(void (*)(void *))zil_itxg_clean, clean_me, TQ_NOSLEEP);
if (id == TASKQID_INVALID)
zil_itxg_clean(clean_me);
}
/*
* This function will traverse the queue of itxs that need to be
* committed, and move them onto the ZIL's zl_itx_commit_list.
*/
static void
zil_get_commit_list(zilog_t *zilog)
{
uint64_t otxg, txg;
list_t *commit_list = &zilog->zl_itx_commit_list;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing. That's okay since we'll
* only commit things in the future.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If we're adding itx records to the zl_itx_commit_list,
* then the zil better be dirty in this "txg". We can assert
* that here since we're holding the itxg_lock which will
* prevent spa_sync from cleaning it. Once we add the itxs
* to the zl_itx_commit_list we must commit it to disk even
* if it's unnecessary (i.e. the txg was synced).
*/
ASSERT(zilog_is_dirty_in_txg(zilog, txg) ||
spa_freeze_txg(zilog->zl_spa) != UINT64_MAX);
list_move_tail(commit_list, &itxg->itxg_itxs->i_sync_list);
mutex_exit(&itxg->itxg_lock);
}
}
/*
* Move the async itxs for a specified object to commit into sync lists.
*/
static void
zil_async_to_sync(zilog_t *zilog, uint64_t foid)
{
uint64_t otxg, txg;
itx_async_node_t *ian;
avl_tree_t *t;
avl_index_t where;
if (spa_freeze_txg(zilog->zl_spa) != UINT64_MAX) /* ziltest support */
otxg = ZILTEST_TXG;
else
otxg = spa_last_synced_txg(zilog->zl_spa) + 1;
/*
* This is inherently racy, since there is nothing to prevent
* the last synced txg from changing.
*/
for (txg = otxg; txg < (otxg + TXG_CONCURRENT_STATES); txg++) {
itxg_t *itxg = &zilog->zl_itxg[txg & TXG_MASK];
mutex_enter(&itxg->itxg_lock);
if (itxg->itxg_txg != txg) {
mutex_exit(&itxg->itxg_lock);
continue;
}
/*
* If a foid is specified then find that node and append its
* list. Otherwise walk the tree appending all the lists
* to the sync list. We add to the end rather than the
* beginning to ensure the create has happened.
*/
t = &itxg->itxg_itxs->i_async_tree;
if (foid != 0) {
ian = avl_find(t, &foid, &where);
if (ian != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
}
} else {
void *cookie = NULL;
while ((ian = avl_destroy_nodes(t, &cookie)) != NULL) {
list_move_tail(&itxg->itxg_itxs->i_sync_list,
&ian->ia_list);
list_destroy(&ian->ia_list);
kmem_free(ian, sizeof (itx_async_node_t));
}
}
mutex_exit(&itxg->itxg_lock);
}
}
/*
* This function will prune commit itxs that are at the head of the
* commit list (it won't prune past the first non-commit itx), and
* either: a) attach them to the last lwb that's still pending
* completion, or b) skip them altogether.
*
* This is used as a performance optimization to prevent commit itxs
* from generating new lwbs when it's unnecessary to do so.
*/
static void
zil_prune_commit_list(zilog_t *zilog)
{
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
if (lrc->lrc_txtype != TX_COMMIT)
break;
mutex_enter(&zilog->zl_lock);
lwb_t *last_lwb = zilog->zl_last_lwb_opened;
if (last_lwb == NULL || last_lwb->lwb_state == LWB_STATE_DONE) {
/*
* All of the itxs this waiter was waiting on
* must have already completed (or there were
* never any itx's for it to wait on), so it's
* safe to skip this waiter and mark it done.
*/
zil_commit_waiter_skip(itx->itx_private);
} else {
zil_commit_waiter_link_lwb(itx->itx_private, last_lwb);
itx->itx_private = NULL;
}
mutex_exit(&zilog->zl_lock);
list_remove(&zilog->zl_itx_commit_list, itx);
zil_itx_destroy(itx);
}
IMPLY(itx != NULL, itx->itx_lr.lrc_txtype != TX_COMMIT);
}
static void
zil_commit_writer_stall(zilog_t *zilog)
{
/*
* When zio_alloc_zil() fails to allocate the next lwb block on
* disk, we must call txg_wait_synced() to ensure all of the
* lwbs in the zilog's zl_lwb_list are synced and then freed (in
* zil_sync()), such that any subsequent ZIL writer (i.e. a call
* to zil_process_commit_list()) will have to call zil_create(),
* and start a new ZIL chain.
*
* Since zio_alloc_zil() failed, the lwb that was previously
* issued does not have a pointer to the "next" lwb on disk.
* Thus, if another ZIL writer thread was to allocate the "next"
* on-disk lwb, that block could be leaked in the event of a
* crash (because the previous lwb on-disk would not point to
* it).
*
* We must hold the zilog's zl_issuer_lock while we do this, to
* ensure no new threads enter zil_process_commit_list() until
* all lwb's in the zl_lwb_list have been synced and freed
* (which is achieved via the txg_wait_synced() call).
*/
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
txg_wait_synced(zilog->zl_dmu_pool, 0);
ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
}
/*
* This function will traverse the commit list, creating new lwbs as
* needed, and committing the itxs from the commit list to these newly
* created lwbs. Additionally, as a new lwb is created, the previous
* lwb will be issued to the zio layer to be written to disk.
*/
static void
zil_process_commit_list(zilog_t *zilog)
{
spa_t *spa = zilog->zl_spa;
list_t nolwb_itxs;
list_t nolwb_waiters;
lwb_t *lwb;
itx_t *itx;
ASSERT(MUTEX_HELD(&zilog->zl_issuer_lock));
/*
* Return if there's nothing to commit before we dirty the fs by
* calling zil_create().
*/
if (list_head(&zilog->zl_itx_commit_list) == NULL)
return;
list_create(&nolwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&nolwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL) {
lwb = zil_create(zilog);
} else {
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE);
}
while ((itx = list_head(&zilog->zl_itx_commit_list)) != NULL) {
lr_t *lrc = &itx->itx_lr;
uint64_t txg = lrc->lrc_txg;
ASSERT3U(txg, !=, 0);
if (lrc->lrc_txtype == TX_COMMIT) {
DTRACE_PROBE2(zil__process__commit__itx,
zilog_t *, zilog, itx_t *, itx);
} else {
DTRACE_PROBE2(zil__process__normal__itx,
zilog_t *, zilog, itx_t *, itx);
}
list_remove(&zilog->zl_itx_commit_list, itx);
boolean_t synced = txg <= spa_last_synced_txg(spa);
boolean_t frozen = txg > spa_freeze_txg(spa);
/*
* If the txg of this itx has already been synced out, then
* we don't need to commit this itx to an lwb. This is
* because the data of this itx will have already been
* written to the main pool. This is inherently racy, and
* it's still ok to commit an itx whose txg has already
* been synced; this will result in a write that's
* unnecessary, but will do no harm.
*
* With that said, we always want to commit TX_COMMIT itxs
* to an lwb, regardless of whether or not that itx's txg
* has been synced out. We do this to ensure any OPENED lwb
* will always have at least one zil_commit_waiter_t linked
* to the lwb.
*
* As a counter-example, if we skipped TX_COMMIT itx's
* whose txg had already been synced, the following
* situation could occur if we happened to be racing with
* spa_sync:
*
* 1. We commit a non-TX_COMMIT itx to an lwb, where the
* itx's txg is 10 and the last synced txg is 9.
* 2. spa_sync finishes syncing out txg 10.
* 3. We move to the next itx in the list, it's a TX_COMMIT
* whose txg is 10, so we skip it rather than committing
* it to the lwb used in (1).
*
* If the itx that is skipped in (3) is the last TX_COMMIT
* itx in the commit list, then it's possible for the lwb
* used in (1) to remain in the OPENED state indefinitely.
*
* To prevent the above scenario from occurring, ensuring
* that once an lwb is OPENED it will transition to ISSUED
* and eventually DONE, we always commit TX_COMMIT itx's to
* an lwb here, even if that itx's txg has already been
* synced.
*
* Finally, if the pool is frozen, we _always_ commit the
* itx. The point of freezing the pool is to prevent data
* from being written to the main pool via spa_sync, and
* instead rely solely on the ZIL to persistently store the
* data; i.e. when the pool is frozen, the last synced txg
* value can't be trusted.
*/
if (frozen || !synced || lrc->lrc_txtype == TX_COMMIT) {
if (lwb != NULL) {
lwb = zil_lwb_commit(zilog, itx, lwb);
if (lwb == NULL)
list_insert_tail(&nolwb_itxs, itx);
else
list_insert_tail(&lwb->lwb_itxs, itx);
} else {
if (lrc->lrc_txtype == TX_COMMIT) {
zil_commit_waiter_link_nolwb(
itx->itx_private, &nolwb_waiters);
}
list_insert_tail(&nolwb_itxs, itx);
}
} else {
ASSERT3S(lrc->lrc_txtype, !=, TX_COMMIT);
zil_itx_destroy(itx);
}
}
if (lwb == NULL) {
/*
* This indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this happens, we must stall
* the ZIL write pipeline; see the comment within
* zil_commit_writer_stall() for more details.
*/
zil_commit_writer_stall(zilog);
/*
* Additionally, we have to signal and mark the "nolwb"
* waiters as "done" here, since without an lwb, we
* can't do this via zil_lwb_flush_vdevs_done() like
* normal.
*/
zil_commit_waiter_t *zcw;
while ((zcw = list_head(&nolwb_waiters)) != NULL) {
zil_commit_waiter_skip(zcw);
list_remove(&nolwb_waiters, zcw);
}
/*
* And finally, we have to destroy the itx's that
* couldn't be committed to an lwb; this will also call
* the itx's callback if one exists for the itx.
*/
while ((itx = list_head(&nolwb_itxs)) != NULL) {
list_remove(&nolwb_itxs, itx);
zil_itx_destroy(itx);
}
} else {
ASSERT(list_is_empty(&nolwb_waiters));
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_DONE);
/*
* At this point, the ZIL block pointed at by the "lwb"
* variable is in one of the following states: "closed"
* or "open".
*
* If it's "closed", then no itxs have been committed to
* it, so there's no point in issuing its zio (i.e. it's
* "empty").
*
* If it's "open", then it contains one or more itxs that
* eventually need to be committed to stable storage. In
* this case we intentionally do not issue the lwb's zio
* to disk yet, and instead rely on one of the following
* two mechanisms for issuing the zio:
*
* 1. Ideally, there will be more ZIL activity occurring
* on the system, such that this function will be
* immediately called again (not necessarily by the same
* thread) and this lwb's zio will be issued via
* zil_lwb_commit(). This way, the lwb is guaranteed to
* be "full" when it is issued to disk, and we'll make
* the best use of the lwb's size that we can.
*
* 2. If there isn't sufficient ZIL activity occurring on
* the system, such that this lwb's zio isn't issued via
* zil_lwb_commit(), zil_commit_waiter() will issue the
* lwb's zio. If this occurs, the lwb is not guaranteed
* to be "full" by the time its zio is issued, which means
* the size of the lwb was "too large" given the amount
* of ZIL activity occurring on the system at that time.
*
* We do this for a couple of reasons:
*
* 1. To try and reduce the number of IOPs needed to
* write the same number of itxs. If an lwb has space
* available in its buffer for more itxs, and more itxs
* will be committed relatively soon (relative to the
* latency of performing a write), then it's beneficial
* to wait for these "next" itxs. This way, more itxs
* can be committed to stable storage with fewer writes.
*
* 2. To try and use the largest lwb block size that the
* incoming rate of itxs can support. Again, this is to
* try and pack as many itxs into as few lwbs as
* possible, without significantly impacting the latency
* of each individual itx.
*/
}
}
/*
* This function is responsible for ensuring the passed in commit waiter
* (and associated commit itx) is committed to an lwb. If the waiter is
* not already committed to an lwb, all itxs in the zilog's queue of
* itxs will be processed. The assumption is that the passed in waiter's
* commit itx will be found in the queue just like the other non-commit
* itxs, such that when the entire queue is processed, the waiter will
* have been committed to an lwb.
*
* The lwb associated with the passed in waiter is not guaranteed to
* have been issued by the time this function completes. If the lwb is
* not issued, we rely on future calls to zil_commit_writer() to issue
* the lwb, or the timeout mechanism found in zil_commit_waiter().
*/
static void
zil_commit_writer(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zilog->zl_issuer_lock);
if (zcw->zcw_lwb != NULL || zcw->zcw_done) {
/*
* It's possible that, while we were waiting to acquire
* the "zl_issuer_lock", another thread committed this
* waiter to an lwb. If that occurs, we bail out early,
* without processing any of the zilog's queue of itxs.
*
* On certain workloads and system configurations, the
* "zl_issuer_lock" can become highly contended. In an
* attempt to reduce this contention, we immediately drop
* the lock if the waiter has already been processed.
*
* We've measured this optimization to reduce CPU spent
* contending on this lock by up to 5%, using a system
* with 32 CPUs, low latency storage (~50 usec writes),
* and 1024 threads performing sync writes.
*/
goto out;
}
ZIL_STAT_BUMP(zil_commit_writer_count);
zil_get_commit_list(zilog);
zil_prune_commit_list(zilog);
zil_process_commit_list(zilog);
out:
mutex_exit(&zilog->zl_issuer_lock);
}
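/*
 * Called from zil_commit_waiter() when the waiter's timeout expires
 * before its lwb has been issued: acquire the issuer lock (re-checking
 * the waiter afterwards, since the locks must be dropped and retaken),
 * and issue the still-OPENED lwb ourselves, stalling the ZIL if the
 * next block cannot be allocated.
 */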
static void
zil_commit_waiter_timeout(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
ASSERT3B(zcw->zcw_done, ==, B_FALSE);
lwb_t *lwb = zcw->zcw_lwb;
ASSERT3P(lwb, !=, NULL);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_CLOSED);
/*
* If the lwb has already been issued by another thread, we can
* immediately return since there's no work to be done (the
* point of this function is to issue the lwb). Additionally, we
* do this prior to acquiring the zl_issuer_lock, to avoid
* acquiring it when it's not necessary to do so.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_DONE)
return;
/*
* In order to call zil_lwb_write_issue() we must hold the
* zilog's "zl_issuer_lock". We can't simply acquire that lock,
* since we're already holding the commit waiter's "zcw_lock",
* and those two locks are acquired in the opposite order
* elsewhere.
*/
mutex_exit(&zcw->zcw_lock);
mutex_enter(&zilog->zl_issuer_lock);
mutex_enter(&zcw->zcw_lock);
/*
* Since we just dropped and re-acquired the commit waiter's
* lock, we have to re-check to see if the waiter was marked
* "done" during that process. If the waiter was marked "done",
* the "lwb" pointer is no longer valid (it can be free'd after
* the waiter is marked "done"), so without this check we could
* wind up with a use-after-free error below.
*/
if (zcw->zcw_done)
goto out;
ASSERT3P(lwb, ==, zcw->zcw_lwb);
/*
* We've already checked this above, but since we hadn't acquired
* the zilog's zl_issuer_lock, we have to perform this check a
* second time while holding the lock.
*
* We don't need to hold the zl_lock since the lwb cannot transition
* from OPENED to ISSUED while we hold the zl_issuer_lock. The lwb
* _can_ transition from ISSUED to DONE, but it's OK to race with
* that transition since we treat the lwb the same, whether it's in
* the ISSUED or DONE states.
*
* The important thing is that we treat the lwb differently depending on
* if it's ISSUED or OPENED, and block any other threads that might
* attempt to issue this lwb. For that reason we hold the
* zl_issuer_lock when checking the lwb_state; we must not call
* zil_lwb_write_issue() if the lwb had already been issued.
*
* See the comment above the lwb_state_t structure definition for
* more details on the lwb states, and locking requirements.
*/
if (lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_DONE)
goto out;
ASSERT3S(lwb->lwb_state, ==, LWB_STATE_OPENED);
/*
* As described in the comments above zil_commit_waiter() and
* zil_process_commit_list(), we need to issue this lwb's zio
* since we've reached the commit waiter's timeout and it still
* hasn't been issued.
*/
lwb_t *nlwb = zil_lwb_write_issue(zilog, lwb);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
/*
* Since the lwb's zio hadn't been issued by the time this thread
* reached its timeout, we reset the zilog's "zl_cur_used" field
* to influence the zil block size selection algorithm.
*
* By having to issue the lwb's zio here, it means the size of the
* lwb was too large, given the incoming throughput of itxs. By
* setting "zl_cur_used" to zero, we communicate this fact to the
* block size selection algorithm, so it can take this information
* into account, and potentially select a smaller size for the
* next lwb block that is allocated.
*/
zilog->zl_cur_used = 0;
if (nlwb == NULL) {
/*
* When zil_lwb_write_issue() returns NULL, this
* indicates zio_alloc_zil() failed to allocate the
* "next" lwb on-disk. When this occurs, the ZIL write
* pipeline must be stalled; see the comment within the
* zil_commit_writer_stall() function for more details.
*
* We must drop the commit waiter's lock prior to
* calling zil_commit_writer_stall() or else we can wind
* up with the following deadlock:
*
* - This thread is waiting for the txg to sync while
* holding the waiter's lock; txg_wait_synced() is
* used within zil_commit_writer_stall().
*
* - The txg can't sync because it is waiting for this
* lwb's zio callback to call dmu_tx_commit().
*
* - The lwb's zio callback can't call dmu_tx_commit()
* because it's blocked trying to acquire the waiter's
* lock, which occurs prior to calling dmu_tx_commit().
*/
mutex_exit(&zcw->zcw_lock);
zil_commit_writer_stall(zilog);
mutex_enter(&zcw->zcw_lock);
}
out:
mutex_exit(&zilog->zl_issuer_lock);
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
}
/*
* This function is responsible for performing the following two tasks:
*
* 1. its primary responsibility is to block until the given "commit
* waiter" is considered "done".
*
* 2. its secondary responsibility is to issue the zio for the lwb that
* the given "commit waiter" is waiting on, if this function has
* waited "long enough" and the lwb is still in the "open" state.
*
* Given a sufficient amount of itxs being generated and written using
* the ZIL, the lwb's zio will be issued via the zil_lwb_commit()
* function. If this does not occur, this secondary responsibility will
* ensure the lwb is issued even if there is no other synchronous
* activity on the system.
*
* For more details, see zil_process_commit_list(); more specifically,
* the comment at the bottom of that function.
*/
static void
zil_commit_waiter(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
ASSERT(!MUTEX_HELD(&zilog->zl_lock));
ASSERT(!MUTEX_HELD(&zilog->zl_issuer_lock));
ASSERT(spa_writeable(zilog->zl_spa));
mutex_enter(&zcw->zcw_lock);
/*
* The timeout is scaled based on the lwb latency to avoid
* significantly impacting the latency of each individual itx.
* For more details, see the comment at the bottom of the
* zil_process_commit_list() function.
*/
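/*
 * Worked example (a sketch, not from the original source): assuming
 * zfs_commit_timeout_pct is 5 and the last lwb took 1 ms to complete,
 * the computed sleep below is roughly 1 ms * 5 / 100 = 50 us before
 * this thread issues the lwb itself.
 */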
int pct = MAX(zfs_commit_timeout_pct, 1);
hrtime_t sleep = (zilog->zl_last_lwb_latency * pct) / 100;
hrtime_t wakeup = gethrtime() + sleep;
boolean_t timedout = B_FALSE;
while (!zcw->zcw_done) {
ASSERT(MUTEX_HELD(&zcw->zcw_lock));
lwb_t *lwb = zcw->zcw_lwb;
/*
* Usually, the waiter will have a non-NULL lwb field here,
* but it's possible for it to be NULL as a result of
* zil_commit() racing with spa_sync().
*
* When zil_clean() is called, it's possible for the itxg
* list (which may be cleaned via a taskq) to contain
* commit itxs. When this occurs, the commit waiters linked
* off of these commit itxs will not be committed to an
* lwb. Additionally, these commit waiters will not be
* marked done until zil_commit_waiter_skip() is called via
* zil_itxg_clean().
*
* Thus, it's possible for this commit waiter (i.e. the
* "zcw" variable) to be found in this "in between" state;
* where it's "zcw_lwb" field is NULL, and it hasn't yet
* been skipped, so it's "zcw_done" field is still B_FALSE.
*/
IMPLY(lwb != NULL, lwb->lwb_state != LWB_STATE_CLOSED);
if (lwb != NULL && lwb->lwb_state == LWB_STATE_OPENED) {
ASSERT3B(timedout, ==, B_FALSE);
/*
* If the lwb hasn't been issued yet, then we
* need to wait with a timeout, in case this
* function needs to issue the lwb after the
* timeout is reached; responsibility (2) from
* the comment above this function.
*/
clock_t timeleft = cv_timedwait_hires(&zcw->zcw_cv,
&zcw->zcw_lock, wakeup, USEC2NSEC(1),
CALLOUT_FLAG_ABSOLUTE);
if (timeleft >= 0 || zcw->zcw_done)
continue;
timedout = B_TRUE;
zil_commit_waiter_timeout(zilog, zcw);
if (!zcw->zcw_done) {
/*
* If the commit waiter has already been
* marked "done", it's possible for the
* waiter's lwb structure to have already
* been freed. Thus, we can only reliably
* make these assertions if the waiter
* isn't done.
*/
ASSERT3P(lwb, ==, zcw->zcw_lwb);
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_OPENED);
}
} else {
/*
* If the lwb isn't open, then it must have already
* been issued. In that case, there's no need to
* use a timeout when waiting for the lwb to
* complete.
*
* Additionally, if the lwb is NULL, the waiter
* will soon be signaled and marked done via
* zil_clean() and zil_itxg_clean(), so no timeout
* is required.
*/
IMPLY(lwb != NULL,
lwb->lwb_state == LWB_STATE_ISSUED ||
lwb->lwb_state == LWB_STATE_DONE);
cv_wait(&zcw->zcw_cv, &zcw->zcw_lock);
}
}
mutex_exit(&zcw->zcw_lock);
}
static zil_commit_waiter_t *
zil_alloc_commit_waiter(void)
{
zil_commit_waiter_t *zcw = kmem_cache_alloc(zil_zcw_cache, KM_SLEEP);
cv_init(&zcw->zcw_cv, NULL, CV_DEFAULT, NULL);
mutex_init(&zcw->zcw_lock, NULL, MUTEX_DEFAULT, NULL);
list_link_init(&zcw->zcw_node);
zcw->zcw_lwb = NULL;
zcw->zcw_done = B_FALSE;
zcw->zcw_zio_error = 0;
return (zcw);
}
static void
zil_free_commit_waiter(zil_commit_waiter_t *zcw)
{
ASSERT(!list_link_active(&zcw->zcw_node));
ASSERT3P(zcw->zcw_lwb, ==, NULL);
ASSERT3B(zcw->zcw_done, ==, B_TRUE);
mutex_destroy(&zcw->zcw_lock);
cv_destroy(&zcw->zcw_cv);
kmem_cache_free(zil_zcw_cache, zcw);
}
/*
* This function is used to create a TX_COMMIT itx and assign it. This
* way, it will be linked into the ZIL's list of synchronous itxs, and
* then later committed to an lwb (or skipped) when
* zil_process_commit_list() is called.
*/
static void
zil_commit_itx_assign(zilog_t *zilog, zil_commit_waiter_t *zcw)
{
dmu_tx_t *tx = dmu_tx_create(zilog->zl_os);
VERIFY0(dmu_tx_assign(tx, TXG_WAIT));
itx_t *itx = zil_itx_create(TX_COMMIT, sizeof (lr_t));
itx->itx_sync = B_TRUE;
itx->itx_private = zcw;
zil_itx_assign(zilog, itx, tx);
dmu_tx_commit(tx);
}
/*
* Commit ZFS Intent Log transactions (itxs) to stable storage.
*
* When writing ZIL transactions to the on-disk representation of the
* ZIL, the itxs are committed to a Log Write Block (lwb). Multiple
* itxs can be committed to a single lwb. Once a lwb is written and
* committed to stable storage (i.e. the lwb is written, and vdevs have
* been flushed), each itx that was committed to that lwb is also
* considered to be committed to stable storage.
*
* When an itx is committed to an lwb, the log record (lr_t) contained
* by the itx is copied into the lwb's zio buffer, and once this buffer
* is written to disk, it becomes an on-disk ZIL block.
*
* As itxs are generated, they're inserted into the ZIL's queue of
* uncommitted itxs. The semantics of zil_commit() are such that it will
* block until all itxs that were in the queue when it was called are
* committed to stable storage.
*
* If "foid" is zero, this means all "synchronous" and "asynchronous"
* itxs, for all objects in the dataset, will be committed to stable
* storage prior to zil_commit() returning. If "foid" is non-zero, all
* "synchronous" itxs for all objects, but only "asynchronous" itxs
* that correspond to the foid passed in, will be committed to stable
* storage prior to zil_commit() returning.
*
* Generally speaking, when zil_commit() is called, the consumer doesn't
* actually care about _all_ of the uncommitted itxs. Instead, they're
* simply trying to wait for a specific itx to be committed to disk,
* but the interface(s) for interacting with the ZIL don't allow such
* fine-grained communication. A better interface would allow a consumer
* to create and assign an itx, and then pass a reference to this itx to
* zil_commit(); such that zil_commit() would return as soon as that
* specific itx was committed to disk (instead of waiting for _all_
* itxs to be committed).
*
* When a thread calls zil_commit(), a special "commit itx" will be
* generated, along with a corresponding "waiter" for this commit itx.
* zil_commit() will wait on this waiter's CV, such that when the waiter
* is marked done, and signaled, zil_commit() will return.
*
* This commit itx is inserted into the queue of uncommitted itxs. This
* provides an easy mechanism for determining which itxs were in the
* queue prior to zil_commit() having been called, and which itxs were
* added after zil_commit() was called.
*
* The commit itx is special; it doesn't have any on-disk representation.
* When a commit itx is "committed" to an lwb, the waiter associated
* with it is linked onto the lwb's list of waiters. Then, when that lwb
* completes, each waiter on the lwb's list is marked done and signaled
* -- allowing the thread waiting on the waiter to return from zil_commit().
*
* It's important to point out a few critical factors that allow us
* to make use of the commit itxs, commit waiters, per-lwb lists of
* commit waiters, and zio completion callbacks like we're doing:
*
* 1. The list of waiters for each lwb is traversed, and each commit
* waiter is marked "done" and signaled, in the zio completion
* callback of the lwb's zio[*].
*
* * Actually, the waiters are signaled in the zio completion
* callback of the root zio for the DKIOCFLUSHWRITECACHE commands
* that are sent to the vdevs upon completion of the lwb zio.
*
* 2. When the itxs are inserted into the ZIL's queue of uncommitted
* itxs, the order in which they are inserted is preserved[*]; as
* itxs are added to the queue, they are added to the tail of
* in-memory linked lists.
*
* When committing the itxs to lwbs (to be written to disk), they
* are committed in the same order in which the itxs were added to
* the uncommitted queue's linked list(s); i.e. the linked list of
* itxs to commit is traversed from head to tail, and each itx is
* committed to an lwb in that order.
*
* * To clarify:
*
* - the order of "sync" itxs is preserved w.r.t. other
* "sync" itxs, regardless of the corresponding objects.
* - the order of "async" itxs is preserved w.r.t. other
* "async" itxs corresponding to the same object.
* - the order of "async" itxs is *not* preserved w.r.t. other
* "async" itxs corresponding to different objects.
* - the order of "sync" itxs w.r.t. "async" itxs (or vice
* versa) is *not* preserved, even for itxs that correspond
* to the same object.
*
* For more details, see: zil_itx_assign(), zil_async_to_sync(),
* zil_get_commit_list(), and zil_process_commit_list().
*
* 3. The lwbs represent a linked list of blocks on disk. Thus, an
* lwb cannot be considered committed to stable storage until its
* "previous" lwb is also committed to stable storage. This fact,
* coupled with the fact described above, means that itxs are
* committed in (roughly) the order in which they were generated.
* This is essential because itxs are dependent on prior itxs.
* Thus, we *must not* deem an itx as being committed to stable
* storage, until *all* prior itxs have also been committed to
* stable storage.
*
* To enforce this ordering of lwb zio's, while still leveraging as
* much of the underlying storage performance as possible, we rely
* on two fundamental concepts:
*
* 1. The creation and issuance of lwb zio's is protected by
* the zilog's "zl_issuer_lock", which ensures only a single
* thread is creating and/or issuing lwb's at a time
* 2. The "previous" lwb is a child of the "current" lwb
* (leveraging the zio parent-child dependency graph)
*
* By relying on this parent-child zio relationship, we can have
* many lwb zio's concurrently issued to the underlying storage,
* but the order in which they complete will be the same order in
* which they were created.
*/
void
zil_commit(zilog_t *zilog, uint64_t foid)
{
/*
* We should never attempt to call zil_commit on a snapshot for
* a couple of reasons:
*
* 1. A snapshot may never be modified, thus it cannot have any
* in-flight itxs that would have modified the dataset.
*
* 2. By design, when zil_commit() is called, a commit itx will
* be assigned to this zilog; as a result, the zilog will be
* dirtied. We must not dirty the zilog of a snapshot; there are
* checks in the code that enforce this invariant, and they will
* cause a panic if it's not upheld.
*/
ASSERT3B(dmu_objset_is_snapshot(zilog->zl_os), ==, B_FALSE);
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return;
if (!spa_writeable(zilog->zl_spa)) {
/*
* If the SPA is not writable, there should never be any
* pending itxs waiting to be committed to disk. If that
* weren't true, we'd skip writing those itxs out, and
* would break the semantics of zil_commit(); thus, we're
* verifying that truth before we return to the caller.
*/
ASSERT(list_is_empty(&zilog->zl_lwb_list));
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
for (int i = 0; i < TXG_SIZE; i++)
ASSERT3P(zilog->zl_itxg[i].itxg_itxs, ==, NULL);
return;
}
/*
* If the ZIL is suspended, we don't want to dirty it by calling
* zil_commit_itx_assign() below, nor can we write out
* lwbs as would be done in zil_commit_writer(). Thus, we
* simply rely on txg_wait_synced() to maintain the necessary
* semantics, and avoid calling those functions altogether.
*/
if (zilog->zl_suspend > 0) {
txg_wait_synced(zilog->zl_dmu_pool, 0);
return;
}
zil_commit_impl(zilog, foid);
}
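/*
 * Illustrative sketch only (not part of the original source): a
 * hypothetical consumer of zil_commit(), following the contract
 * described in the comment above. The function name
 * "example_zfs_fsync" and its arguments are assumed names used
 * purely for illustration.
 *
 *     static void
 *     example_zfs_fsync(objset_t *os, uint64_t object)
 *     {
 *         zilog_t *zilog = dmu_objset_zil(os);
 *
 *         // Blocks until every synchronous itx queued before this
 *         // call, plus any asynchronous itxs for "object", has been
 *         // committed to stable storage (or until spa_sync() has
 *         // made them durable after a ZIL write error).
 *         zil_commit(zilog, object);
 *     }
 */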
void
zil_commit_impl(zilog_t *zilog, uint64_t foid)
{
ZIL_STAT_BUMP(zil_commit_count);
/*
* Move the "async" itxs for the specified foid to the "sync"
* queues, such that they will be later committed (or skipped)
* to an lwb when zil_process_commit_list() is called.
*
* Since these "async" itxs must be committed prior to this
* call to zil_commit returning, we must perform this operation
* before we call zil_commit_itx_assign().
*/
zil_async_to_sync(zilog, foid);
/*
* We allocate a new "waiter" structure which will initially be
* linked to the commit itx using the itx's "itx_private" field.
* Since the commit itx doesn't represent any on-disk state,
* when it's committed to an lwb, rather than copying its
* lr_t into the lwb's buffer, the commit itx's "waiter" will be
* added to the lwb's list of waiters. Then, when the lwb is
* committed to stable storage, each waiter in the lwb's list of
* waiters will be marked "done", and signaled.
*
* We must create the waiter and assign the commit itx prior to
* calling zil_commit_writer(), or else our specific commit itx
* is not guaranteed to be committed to an lwb prior to calling
* zil_commit_waiter().
*/
zil_commit_waiter_t *zcw = zil_alloc_commit_waiter();
zil_commit_itx_assign(zilog, zcw);
zil_commit_writer(zilog, zcw);
zil_commit_waiter(zilog, zcw);
if (zcw->zcw_zio_error != 0) {
/*
* If there was an error writing out the ZIL blocks that
* this thread is waiting on, then we fall back to
* relying on spa_sync() to write out the data this
* thread is waiting on. Obviously this has performance
* implications, but the expectation is that this is an
* exceptional case and shouldn't occur often.
*/
DTRACE_PROBE2(zil__commit__io__error,
zilog_t *, zilog, zil_commit_waiter_t *, zcw);
txg_wait_synced(zilog->zl_dmu_pool, 0);
}
zil_free_commit_waiter(zcw);
}
/*
* Called in syncing context to free committed log blocks and update log header.
*/
void
zil_sync(zilog_t *zilog, dmu_tx_t *tx)
{
zil_header_t *zh = zil_header_in_syncing_context(zilog);
uint64_t txg = dmu_tx_get_txg(tx);
spa_t *spa = zilog->zl_spa;
uint64_t *replayed_seq = &zilog->zl_replayed_seq[txg & TXG_MASK];
lwb_t *lwb;
/*
* We don't zero out zl_destroy_txg, so make sure we don't try
* to destroy it twice.
*/
if (spa_sync_pass(spa) != 1)
return;
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_stop_sync == 0);
if (*replayed_seq != 0) {
ASSERT(zh->zh_replay_seq < *replayed_seq);
zh->zh_replay_seq = *replayed_seq;
*replayed_seq = 0;
}
if (zilog->zl_destroy_txg == txg) {
blkptr_t blk = zh->zh_log;
ASSERT(list_head(&zilog->zl_lwb_list) == NULL);
bzero(zh, sizeof (zil_header_t));
bzero(zilog->zl_replayed_seq, sizeof (zilog->zl_replayed_seq));
if (zilog->zl_keep_first) {
/*
* If this block was part of a log chain that couldn't
* be claimed because a device was missing during
* zil_claim(), but that device later returns,
* then this block could erroneously appear valid.
* To guard against this, assign a new GUID to the new
* log chain so it doesn't matter what blk points to.
*/
zil_init_log_chain(zilog, &blk);
zh->zh_log = blk;
}
}
while ((lwb = list_head(&zilog->zl_lwb_list)) != NULL) {
zh->zh_log = lwb->lwb_blk;
if (lwb->lwb_buf != NULL || lwb->lwb_max_txg > txg)
break;
list_remove(&zilog->zl_lwb_list, lwb);
zio_free(spa, txg, &lwb->lwb_blk);
zil_free_lwb(zilog, lwb);
/*
* If we don't have anything left in the lwb list then
* we've had an allocation failure and we need to zero
* out the zil_header blkptr so that we don't end
* up freeing the same block twice.
*/
if (list_head(&zilog->zl_lwb_list) == NULL)
BP_ZERO(&zh->zh_log);
}
/*
* Remove fastwrite on any blocks that have been pre-allocated for
* the next commit. This prevents fastwrite counter pollution by
* unused, long-lived LWBs.
*/
for (; lwb != NULL; lwb = list_next(&zilog->zl_lwb_list, lwb)) {
if (lwb->lwb_fastwrite && !lwb->lwb_write_zio) {
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
lwb->lwb_fastwrite = 0;
}
}
mutex_exit(&zilog->zl_lock);
}
/* ARGSUSED */
static int
zil_lwb_cons(void *vbuf, void *unused, int kmflag)
{
lwb_t *lwb = vbuf;
list_create(&lwb->lwb_itxs, sizeof (itx_t), offsetof(itx_t, itx_node));
list_create(&lwb->lwb_waiters, sizeof (zil_commit_waiter_t),
offsetof(zil_commit_waiter_t, zcw_node));
avl_create(&lwb->lwb_vdev_tree, zil_lwb_vdev_compare,
sizeof (zil_vdev_node_t), offsetof(zil_vdev_node_t, zv_node));
mutex_init(&lwb->lwb_vdev_lock, NULL, MUTEX_DEFAULT, NULL);
return (0);
}
/* ARGSUSED */
static void
zil_lwb_dest(void *vbuf, void *unused)
{
lwb_t *lwb = vbuf;
mutex_destroy(&lwb->lwb_vdev_lock);
avl_destroy(&lwb->lwb_vdev_tree);
list_destroy(&lwb->lwb_waiters);
list_destroy(&lwb->lwb_itxs);
}
void
zil_init(void)
{
zil_lwb_cache = kmem_cache_create("zil_lwb_cache",
sizeof (lwb_t), 0, zil_lwb_cons, zil_lwb_dest, NULL, NULL, NULL, 0);
zil_zcw_cache = kmem_cache_create("zil_zcw_cache",
sizeof (zil_commit_waiter_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zil_ksp = kstat_create("zfs", 0, "zil", "misc",
KSTAT_TYPE_NAMED, sizeof (zil_stats) / sizeof (kstat_named_t),
KSTAT_FLAG_VIRTUAL);
if (zil_ksp != NULL) {
zil_ksp->ks_data = &zil_stats;
kstat_install(zil_ksp);
}
}
void
zil_fini(void)
{
kmem_cache_destroy(zil_zcw_cache);
kmem_cache_destroy(zil_lwb_cache);
if (zil_ksp != NULL) {
kstat_delete(zil_ksp);
zil_ksp = NULL;
}
}
void
zil_set_sync(zilog_t *zilog, uint64_t sync)
{
zilog->zl_sync = sync;
}
void
zil_set_logbias(zilog_t *zilog, uint64_t logbias)
{
zilog->zl_logbias = logbias;
}
zilog_t *
zil_alloc(objset_t *os, zil_header_t *zh_phys)
{
zilog_t *zilog;
zilog = kmem_zalloc(sizeof (zilog_t), KM_SLEEP);
zilog->zl_header = zh_phys;
zilog->zl_os = os;
zilog->zl_spa = dmu_objset_spa(os);
zilog->zl_dmu_pool = dmu_objset_pool(os);
zilog->zl_destroy_txg = TXG_INITIAL - 1;
zilog->zl_logbias = dmu_objset_logbias(os);
zilog->zl_sync = dmu_objset_syncprop(os);
zilog->zl_dirty_max_txg = 0;
zilog->zl_last_lwb_opened = NULL;
zilog->zl_last_lwb_latency = 0;
mutex_init(&zilog->zl_lock, NULL, MUTEX_DEFAULT, NULL);
mutex_init(&zilog->zl_issuer_lock, NULL, MUTEX_DEFAULT, NULL);
for (int i = 0; i < TXG_SIZE; i++) {
mutex_init(&zilog->zl_itxg[i].itxg_lock, NULL,
MUTEX_DEFAULT, NULL);
}
list_create(&zilog->zl_lwb_list, sizeof (lwb_t),
offsetof(lwb_t, lwb_node));
list_create(&zilog->zl_itx_commit_list, sizeof (itx_t),
offsetof(itx_t, itx_node));
cv_init(&zilog->zl_cv_suspend, NULL, CV_DEFAULT, NULL);
return (zilog);
}
void
zil_free(zilog_t *zilog)
{
int i;
zilog->zl_stop_sync = 1;
ASSERT0(zilog->zl_suspend);
ASSERT0(zilog->zl_suspending);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
list_destroy(&zilog->zl_lwb_list);
ASSERT(list_is_empty(&zilog->zl_itx_commit_list));
list_destroy(&zilog->zl_itx_commit_list);
for (i = 0; i < TXG_SIZE; i++) {
/*
* It's possible for an itx to be generated that doesn't dirty
* a txg (e.g. ztest TX_TRUNCATE). So there's no zil_clean()
* callback to remove the entry. We remove those here.
*
* Also free up the ziltest itxs.
*/
if (zilog->zl_itxg[i].itxg_itxs)
zil_itxg_clean(zilog->zl_itxg[i].itxg_itxs);
mutex_destroy(&zilog->zl_itxg[i].itxg_lock);
}
mutex_destroy(&zilog->zl_issuer_lock);
mutex_destroy(&zilog->zl_lock);
cv_destroy(&zilog->zl_cv_suspend);
kmem_free(zilog, sizeof (zilog_t));
}
/*
* Open an intent log.
*/
zilog_t *
zil_open(objset_t *os, zil_get_data_t *get_data)
{
zilog_t *zilog = dmu_objset_zil(os);
ASSERT3P(zilog->zl_get_data, ==, NULL);
ASSERT3P(zilog->zl_last_lwb_opened, ==, NULL);
ASSERT(list_is_empty(&zilog->zl_lwb_list));
zilog->zl_get_data = get_data;
return (zilog);
}
/*
* Close an intent log.
*/
void
zil_close(zilog_t *zilog)
{
lwb_t *lwb;
uint64_t txg;
if (!dmu_objset_is_snapshot(zilog->zl_os)) {
zil_commit(zilog, 0);
} else {
ASSERT3P(list_tail(&zilog->zl_lwb_list), ==, NULL);
ASSERT0(zilog->zl_dirty_max_txg);
ASSERT3B(zilog_is_dirty(zilog), ==, B_FALSE);
}
mutex_enter(&zilog->zl_lock);
lwb = list_tail(&zilog->zl_lwb_list);
if (lwb == NULL)
txg = zilog->zl_dirty_max_txg;
else
txg = MAX(zilog->zl_dirty_max_txg, lwb->lwb_max_txg);
mutex_exit(&zilog->zl_lock);
/*
* We need to use txg_wait_synced() to wait long enough for the
* ZIL to be clean, and to wait for all pending lwbs to be
* written out.
*/
if (txg != 0)
txg_wait_synced(zilog->zl_dmu_pool, txg);
if (zilog_is_dirty(zilog))
zfs_dbgmsg("zil (%p) is dirty, txg %llu", zilog, txg);
if (txg < spa_freeze_txg(zilog->zl_spa))
VERIFY(!zilog_is_dirty(zilog));
zilog->zl_get_data = NULL;
/*
* We should have only one lwb left on the list; remove it now.
*/
mutex_enter(&zilog->zl_lock);
lwb = list_head(&zilog->zl_lwb_list);
if (lwb != NULL) {
ASSERT3P(lwb, ==, list_tail(&zilog->zl_lwb_list));
ASSERT3S(lwb->lwb_state, !=, LWB_STATE_ISSUED);
if (lwb->lwb_fastwrite)
metaslab_fastwrite_unmark(zilog->zl_spa, &lwb->lwb_blk);
list_remove(&zilog->zl_lwb_list, lwb);
zio_buf_free(lwb->lwb_buf, lwb->lwb_sz);
zil_free_lwb(zilog, lwb);
}
mutex_exit(&zilog->zl_lock);
}
static char *suspend_tag = "zil suspending";
/*
* Suspend an intent log. While in suspended mode, we still honor
* synchronous semantics, but we rely on txg_wait_synced() to do it.
* On old version pools, we suspend the log briefly when taking a
* snapshot so that it will have an empty intent log.
*
* Long holds are not really intended to be used the way we do here --
* held for such a short time. A concurrent caller of dsl_dataset_long_held()
* could fail. Therefore we take pains to only put a long hold if it is
* actually necessary. Fortunately, it will only be necessary if the
* objset is currently mounted (or the ZVOL equivalent). In that case it
* will already have a long hold, so we are not really making things any worse.
*
* Ideally, we would locate the existing long-holder (i.e. the zfsvfs_t or
* zvol_state_t), and use their mechanism to prevent their hold from being
* dropped (e.g. VFS_HOLD()). However, that would be even more pain for
* very little gain.
*
* If cookiep == NULL, this does both the suspend & resume.
* Otherwise, it returns with the dataset "long held", and the cookie
* should be passed into zil_resume().
*/
int
zil_suspend(const char *osname, void **cookiep)
{
objset_t *os;
zilog_t *zilog;
const zil_header_t *zh;
int error;
error = dmu_objset_hold(osname, suspend_tag, &os);
if (error != 0)
return (error);
zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
zh = zilog->zl_header;
if (zh->zh_flags & ZIL_REPLAY_NEEDED) { /* unplayed log */
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (SET_ERROR(EBUSY));
}
/*
* Don't put a long hold in the cases where we can avoid it. This
* is when there is no cookie so we are doing a suspend & resume
* (i.e. called from zil_vdev_offline()), and there's nothing to do
* for the suspend because it's already suspended, or there's no ZIL.
*/
if (cookiep == NULL && !zilog->zl_suspending &&
(zilog->zl_suspend > 0 || BP_IS_HOLE(&zh->zh_log))) {
mutex_exit(&zilog->zl_lock);
dmu_objset_rele(os, suspend_tag);
return (0);
}
dsl_dataset_long_hold(dmu_objset_ds(os), suspend_tag);
dsl_pool_rele(dmu_objset_pool(os), suspend_tag);
zilog->zl_suspend++;
if (zilog->zl_suspend > 1) {
/*
* Someone else is already suspending it.
* Just wait for them to finish.
*/
while (zilog->zl_suspending)
cv_wait(&zilog->zl_cv_suspend, &zilog->zl_lock);
mutex_exit(&zilog->zl_lock);
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
/*
* If there is no pointer to an on-disk block, this ZIL must not
* be active (e.g. filesystem not mounted), so there's nothing
* to clean up.
*/
if (BP_IS_HOLE(&zh->zh_log)) {
ASSERT(cookiep != NULL); /* fast path already handled */
*cookiep = os;
mutex_exit(&zilog->zl_lock);
return (0);
}
/*
* The ZIL has work to do. Ensure that the associated encryption
* key will remain mapped while we are committing the log by
* grabbing a reference to it. If the key isn't loaded we have no
* choice but to return an error until the wrapping key is loaded.
*/
if (os->os_encrypted && spa_keystore_create_mapping(os->os_spa,
dmu_objset_ds(os), FTAG) != 0) {
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
return (SET_ERROR(EBUSY));
}
zilog->zl_suspending = B_TRUE;
mutex_exit(&zilog->zl_lock);
/*
* We need to use zil_commit_impl to ensure we wait for all
* LWB_STATE_OPENED and LWB_STATE_ISSUED lwbs to be committed
* to disk before proceeding. If we used zil_commit instead, it
* would just call txg_wait_synced(), because zl_suspend is set.
* txg_wait_synced() doesn't wait for these lwb's to be
* LWB_STATE_DONE before returning.
*/
zil_commit_impl(zilog, 0);
/*
* Now that we've ensured all lwb's are LWB_STATE_DONE, we use
* txg_wait_synced() to ensure the data from the zilog has
* migrated to the main pool before calling zil_destroy().
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zil_destroy(zilog, B_FALSE);
mutex_enter(&zilog->zl_lock);
zilog->zl_suspending = B_FALSE;
cv_broadcast(&zilog->zl_cv_suspend);
mutex_exit(&zilog->zl_lock);
if (os->os_encrypted) {
/*
* Encrypted datasets need to wait for all data to be
* synced out before removing the mapping.
*
* XXX: Depending on the number of datasets with
* outstanding ZIL data on a given log device, this
* might cause spa_offline_log() to take a long time.
*/
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
VERIFY0(spa_keystore_remove_mapping(os->os_spa,
dmu_objset_id(os), FTAG));
}
if (cookiep == NULL)
zil_resume(os);
else
*cookiep = os;
return (0);
}
void
zil_resume(void *cookie)
{
objset_t *os = cookie;
zilog_t *zilog = dmu_objset_zil(os);
mutex_enter(&zilog->zl_lock);
ASSERT(zilog->zl_suspend != 0);
zilog->zl_suspend--;
mutex_exit(&zilog->zl_lock);
dsl_dataset_long_rele(dmu_objset_ds(os), suspend_tag);
dsl_dataset_rele(dmu_objset_ds(os), suspend_tag);
}
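/*
 * Illustrative sketch only (not part of the original source): the
 * suspend/resume pairing described in the comment above zil_suspend().
 * The dataset name is an arbitrary example.
 *
 *     void *cookie;
 *     int err = zil_suspend("pool/fs", &cookie);
 *     if (err == 0) {
 *         // The ZIL is now empty and the dataset is "long held";
 *         // e.g. take a snapshot here.
 *         zil_resume(cookie);
 *     }
 *
 * Passing a NULL cookiep instead performs the suspend and resume in a
 * single call, as zil_reset() does below.
 */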
typedef struct zil_replay_arg {
zil_replay_func_t **zr_replay;
void *zr_arg;
boolean_t zr_byteswap;
char *zr_lr;
} zil_replay_arg_t;
static int
zil_replay_error(zilog_t *zilog, lr_t *lr, int error)
{
char name[ZFS_MAX_DATASET_NAME_LEN];
zilog->zl_replaying_seq--; /* didn't actually replay this one */
dmu_objset_name(zilog->zl_os, name);
cmn_err(CE_WARN, "ZFS replay transaction error %d, "
"dataset %s, seq 0x%llx, txtype %llu %s\n", error, name,
(u_longlong_t)lr->lrc_seq,
(u_longlong_t)(lr->lrc_txtype & ~TX_CI),
(lr->lrc_txtype & TX_CI) ? "CI" : "");
return (error);
}
static int
zil_replay_log_record(zilog_t *zilog, lr_t *lr, void *zra, uint64_t claim_txg)
{
zil_replay_arg_t *zr = zra;
const zil_header_t *zh = zilog->zl_header;
uint64_t reclen = lr->lrc_reclen;
uint64_t txtype = lr->lrc_txtype;
int error = 0;
zilog->zl_replaying_seq = lr->lrc_seq;
if (lr->lrc_seq <= zh->zh_replay_seq) /* already replayed */
return (0);
if (lr->lrc_txg < claim_txg) /* already committed */
return (0);
/* Strip case-insensitive bit, still present in log record */
txtype &= ~TX_CI;
if (txtype == 0 || txtype >= TX_MAX_TYPE)
return (zil_replay_error(zilog, lr, EINVAL));
/*
* If this record type can be logged out of order, the object
* (lr_foid) may no longer exist. That's legitimate, not an error.
*/
if (TX_OOO(txtype)) {
error = dmu_object_info(zilog->zl_os,
LR_FOID_GET_OBJ(((lr_ooo_t *)lr)->lr_foid), NULL);
if (error == ENOENT || error == EEXIST)
return (0);
}
/*
* Make a copy of the data so we can revise and extend it.
*/
bcopy(lr, zr->zr_lr, reclen);
/*
* If this is a TX_WRITE with a blkptr, suck in the data.
*/
if (txtype == TX_WRITE && reclen == sizeof (lr_write_t)) {
error = zil_read_log_data(zilog, (lr_write_t *)lr,
zr->zr_lr + reclen);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
/*
* The log block containing this lr may have been byteswapped
* so that we can easily examine common fields like lrc_txtype.
* However, the log is a mix of different record types, and only the
* replay vectors know how to byteswap their records. Therefore, if
* the lr was byteswapped, undo it before invoking the replay vector.
*/
if (zr->zr_byteswap)
byteswap_uint64_array(zr->zr_lr, reclen);
/*
* We must now do two things atomically: replay this log record,
* and update the log header sequence number to reflect the fact that
* we did so. At the end of each replay function the sequence number
* is updated if we are in replay mode.
*/
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, zr->zr_byteswap);
if (error != 0) {
/*
* The DMU's dnode layer doesn't see removes until the txg
* commits, so a subsequent claim can spuriously fail with
* EEXIST. So if we receive any error, we try syncing out
* any removes and then retry the transaction. Note that we
* specify B_FALSE for byteswap now, so we don't do it twice.
*/
txg_wait_synced(spa_get_dsl(zilog->zl_spa), 0);
error = zr->zr_replay[txtype](zr->zr_arg, zr->zr_lr, B_FALSE);
if (error != 0)
return (zil_replay_error(zilog, lr, error));
}
return (0);
}
/* ARGSUSED */
static int
zil_incr_blks(zilog_t *zilog, blkptr_t *bp, void *arg, uint64_t claim_txg)
{
zilog->zl_replay_blks++;
return (0);
}
/*
* If this dataset has a non-empty intent log, replay it and destroy it.
*/
void
zil_replay(objset_t *os, void *arg, zil_replay_func_t *replay_func[TX_MAX_TYPE])
{
zilog_t *zilog = dmu_objset_zil(os);
const zil_header_t *zh = zilog->zl_header;
zil_replay_arg_t zr;
if ((zh->zh_flags & ZIL_REPLAY_NEEDED) == 0) {
zil_destroy(zilog, B_TRUE);
return;
}
zr.zr_replay = replay_func;
zr.zr_arg = arg;
zr.zr_byteswap = BP_SHOULD_BYTESWAP(&zh->zh_log);
zr.zr_lr = vmem_alloc(2 * SPA_MAXBLOCKSIZE, KM_SLEEP);
/*
* Wait for in-progress removes to sync before starting replay.
*/
txg_wait_synced(zilog->zl_dmu_pool, 0);
zilog->zl_replay = B_TRUE;
zilog->zl_replay_time = ddi_get_lbolt();
ASSERT(zilog->zl_replay_blks == 0);
(void) zil_parse(zilog, zil_incr_blks, zil_replay_log_record, &zr,
zh->zh_claim_txg, B_TRUE);
vmem_free(zr.zr_lr, 2 * SPA_MAXBLOCKSIZE);
zil_destroy(zilog, B_FALSE);
txg_wait_synced(zilog->zl_dmu_pool, zilog->zl_destroy_txg);
zilog->zl_replay = B_FALSE;
}
boolean_t
zil_replaying(zilog_t *zilog, dmu_tx_t *tx)
{
if (zilog->zl_sync == ZFS_SYNC_DISABLED)
return (B_TRUE);
if (zilog->zl_replay) {
dsl_dataset_dirty(dmu_objset_ds(zilog->zl_os), tx);
zilog->zl_replayed_seq[dmu_tx_get_txg(tx) & TXG_MASK] =
zilog->zl_replaying_seq;
return (B_TRUE);
}
return (B_FALSE);
}
/* ARGSUSED */
int
-zil_vdev_offline(const char *osname, void *arg)
+zil_reset(const char *osname, void *arg)
{
int error;
error = zil_suspend(osname, NULL);
if (error != 0)
return (SET_ERROR(EEXIST));
return (0);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zil_alloc);
EXPORT_SYMBOL(zil_free);
EXPORT_SYMBOL(zil_open);
EXPORT_SYMBOL(zil_close);
EXPORT_SYMBOL(zil_replay);
EXPORT_SYMBOL(zil_replaying);
EXPORT_SYMBOL(zil_destroy);
EXPORT_SYMBOL(zil_destroy_sync);
EXPORT_SYMBOL(zil_itx_create);
EXPORT_SYMBOL(zil_itx_destroy);
EXPORT_SYMBOL(zil_itx_assign);
EXPORT_SYMBOL(zil_commit);
-EXPORT_SYMBOL(zil_vdev_offline);
EXPORT_SYMBOL(zil_claim);
EXPORT_SYMBOL(zil_check_log_chain);
EXPORT_SYMBOL(zil_sync);
EXPORT_SYMBOL(zil_clean);
EXPORT_SYMBOL(zil_suspend);
EXPORT_SYMBOL(zil_resume);
EXPORT_SYMBOL(zil_lwb_add_block);
EXPORT_SYMBOL(zil_bp_tree_add);
EXPORT_SYMBOL(zil_set_sync);
EXPORT_SYMBOL(zil_set_logbias);
/* BEGIN CSTYLED */
module_param(zfs_commit_timeout_pct, int, 0644);
MODULE_PARM_DESC(zfs_commit_timeout_pct, "ZIL block open timeout percentage");
module_param(zil_replay_disable, int, 0644);
MODULE_PARM_DESC(zil_replay_disable, "Disable intent logging replay");
module_param(zfs_nocacheflush, int, 0644);
MODULE_PARM_DESC(zfs_nocacheflush, "Disable cache flushes");
module_param(zil_slog_bulk, ulong, 0644);
MODULE_PARM_DESC(zil_slog_bulk, "Limit in bytes slog sync writes per commit");
/* END CSTYLED */
#endif
diff --git a/module/zfs/zio.c b/module/zfs/zio.c
index c6379bfd4c4b..5259a0c6f01d 100644
--- a/module/zfs/zio.c
+++ b/module/zfs/zio.c
@@ -1,4647 +1,4683 @@
/*
* CDDL HEADER START
*
* The contents of this file are subject to the terms of the
* Common Development and Distribution License (the "License").
* You may not use this file except in compliance with the License.
*
* You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
* or http://www.opensolaris.org/os/licensing.
* See the License for the specific language governing permissions
* and limitations under the License.
*
* When distributing Covered Code, include this CDDL HEADER in each
* file and include the License file at usr/src/OPENSOLARIS.LICENSE.
* If applicable, add the following below this CDDL HEADER, with the
* fields enclosed by brackets "[]" replaced with your own identifying
* information: Portions Copyright [yyyy] [name of copyright owner]
*
* CDDL HEADER END
*/
/*
* Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved.
* Copyright (c) 2011, 2017 by Delphix. All rights reserved.
* Copyright (c) 2011 Nexenta Systems, Inc. All rights reserved.
*/
#include <sys/sysmacros.h>
#include <sys/zfs_context.h>
#include <sys/fm/fs/zfs.h>
#include <sys/spa.h>
#include <sys/txg.h>
#include <sys/spa_impl.h>
#include <sys/vdev_impl.h>
#include <sys/zio_impl.h>
#include <sys/zio_compress.h>
#include <sys/zio_checksum.h>
#include <sys/dmu_objset.h>
#include <sys/arc.h>
#include <sys/ddt.h>
#include <sys/blkptr.h>
#include <sys/zfeature.h>
#include <sys/dsl_scan.h>
#include <sys/metaslab_impl.h>
#include <sys/time.h>
#include <sys/trace_zio.h>
#include <sys/abd.h>
#include <sys/dsl_crypt.h>
/*
* ==========================================================================
* I/O type descriptions
* ==========================================================================
*/
const char *zio_type_name[ZIO_TYPES] = {
/*
* Note: Linux kernel thread name length is limited
* so these names will differ from upstream OpenZFS.
*/
"z_null", "z_rd", "z_wr", "z_fr", "z_cl", "z_ioctl"
};
int zio_dva_throttle_enabled = B_TRUE;
/*
* ==========================================================================
* I/O kmem caches
* ==========================================================================
*/
kmem_cache_t *zio_cache;
kmem_cache_t *zio_link_cache;
kmem_cache_t *zio_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
kmem_cache_t *zio_data_buf_cache[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
uint64_t zio_buf_cache_allocs[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
uint64_t zio_buf_cache_frees[SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT];
#endif
int zio_delay_max = ZIO_DELAY_MAX;
#define ZIO_PIPELINE_CONTINUE 0x100
#define ZIO_PIPELINE_STOP 0x101
#define BP_SPANB(indblkshift, level) \
(((uint64_t)1) << ((level) * ((indblkshift) - SPA_BLKPTRSHIFT)))
#define COMPARE_META_LEVEL 0x80000000ul
/*
* The following actions directly affect the spa's sync-to-convergence logic.
* The values below define the sync pass when we start performing the action.
* Care should be taken when changing these values as they directly impact
* spa_sync() performance. Tuning these values may introduce subtle performance
* pathologies and should only be done in the context of performance analysis.
* These tunables will eventually be removed and replaced with #defines once
* enough analysis has been done to determine optimal values.
*
* The 'zfs_sync_pass_deferred_free' pass must be greater than 1 to ensure that
* regular blocks are not deferred.
*/
int zfs_sync_pass_deferred_free = 2; /* defer frees starting in this pass */
int zfs_sync_pass_dont_compress = 5; /* don't compress starting in this pass */
int zfs_sync_pass_rewrite = 2; /* rewrite new bps starting in this pass */
/*
* An allocating zio is one that either currently has the DVA allocate
* stage set or will have it later in its lifetime.
*/
#define IO_IS_ALLOCATING(zio) ((zio)->io_orig_pipeline & ZIO_STAGE_DVA_ALLOCATE)
int zio_requeue_io_start_cut_in_line = 1;
#ifdef ZFS_DEBUG
int zio_buf_debug_limit = 16384;
#else
int zio_buf_debug_limit = 0;
#endif
static inline void __zio_execute(zio_t *zio);
static void zio_taskq_dispatch(zio_t *, zio_taskq_type_t, boolean_t);
void
zio_init(void)
{
size_t c;
vmem_t *data_alloc_arena = NULL;
zio_cache = kmem_cache_create("zio_cache",
sizeof (zio_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
zio_link_cache = kmem_cache_create("zio_link_cache",
sizeof (zio_link_t), 0, NULL, NULL, NULL, NULL, NULL, 0);
/*
* For small buffers, we want a cache for each multiple of
* SPA_MINBLOCKSIZE. For larger buffers, we want a cache
* for each quarter-power of 2.
*/
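/*
 * Worked example (a sketch, not from the original source, assuming
 * SPA_MINBLOCKSIZE is 512 and PAGESIZE is 4K): in the kernel case,
 * every 512-byte multiple up to 4K gets its own cache, while between
 * 16K and 32K only the quarter-power-of-2 boundaries (20K, 24K, 28K,
 * 32K) do. The fixup loop after the main loop points each uncached
 * size at the next larger cache.
 */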
for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
size_t size = (c + 1) << SPA_MINBLOCKSHIFT;
size_t p2 = size;
size_t align = 0;
size_t cflags = (size > zio_buf_debug_limit) ? KMC_NODEBUG : 0;
#if defined(_ILP32) && defined(_KERNEL)
/*
* Cache size limited to 1M on 32-bit platforms until ARC
* buffers no longer require virtual address space.
*/
if (size > zfs_max_recordsize)
break;
#endif
while (!ISP2(p2))
p2 &= p2 - 1;
#ifndef _KERNEL
/*
* If we are using watchpoints, put each buffer on its own page,
* to eliminate the performance overhead of trapping to the
* kernel when modifying a non-watched buffer that shares the
* page with a watched buffer.
*/
if (arc_watch && !IS_P2ALIGNED(size, PAGESIZE))
continue;
/*
* Here's the problem - on 4K native devices in userland on
* Linux using O_DIRECT, buffers must be 4K aligned or I/O
* will fail with EINVAL, causing zdb (and others) to coredump.
* Since userland probably doesn't need optimized buffer caches,
* we just force 4K alignment on everything.
*/
align = 8 * SPA_MINBLOCKSIZE;
#else
if (size < PAGESIZE) {
align = SPA_MINBLOCKSIZE;
} else if (IS_P2ALIGNED(size, p2 >> 2)) {
align = PAGESIZE;
}
#endif
if (align != 0) {
char name[36];
(void) sprintf(name, "zio_buf_%lu", (ulong_t)size);
zio_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL, NULL, cflags);
(void) sprintf(name, "zio_data_buf_%lu", (ulong_t)size);
zio_data_buf_cache[c] = kmem_cache_create(name, size,
align, NULL, NULL, NULL, NULL,
data_alloc_arena, cflags);
}
}
while (--c != 0) {
ASSERT(zio_buf_cache[c] != NULL);
if (zio_buf_cache[c - 1] == NULL)
zio_buf_cache[c - 1] = zio_buf_cache[c];
ASSERT(zio_data_buf_cache[c] != NULL);
if (zio_data_buf_cache[c - 1] == NULL)
zio_data_buf_cache[c - 1] = zio_data_buf_cache[c];
}
zio_inject_init();
lz4_init();
}
void
zio_fini(void)
{
size_t c;
kmem_cache_t *last_cache = NULL;
kmem_cache_t *last_data_cache = NULL;
for (c = 0; c < SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT; c++) {
#ifdef _ILP32
/*
* Cache size limited to 1M on 32-bit platforms until ARC
* buffers no longer require virtual address space.
*/
if (((c + 1) << SPA_MINBLOCKSHIFT) > zfs_max_recordsize)
break;
#endif
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
if (zio_buf_cache_allocs[c] != zio_buf_cache_frees[c])
(void) printf("zio_fini: [%d] %llu != %llu\n",
(int)((c + 1) << SPA_MINBLOCKSHIFT),
(long long unsigned)zio_buf_cache_allocs[c],
(long long unsigned)zio_buf_cache_frees[c]);
#endif
if (zio_buf_cache[c] != last_cache) {
last_cache = zio_buf_cache[c];
kmem_cache_destroy(zio_buf_cache[c]);
}
zio_buf_cache[c] = NULL;
if (zio_data_buf_cache[c] != last_data_cache) {
last_data_cache = zio_data_buf_cache[c];
kmem_cache_destroy(zio_data_buf_cache[c]);
}
zio_data_buf_cache[c] = NULL;
}
kmem_cache_destroy(zio_link_cache);
kmem_cache_destroy(zio_cache);
zio_inject_fini();
lz4_fini();
}
/*
* ==========================================================================
* Allocate and free I/O buffers
* ==========================================================================
*/
/*
* Use zio_buf_alloc to allocate ZFS metadata. This data will appear in a
* crashdump if the kernel panics, so use it judiciously. Obviously, it's
* useful to inspect ZFS metadata, but if possible, we should avoid keeping
* excess / transient data in-core during a crashdump.
*/
void *
zio_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_allocs[c], 1);
#endif
return (kmem_cache_alloc(zio_buf_cache[c], KM_PUSHPAGE));
}
/*
* Use zio_data_buf_alloc to allocate data. The data will not appear in a
* crashdump if the kernel panics. This exists so that we will limit the amount
* of ZFS data that shows up in a kernel crashdump (thus reducing the amount
* of kernel heap dumped to disk when the kernel panics).
*/
void *
zio_data_buf_alloc(size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
return (kmem_cache_alloc(zio_data_buf_cache[c], KM_PUSHPAGE));
}
void
zio_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
#if defined(ZFS_DEBUG) && !defined(_KERNEL)
atomic_add_64(&zio_buf_cache_frees[c], 1);
#endif
kmem_cache_free(zio_buf_cache[c], buf);
}
void
zio_data_buf_free(void *buf, size_t size)
{
size_t c = (size - 1) >> SPA_MINBLOCKSHIFT;
VERIFY3U(c, <, SPA_MAXBLOCKSIZE >> SPA_MINBLOCKSHIFT);
kmem_cache_free(zio_data_buf_cache[c], buf);
}
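/*
 * Illustrative sketch only (not part of the original source): callers
 * must free a buffer with the same size they allocated it with, since
 * the size selects the backing kmem cache; the same applies to the
 * zio_data_buf_* variants. The 12K size here is an arbitrary example.
 *
 *     void *buf = zio_buf_alloc(12 * 1024);
 *     // ... use up to 12K of metadata ...
 *     zio_buf_free(buf, 12 * 1024);
 */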
static void
zio_abd_free(void *abd, size_t size)
{
abd_free((abd_t *)abd);
}
/*
* ==========================================================================
* Push and pop I/O transform buffers
* ==========================================================================
*/
void
zio_push_transform(zio_t *zio, abd_t *data, uint64_t size, uint64_t bufsize,
zio_transform_func_t *transform)
{
zio_transform_t *zt = kmem_alloc(sizeof (zio_transform_t), KM_SLEEP);
/*
* Ensure that anyone expecting this zio to contain a linear ABD isn't
* going to get a nasty surprise when they try to access the data.
*/
IMPLY(abd_is_linear(zio->io_abd), abd_is_linear(data));
zt->zt_orig_abd = zio->io_abd;
zt->zt_orig_size = zio->io_size;
zt->zt_bufsize = bufsize;
zt->zt_transform = transform;
zt->zt_next = zio->io_transform_stack;
zio->io_transform_stack = zt;
zio->io_abd = data;
zio->io_size = size;
}
void
zio_pop_transforms(zio_t *zio)
{
zio_transform_t *zt;
while ((zt = zio->io_transform_stack) != NULL) {
if (zt->zt_transform != NULL)
zt->zt_transform(zio,
zt->zt_orig_abd, zt->zt_orig_size);
if (zt->zt_bufsize != 0)
abd_free(zio->io_abd);
zio->io_abd = zt->zt_orig_abd;
zio->io_size = zt->zt_orig_size;
zio->io_transform_stack = zt->zt_next;
kmem_free(zt, sizeof (zio_transform_t));
}
}
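/*
 * Illustrative sketch only (not part of the original source): a read
 * of a compressed block might push a decompress transform roughly as
 * follows ("cabd" and "psize" are assumed local names):
 *
 *     abd_t *cabd = abd_alloc_sametype(zio->io_abd, psize);
 *     zio_push_transform(zio, cabd, psize, psize, zio_decompress);
 *
 * When the I/O completes, zio_pop_transforms() runs zio_decompress()
 * against the original logical buffer, frees "cabd", and restores
 * io_abd and io_size to their original values.
 */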
/*
* ==========================================================================
* I/O transform callbacks for subblocks, decompression, and decryption
* ==========================================================================
*/
static void
zio_subblock(zio_t *zio, abd_t *data, uint64_t size)
{
ASSERT(zio->io_size > size);
if (zio->io_type == ZIO_TYPE_READ)
abd_copy(data, zio->io_abd, size);
}
static void
zio_decompress(zio_t *zio, abd_t *data, uint64_t size)
{
if (zio->io_error == 0) {
void *tmp = abd_borrow_buf(data, size);
int ret = zio_decompress_data(BP_GET_COMPRESS(zio->io_bp),
zio->io_abd, tmp, zio->io_size, size);
abd_return_buf_copy(data, tmp, size);
if (ret != 0)
zio->io_error = SET_ERROR(EIO);
}
}
static void
zio_decrypt(zio_t *zio, abd_t *data, uint64_t size)
{
int ret;
void *tmp;
blkptr_t *bp = zio->io_bp;
spa_t *spa = zio->io_spa;
uint64_t dsobj = zio->io_bookmark.zb_objset;
uint64_t lsize = BP_GET_LSIZE(bp);
dmu_object_type_t ot = BP_GET_TYPE(bp);
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
ASSERT(BP_USES_CRYPT(bp));
ASSERT3U(size, !=, 0);
if (zio->io_error != 0)
return;
/*
* Verify the cksum of MACs stored in an indirect bp. It will always
* be possible to verify this since it does not require an encryption
* key.
*/
if (BP_HAS_INDIRECT_MAC_CKSUM(bp)) {
zio_crypt_decode_mac_bp(bp, mac);
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF) {
/*
* We haven't decompressed the data yet, but
* zio_crypt_do_indirect_mac_checksum() requires
* decompressed data to be able to parse out the MACs
* from the indirect block. We decompress it now and
* throw away the result after we are finished.
*/
tmp = zio_buf_alloc(lsize);
ret = zio_decompress_data(BP_GET_COMPRESS(bp),
zio->io_abd, tmp, zio->io_size, lsize);
if (ret != 0) {
ret = SET_ERROR(EIO);
goto error;
}
ret = zio_crypt_do_indirect_mac_checksum(B_FALSE,
tmp, lsize, BP_SHOULD_BYTESWAP(bp), mac);
zio_buf_free(tmp, lsize);
} else {
ret = zio_crypt_do_indirect_mac_checksum_abd(B_FALSE,
zio->io_abd, size, BP_SHOULD_BYTESWAP(bp), mac);
}
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
}
/*
* If this is an authenticated block, just check the MAC. It would be
* nice to separate this out into its own flag, but for the moment
* enum zio_flag is out of bits.
*/
if (BP_IS_AUTHENTICATED(bp)) {
if (ot == DMU_OT_OBJSET) {
ret = spa_do_crypt_objset_mac_abd(B_FALSE, spa,
dsobj, zio->io_abd, size, BP_SHOULD_BYTESWAP(bp));
} else {
zio_crypt_decode_mac_bp(bp, mac);
ret = spa_do_crypt_mac_abd(B_FALSE, spa, dsobj,
zio->io_abd, size, mac);
}
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
}
zio_crypt_decode_params_bp(bp, salt, iv);
if (ot == DMU_OT_INTENT_LOG) {
tmp = abd_borrow_buf_copy(zio->io_abd, sizeof (zil_chain_t));
zio_crypt_decode_mac_zil(tmp, mac);
abd_return_buf(zio->io_abd, tmp, sizeof (zil_chain_t));
} else {
zio_crypt_decode_mac_bp(bp, mac);
}
ret = spa_do_crypt_abd(B_FALSE, spa, dsobj, bp, bp->blk_birth,
size, data, zio->io_abd, iv, mac, salt, &no_crypt);
if (no_crypt)
abd_copy(data, zio->io_abd, size);
if (ret != 0)
goto error;
return;
error:
/* assert that the key was found unless this was speculative */
ASSERT(ret != ENOENT || (zio->io_flags & ZIO_FLAG_SPECULATIVE));
/*
* If there was a decryption / authentication error return EIO as
* the io_error. If this was not a speculative zio, create an ereport.
*/
if (ret == ECKSUM) {
zio->io_error = SET_ERROR(EIO);
if ((zio->io_flags & ZIO_FLAG_SPECULATIVE) == 0) {
zfs_ereport_post(FM_EREPORT_ZFS_AUTHENTICATION,
spa, NULL, &zio->io_bookmark, zio, 0, 0);
}
} else {
zio->io_error = ret;
}
}
/*
* ==========================================================================
* I/O parent/child relationships and pipeline interlocks
* ==========================================================================
*/
zio_t *
zio_walk_parents(zio_t *cio, zio_link_t **zl)
{
list_t *pl = &cio->io_parent_list;
*zl = (*zl == NULL) ? list_head(pl) : list_next(pl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_child == cio);
return ((*zl)->zl_parent);
}
zio_t *
zio_walk_children(zio_t *pio, zio_link_t **zl)
{
list_t *cl = &pio->io_child_list;
ASSERT(MUTEX_HELD(&pio->io_lock));
*zl = (*zl == NULL) ? list_head(cl) : list_next(cl, *zl);
if (*zl == NULL)
return (NULL);
ASSERT((*zl)->zl_parent == pio);
return ((*zl)->zl_child);
}
zio_t *
zio_unique_parent(zio_t *cio)
{
zio_link_t *zl = NULL;
zio_t *pio = zio_walk_parents(cio, &zl);
VERIFY3P(zio_walk_parents(cio, &zl), ==, NULL);
return (pio);
}
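/*
 * Illustrative sketch only (not part of the original source): the
 * walker functions above use "zl" as an iteration cursor, so a typical
 * traversal of a zio's parents looks like:
 *
 *     zio_link_t *zl = NULL;
 *     zio_t *pio;
 *
 *     while ((pio = zio_walk_parents(cio, &zl)) != NULL) {
 *         // ... examine each parent "pio" ...
 *     }
 *
 * zio_walk_children() is used the same way, but the caller must hold
 * the parent's io_lock (see the ASSERT in that function).
 */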
void
zio_add_child(zio_t *pio, zio_t *cio)
{
zio_link_t *zl = kmem_cache_alloc(zio_link_cache, KM_SLEEP);
/*
* Logical I/Os can have logical, gang, or vdev children.
* Gang I/Os can have gang or vdev children.
* Vdev I/Os can only have vdev children.
* The following ASSERT captures all of these constraints.
*/
ASSERT3S(cio->io_child_type, <=, pio->io_child_type);
zl->zl_parent = pio;
zl->zl_child = cio;
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
ASSERT(pio->io_state[ZIO_WAIT_DONE] == 0);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w] += !cio->io_state[w];
list_insert_head(&pio->io_child_list, zl);
list_insert_head(&cio->io_parent_list, zl);
pio->io_child_count++;
cio->io_parent_count++;
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
}
static void
zio_remove_child(zio_t *pio, zio_t *cio, zio_link_t *zl)
{
ASSERT(zl->zl_parent == pio);
ASSERT(zl->zl_child == cio);
mutex_enter(&pio->io_lock);
mutex_enter(&cio->io_lock);
list_remove(&pio->io_child_list, zl);
list_remove(&cio->io_parent_list, zl);
pio->io_child_count--;
cio->io_parent_count--;
mutex_exit(&cio->io_lock);
mutex_exit(&pio->io_lock);
kmem_cache_free(zio_link_cache, zl);
}
static boolean_t
zio_wait_for_children(zio_t *zio, uint8_t childbits, enum zio_wait_type wait)
{
boolean_t waiting = B_FALSE;
mutex_enter(&zio->io_lock);
ASSERT(zio->io_stall == NULL);
for (int c = 0; c < ZIO_CHILD_TYPES; c++) {
if (!(ZIO_CHILD_BIT_IS_SET(childbits, c)))
continue;
uint64_t *countp = &zio->io_children[c][wait];
if (*countp != 0) {
zio->io_stage >>= 1;
ASSERT3U(zio->io_stage, !=, ZIO_STAGE_OPEN);
zio->io_stall = countp;
waiting = B_TRUE;
break;
}
}
mutex_exit(&zio->io_lock);
return (waiting);
}
__attribute__((always_inline))
static inline void
zio_notify_parent(zio_t *pio, zio_t *zio, enum zio_wait_type wait)
{
uint64_t *countp = &pio->io_children[zio->io_child_type][wait];
int *errorp = &pio->io_child_error[zio->io_child_type];
mutex_enter(&pio->io_lock);
if (zio->io_error && !(zio->io_flags & ZIO_FLAG_DONT_PROPAGATE))
*errorp = zio_worst_error(*errorp, zio->io_error);
pio->io_reexecute |= zio->io_reexecute;
ASSERT3U(*countp, >, 0);
(*countp)--;
if (*countp == 0 && pio->io_stall == countp) {
zio_taskq_type_t type =
pio->io_stage < ZIO_STAGE_VDEV_IO_START ? ZIO_TASKQ_ISSUE :
ZIO_TASKQ_INTERRUPT;
pio->io_stall = NULL;
mutex_exit(&pio->io_lock);
/*
* Dispatch the parent zio in its own taskq so that
* the child can continue to make progress. This also
* prevents overflowing the stack when we have deeply nested
* parent-child relationships.
*/
zio_taskq_dispatch(pio, type, B_FALSE);
} else {
mutex_exit(&pio->io_lock);
}
}
static void
zio_inherit_child_errors(zio_t *zio, enum zio_child c)
{
if (zio->io_child_error[c] != 0 && zio->io_error == 0)
zio->io_error = zio->io_child_error[c];
}
int
zio_bookmark_compare(const void *x1, const void *x2)
{
const zio_t *z1 = x1;
const zio_t *z2 = x2;
if (z1->io_bookmark.zb_objset < z2->io_bookmark.zb_objset)
return (-1);
if (z1->io_bookmark.zb_objset > z2->io_bookmark.zb_objset)
return (1);
if (z1->io_bookmark.zb_object < z2->io_bookmark.zb_object)
return (-1);
if (z1->io_bookmark.zb_object > z2->io_bookmark.zb_object)
return (1);
if (z1->io_bookmark.zb_level < z2->io_bookmark.zb_level)
return (-1);
if (z1->io_bookmark.zb_level > z2->io_bookmark.zb_level)
return (1);
if (z1->io_bookmark.zb_blkid < z2->io_bookmark.zb_blkid)
return (-1);
if (z1->io_bookmark.zb_blkid > z2->io_bookmark.zb_blkid)
return (1);
if (z1 < z2)
return (-1);
if (z1 > z2)
return (1);
return (0);
}
/*
* ==========================================================================
* Create the various types of I/O (read, write, free, etc)
* ==========================================================================
*/
static zio_t *
zio_create(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, zio_done_func_t *done,
void *private, zio_type_t type, zio_priority_t priority,
enum zio_flag flags, vdev_t *vd, uint64_t offset,
const zbookmark_phys_t *zb, enum zio_stage stage,
enum zio_stage pipeline)
{
zio_t *zio;
ASSERT3U(psize, <=, SPA_MAXBLOCKSIZE);
ASSERT(P2PHASE(psize, SPA_MINBLOCKSIZE) == 0);
ASSERT(P2PHASE(offset, SPA_MINBLOCKSIZE) == 0);
ASSERT(!vd || spa_config_held(spa, SCL_STATE_ALL, RW_READER));
ASSERT(!bp || !(flags & ZIO_FLAG_CONFIG_WRITER));
ASSERT(vd || stage == ZIO_STAGE_OPEN);
IMPLY(lsize != psize, (flags & ZIO_FLAG_RAW_COMPRESS) != 0);
zio = kmem_cache_alloc(zio_cache, KM_SLEEP);
bzero(zio, sizeof (zio_t));
mutex_init(&zio->io_lock, NULL, MUTEX_NOLOCKDEP, NULL);
cv_init(&zio->io_cv, NULL, CV_DEFAULT, NULL);
list_create(&zio->io_parent_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_parent_node));
list_create(&zio->io_child_list, sizeof (zio_link_t),
offsetof(zio_link_t, zl_child_node));
metaslab_trace_init(&zio->io_alloc_list);
if (vd != NULL)
zio->io_child_type = ZIO_CHILD_VDEV;
else if (flags & ZIO_FLAG_GANG_CHILD)
zio->io_child_type = ZIO_CHILD_GANG;
else if (flags & ZIO_FLAG_DDT_CHILD)
zio->io_child_type = ZIO_CHILD_DDT;
else
zio->io_child_type = ZIO_CHILD_LOGICAL;
if (bp != NULL) {
zio->io_bp = (blkptr_t *)bp;
zio->io_bp_copy = *bp;
zio->io_bp_orig = *bp;
if (type != ZIO_TYPE_WRITE ||
zio->io_child_type == ZIO_CHILD_DDT)
zio->io_bp = &zio->io_bp_copy; /* so caller can free */
if (zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_logical = zio;
if (zio->io_child_type > ZIO_CHILD_GANG && BP_IS_GANG(bp))
pipeline |= ZIO_GANG_STAGES;
}
zio->io_spa = spa;
zio->io_txg = txg;
zio->io_done = done;
zio->io_private = private;
zio->io_type = type;
zio->io_priority = priority;
zio->io_vd = vd;
zio->io_offset = offset;
zio->io_orig_abd = zio->io_abd = data;
zio->io_orig_size = zio->io_size = psize;
zio->io_lsize = lsize;
zio->io_orig_flags = zio->io_flags = flags;
zio->io_orig_stage = zio->io_stage = stage;
zio->io_orig_pipeline = zio->io_pipeline = pipeline;
zio->io_pipeline_trace = ZIO_STAGE_OPEN;
zio->io_state[ZIO_WAIT_READY] = (stage >= ZIO_STAGE_READY);
zio->io_state[ZIO_WAIT_DONE] = (stage >= ZIO_STAGE_DONE);
if (zb != NULL)
zio->io_bookmark = *zb;
if (pio != NULL) {
if (zio->io_logical == NULL)
zio->io_logical = pio->io_logical;
if (zio->io_child_type == ZIO_CHILD_GANG)
zio->io_gang_leader = pio->io_gang_leader;
zio_add_child(pio, zio);
}
taskq_init_ent(&zio->io_tqent);
return (zio);
}
static void
zio_destroy(zio_t *zio)
{
metaslab_trace_fini(&zio->io_alloc_list);
list_destroy(&zio->io_parent_list);
list_destroy(&zio->io_child_list);
mutex_destroy(&zio->io_lock);
cv_destroy(&zio->io_cv);
kmem_cache_free(zio_cache, zio);
}
zio_t *
zio_null(zio_t *pio, spa_t *spa, vdev_t *vd, zio_done_func_t *done,
void *private, enum zio_flag flags)
{
zio_t *zio;
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_NULL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_INTERLOCK_PIPELINE);
return (zio);
}
zio_t *
zio_root(spa_t *spa, zio_done_func_t *done, void *private, enum zio_flag flags)
{
return (zio_null(NULL, spa, NULL, done, private, flags));
}
void
zfs_blkptr_verify(spa_t *spa, const blkptr_t *bp)
{
if (!DMU_OT_IS_VALID(BP_GET_TYPE(bp))) {
zfs_panic_recover("blkptr at %p has invalid TYPE %llu",
bp, (longlong_t)BP_GET_TYPE(bp));
}
if (BP_GET_CHECKSUM(bp) >= ZIO_CHECKSUM_FUNCTIONS ||
BP_GET_CHECKSUM(bp) <= ZIO_CHECKSUM_ON) {
zfs_panic_recover("blkptr at %p has invalid CHECKSUM %llu",
bp, (longlong_t)BP_GET_CHECKSUM(bp));
}
if (BP_GET_COMPRESS(bp) >= ZIO_COMPRESS_FUNCTIONS ||
BP_GET_COMPRESS(bp) <= ZIO_COMPRESS_ON) {
zfs_panic_recover("blkptr at %p has invalid COMPRESS %llu",
bp, (longlong_t)BP_GET_COMPRESS(bp));
}
if (BP_GET_LSIZE(bp) > SPA_MAXBLOCKSIZE) {
zfs_panic_recover("blkptr at %p has invalid LSIZE %llu",
bp, (longlong_t)BP_GET_LSIZE(bp));
}
if (BP_GET_PSIZE(bp) > SPA_MAXBLOCKSIZE) {
zfs_panic_recover("blkptr at %p has invalid PSIZE %llu",
bp, (longlong_t)BP_GET_PSIZE(bp));
}
if (BP_IS_EMBEDDED(bp)) {
if (BPE_GET_ETYPE(bp) > NUM_BP_EMBEDDED_TYPES) {
zfs_panic_recover("blkptr at %p has invalid ETYPE %llu",
bp, (longlong_t)BPE_GET_ETYPE(bp));
}
}
/*
* Pool-specific checks.
*
* Note: it would be nice to verify that the blk_birth and
* BP_PHYSICAL_BIRTH() are not too large. However, spa_freeze()
* allows the birth time of log blocks (and dmu_sync()-ed blocks
* that are in the log) to be arbitrarily large.
*/
for (int i = 0; i < BP_GET_NDVAS(bp); i++) {
uint64_t vdevid = DVA_GET_VDEV(&bp->blk_dva[i]);
if (vdevid >= spa->spa_root_vdev->vdev_children) {
zfs_panic_recover("blkptr at %p DVA %u has invalid "
"VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
vdev_t *vd = spa->spa_root_vdev->vdev_child[vdevid];
if (vd == NULL) {
zfs_panic_recover("blkptr at %p DVA %u has invalid "
"VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_hole_ops) {
zfs_panic_recover("blkptr at %p DVA %u has hole "
"VDEV %llu",
bp, i, (longlong_t)vdevid);
continue;
}
if (vd->vdev_ops == &vdev_missing_ops) {
/*
* "missing" vdevs are valid during import, but we
* don't have their detailed info (e.g. asize), so
* we can't perform any more checks on them.
*/
continue;
}
uint64_t offset = DVA_GET_OFFSET(&bp->blk_dva[i]);
uint64_t asize = DVA_GET_ASIZE(&bp->blk_dva[i]);
if (BP_IS_GANG(bp))
asize = vdev_psize_to_asize(vd, SPA_GANGBLOCKSIZE);
if (offset + asize > vd->vdev_asize) {
zfs_panic_recover("blkptr at %p DVA %u has invalid "
"OFFSET %llu",
bp, i, (longlong_t)offset);
}
}
}
zio_t *
zio_read(zio_t *pio, spa_t *spa, const blkptr_t *bp,
abd_t *data, uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, const zbookmark_phys_t *zb)
{
zio_t *zio;
zfs_blkptr_verify(spa, bp);
zio = zio_create(pio, spa, BP_PHYSICAL_BIRTH(bp), bp,
data, size, size, done, private,
ZIO_TYPE_READ, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_READ_PIPELINE : ZIO_READ_PIPELINE);
return (zio);
}
zio_t *
zio_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
abd_t *data, uint64_t lsize, uint64_t psize, const zio_prop_t *zp,
zio_done_func_t *ready, zio_done_func_t *children_ready,
zio_done_func_t *physdone, zio_done_func_t *done,
void *private, zio_priority_t priority, enum zio_flag flags,
const zbookmark_phys_t *zb)
{
zio_t *zio;
ASSERT(zp->zp_checksum >= ZIO_CHECKSUM_OFF &&
zp->zp_checksum < ZIO_CHECKSUM_FUNCTIONS &&
zp->zp_compress >= ZIO_COMPRESS_OFF &&
zp->zp_compress < ZIO_COMPRESS_FUNCTIONS &&
DMU_OT_IS_VALID(zp->zp_type) &&
zp->zp_level < 32 &&
zp->zp_copies > 0 &&
zp->zp_copies <= spa_max_replication(spa));
zio = zio_create(pio, spa, txg, bp, data, lsize, psize, done, private,
ZIO_TYPE_WRITE, priority, flags, NULL, 0, zb,
ZIO_STAGE_OPEN, (flags & ZIO_FLAG_DDT_CHILD) ?
ZIO_DDT_CHILD_WRITE_PIPELINE : ZIO_WRITE_PIPELINE);
zio->io_ready = ready;
zio->io_children_ready = children_ready;
zio->io_physdone = physdone;
zio->io_prop = *zp;
/*
* Data can be NULL if we are going to call zio_write_override() to
* provide the already-allocated BP. But we may need the data to
* verify a dedup hit (if requested). In this case, don't try to
* dedup (just take the already-allocated BP verbatim). Encrypted
* dedup blocks need data as well so we also disable dedup in this
* case.
*/
if (data == NULL &&
(zio->io_prop.zp_dedup_verify || zio->io_prop.zp_encrypt)) {
zio->io_prop.zp_dedup = zio->io_prop.zp_dedup_verify = B_FALSE;
}
return (zio);
}
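/*
 * Illustrative sketch: a minimal zio_prop_t for a plain (non-dedup,
 * non-encrypted, uncompressed) logical write, modeled on the gang-child
 * writes issued by zio_write_gang_block() later in this file.  The
 * wrapper itself is hypothetical.
 */
#if 0
static zio_t *
example_plain_write(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp,
    abd_t *data, uint64_t lsize, const zbookmark_phys_t *zb)
{
	zio_prop_t zp;

	bzero(&zp, sizeof (zp));
	zp.zp_checksum = ZIO_CHECKSUM_FLETCHER_4;
	zp.zp_compress = ZIO_COMPRESS_OFF;
	zp.zp_type = DMU_OT_NONE;
	zp.zp_level = 0;
	zp.zp_copies = 1;
	zp.zp_byteorder = ZFS_HOST_BYTEORDER;

	/* lsize == psize because the data is not pre-compressed. */
	return (zio_write(pio, spa, txg, bp, data, lsize, lsize, &zp,
	    NULL, NULL, NULL, NULL, NULL, ZIO_PRIORITY_ASYNC_WRITE,
	    ZIO_FLAG_CANFAIL, zb));
}
#endif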
zio_t *
zio_rewrite(zio_t *pio, spa_t *spa, uint64_t txg, blkptr_t *bp, abd_t *data,
uint64_t size, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, zbookmark_phys_t *zb)
{
zio_t *zio;
zio = zio_create(pio, spa, txg, bp, data, size, size, done, private,
ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_IO_REWRITE, NULL, 0, zb,
ZIO_STAGE_OPEN, ZIO_REWRITE_PIPELINE);
return (zio);
}
void
zio_write_override(zio_t *zio, blkptr_t *bp, int copies, boolean_t nopwrite)
{
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio->io_stage == ZIO_STAGE_OPEN);
ASSERT(zio->io_txg == spa_syncing_txg(zio->io_spa));
/*
* We must reset the io_prop to match the values that existed
* when the bp was first written by dmu_sync(), keeping in mind
* that nopwrite and dedup are mutually exclusive.
*/
zio->io_prop.zp_dedup = nopwrite ? B_FALSE : zio->io_prop.zp_dedup;
zio->io_prop.zp_nopwrite = nopwrite;
zio->io_prop.zp_copies = copies;
zio->io_bp_override = bp;
}
void
zio_free(spa_t *spa, uint64_t txg, const blkptr_t *bp)
{
+ zfs_blkptr_verify(spa, bp);
+
/*
* The check for EMBEDDED is a performance optimization. We
* process the free here (by ignoring it) rather than
* putting it on the list and then processing it in zio_free_sync().
*/
if (BP_IS_EMBEDDED(bp))
return;
metaslab_check_free(spa, bp);
/*
* Frees that are for the currently-syncing txg, are not going to be
* deferred, and which will not need to do a read (i.e. not GANG or
* DEDUP), can be processed immediately. Otherwise, put them on the
* in-memory list for later processing.
*/
if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp) ||
txg != spa->spa_syncing_txg ||
spa_sync_pass(spa) >= zfs_sync_pass_deferred_free) {
bplist_append(&spa->spa_free_bplist[txg & TXG_MASK], bp);
} else {
VERIFY0(zio_wait(zio_free_sync(NULL, spa, txg, bp, 0)));
}
}
zio_t *
zio_free_sync(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
enum zio_flag flags)
{
zio_t *zio;
enum zio_stage stage = ZIO_FREE_PIPELINE;
ASSERT(!BP_IS_HOLE(bp));
ASSERT(spa_syncing_txg(spa) == txg);
ASSERT(spa_sync_pass(spa) < zfs_sync_pass_deferred_free);
if (BP_IS_EMBEDDED(bp))
return (zio_null(pio, spa, NULL, NULL, NULL, 0));
metaslab_check_free(spa, bp);
arc_freed(spa, bp);
dsl_scan_freed(spa, bp);
/*
* GANG and DEDUP blocks can induce a read (for the gang block header,
* or the DDT), so issue them asynchronously so that this thread is
* not tied up.
*/
if (BP_IS_GANG(bp) || BP_GET_DEDUP(bp))
stage |= ZIO_STAGE_ISSUE_ASYNC;
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), NULL, NULL, ZIO_TYPE_FREE, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, stage);
return (zio);
}
zio_t *
zio_claim(zio_t *pio, spa_t *spa, uint64_t txg, const blkptr_t *bp,
zio_done_func_t *done, void *private, enum zio_flag flags)
{
zio_t *zio;
- dprintf_bp(bp, "claiming in txg %llu", txg);
+ zfs_blkptr_verify(spa, bp);
if (BP_IS_EMBEDDED(bp))
return (zio_null(pio, spa, NULL, NULL, NULL, 0));
/*
* A claim is an allocation of a specific block. Claims are needed
* to support immediate writes in the intent log. The issue is that
* immediate writes contain committed data, but in a txg that was
* *not* committed. Upon opening the pool after an unclean shutdown,
* the intent log claims all blocks that contain immediate write data
* so that the SPA knows they're in use.
*
* All claims *must* be resolved in the first txg -- before the SPA
* starts allocating blocks -- so that nothing is allocated twice.
* If txg == 0 we just verify that the block is claimable.
*/
ASSERT3U(spa->spa_uberblock.ub_rootbp.blk_birth, <, spa_first_txg(spa));
ASSERT(txg == spa_first_txg(spa) || txg == 0);
ASSERT(!BP_GET_DEDUP(bp) || !spa_writeable(spa)); /* zdb(1M) */
zio = zio_create(pio, spa, txg, bp, NULL, BP_GET_PSIZE(bp),
BP_GET_PSIZE(bp), done, private, ZIO_TYPE_CLAIM, ZIO_PRIORITY_NOW,
flags, NULL, 0, NULL, ZIO_STAGE_OPEN, ZIO_CLAIM_PIPELINE);
ASSERT0(zio->io_queued_timestamp);
return (zio);
}
zio_t *
zio_ioctl(zio_t *pio, spa_t *spa, vdev_t *vd, int cmd,
zio_done_func_t *done, void *private, enum zio_flag flags)
{
zio_t *zio;
int c;
if (vd->vdev_children == 0) {
zio = zio_create(pio, spa, 0, NULL, NULL, 0, 0, done, private,
ZIO_TYPE_IOCTL, ZIO_PRIORITY_NOW, flags, vd, 0, NULL,
ZIO_STAGE_OPEN, ZIO_IOCTL_PIPELINE);
zio->io_cmd = cmd;
} else {
zio = zio_null(pio, spa, NULL, NULL, NULL, flags);
for (c = 0; c < vd->vdev_children; c++)
zio_nowait(zio_ioctl(zio, spa, vd->vdev_child[c], cmd,
done, private, flags));
}
return (zio);
}
zio_t *
zio_read_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_READ, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_READ_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
return (zio);
}
zio_t *
zio_write_phys(zio_t *pio, vdev_t *vd, uint64_t offset, uint64_t size,
abd_t *data, int checksum, zio_done_func_t *done, void *private,
zio_priority_t priority, enum zio_flag flags, boolean_t labels)
{
zio_t *zio;
ASSERT(vd->vdev_children == 0);
ASSERT(!labels || offset + size <= VDEV_LABEL_START_SIZE ||
offset >= vd->vdev_psize - VDEV_LABEL_END_SIZE);
ASSERT3U(offset + size, <=, vd->vdev_psize);
zio = zio_create(pio, vd->vdev_spa, 0, NULL, data, size, size, done,
private, ZIO_TYPE_WRITE, priority, flags | ZIO_FLAG_PHYSICAL, vd,
offset, NULL, ZIO_STAGE_OPEN, ZIO_WRITE_PHYS_PIPELINE);
zio->io_prop.zp_checksum = checksum;
if (zio_checksum_table[checksum].ci_flags & ZCHECKSUM_FLAG_EMBEDDED) {
/*
* zec checksums are necessarily destructive -- they modify
* the end of the write buffer to hold the verifier/checksum.
* Therefore, we must make a local copy in case the data is
* being written to multiple places in parallel.
*/
abd_t *wbuf = abd_alloc_sametype(data, size);
abd_copy(wbuf, data, size);
zio_push_transform(zio, wbuf, size, size, NULL);
}
return (zio);
}
/*
* Create a child I/O to do some work for us.
*/
zio_t *
zio_vdev_child_io(zio_t *pio, blkptr_t *bp, vdev_t *vd, uint64_t offset,
abd_t *data, uint64_t size, int type, zio_priority_t priority,
enum zio_flag flags, zio_done_func_t *done, void *private)
{
enum zio_stage pipeline = ZIO_VDEV_CHILD_PIPELINE;
zio_t *zio;
- ASSERT(vd->vdev_parent ==
- (pio->io_vd ? pio->io_vd : pio->io_spa->spa_root_vdev));
+ /*
+ * vdev child I/Os do not propagate their error to the parent.
+ * Therefore, for correct operation the caller *must* check for
+ * and handle the error in the child i/o's done callback.
+ * The only exceptions are i/os that we don't care about
+ * (OPTIONAL or REPAIR).
+ */
+ ASSERT((flags & ZIO_FLAG_OPTIONAL) || (flags & ZIO_FLAG_IO_REPAIR) ||
+ done != NULL);
+
+ /*
+ * In the common case, where the parent zio was to a normal vdev,
+ * the child zio must be to a child vdev of that vdev. Otherwise,
+ * the child zio must be to a top-level vdev.
+ */
+ if (pio->io_vd != NULL && pio->io_vd->vdev_ops != &vdev_indirect_ops) {
+ ASSERT3P(vd->vdev_parent, ==, pio->io_vd);
+ } else {
+ ASSERT3P(vd, ==, vd->vdev_top);
+ }
if (type == ZIO_TYPE_READ && bp != NULL) {
/*
* If we have the bp, then the child should perform the
* checksum and the parent need not. This pushes error
* detection as close to the leaves as possible and
* eliminates redundant checksums in the interior nodes.
*/
pipeline |= ZIO_STAGE_CHECKSUM_VERIFY;
pio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
- if (vd->vdev_children == 0)
+ if (vd->vdev_ops->vdev_op_leaf) {
+ ASSERT0(vd->vdev_children);
offset += VDEV_LABEL_START_SIZE;
+ }
- flags |= ZIO_VDEV_CHILD_FLAGS(pio) | ZIO_FLAG_DONT_PROPAGATE;
+ flags |= ZIO_VDEV_CHILD_FLAGS(pio);
/*
* If we've decided to do a repair, the write is not speculative --
* even if the original read was.
*/
if (flags & ZIO_FLAG_IO_REPAIR)
flags &= ~ZIO_FLAG_SPECULATIVE;
/*
* If we're creating a child I/O that is not associated with a
* top-level vdev, then the child zio is not an allocating I/O.
* If this is a retried I/O then we ignore it since we will
* have already processed the original allocating I/O.
*/
if (flags & ZIO_FLAG_IO_ALLOCATING &&
(vd != vd->vdev_top || (flags & ZIO_FLAG_IO_RETRY))) {
ASSERTV(metaslab_class_t *mc = spa_normal_class(pio->io_spa));
ASSERT(mc->mc_alloc_throttle_enabled);
ASSERT(type == ZIO_TYPE_WRITE);
ASSERT(priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(flags & ZIO_FLAG_IO_REPAIR));
ASSERT(!(pio->io_flags & ZIO_FLAG_IO_REWRITE) ||
pio->io_child_type == ZIO_CHILD_GANG);
flags &= ~ZIO_FLAG_IO_ALLOCATING;
}
zio = zio_create(pio, pio->io_spa, pio->io_txg, bp, data, size, size,
done, private, type, priority, flags, vd, offset, &pio->io_bookmark,
ZIO_STAGE_VDEV_IO_START >> 1, pipeline);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
zio->io_physdone = pio->io_physdone;
if (vd->vdev_ops->vdev_op_leaf && zio->io_logical != NULL)
zio->io_logical->io_phys_children++;
return (zio);
}
zio_t *
zio_vdev_delegated_io(vdev_t *vd, uint64_t offset, abd_t *data, uint64_t size,
int type, zio_priority_t priority, enum zio_flag flags,
zio_done_func_t *done, void *private)
{
zio_t *zio;
ASSERT(vd->vdev_ops->vdev_op_leaf);
zio = zio_create(NULL, vd->vdev_spa, 0, NULL,
data, size, size, done, private, type, priority,
flags | ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_RETRY | ZIO_FLAG_DELEGATED,
vd, offset, NULL,
ZIO_STAGE_VDEV_IO_START >> 1, ZIO_VDEV_CHILD_PIPELINE);
return (zio);
}
void
zio_flush(zio_t *zio, vdev_t *vd)
{
zio_nowait(zio_ioctl(zio, zio->io_spa, vd, DKIOCFLUSHWRITECACHE,
NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_DONT_PROPAGATE | ZIO_FLAG_DONT_RETRY));
}
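/*
 * Illustrative sketch: flushing the write cache of every top-level vdev
 * by issuing zio_flush() for each child of the root vdev under a common
 * parent and waiting once, roughly the way spa_sync() and the ZIL use
 * this interface.  The helper name is hypothetical.
 */
#if 0
static void
example_flush_all_vdevs(spa_t *spa)
{
	zio_t *rio = zio_root(spa, NULL, NULL, ZIO_FLAG_CANFAIL);
	vdev_t *rvd = spa->spa_root_vdev;

	for (uint64_t c = 0; c < rvd->vdev_children; c++)
		zio_flush(rio, rvd->vdev_child[c]);

	(void) zio_wait(rio);
}
#endif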
void
zio_shrink(zio_t *zio, uint64_t size)
{
ASSERT3P(zio->io_executor, ==, NULL);
ASSERT3U(zio->io_orig_size, ==, zio->io_size);
ASSERT3U(size, <=, zio->io_size);
/*
* We don't shrink for raidz because of problems with the
* reconstruction when reading back less than the block size.
* Note, BP_IS_RAIDZ() assumes no compression.
*/
ASSERT(BP_GET_COMPRESS(zio->io_bp) == ZIO_COMPRESS_OFF);
if (!BP_IS_RAIDZ(zio->io_bp)) {
/* we are not doing a raw write */
ASSERT3U(zio->io_size, ==, zio->io_lsize);
zio->io_orig_size = zio->io_size = zio->io_lsize = size;
}
}
/*
* ==========================================================================
* Prepare to read and write logical blocks
* ==========================================================================
*/
static int
zio_read_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
uint64_t psize =
BP_IS_EMBEDDED(bp) ? BPE_GET_PSIZE(bp) : BP_GET_PSIZE(bp);
+ ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
+
if (BP_GET_COMPRESS(bp) != ZIO_COMPRESS_OFF &&
zio->io_child_type == ZIO_CHILD_LOGICAL &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decompress);
}
if (((BP_IS_PROTECTED(bp) && !(zio->io_flags & ZIO_FLAG_RAW_ENCRYPT)) ||
BP_HAS_INDIRECT_MAC_CKSUM(bp)) &&
zio->io_child_type == ZIO_CHILD_LOGICAL) {
zio_push_transform(zio, abd_alloc_sametype(zio->io_abd, psize),
psize, psize, zio_decrypt);
}
if (BP_IS_EMBEDDED(bp) && BPE_GET_ETYPE(bp) == BP_EMBEDDED_TYPE_DATA) {
int psize = BPE_GET_PSIZE(bp);
void *data = abd_borrow_buf(zio->io_abd, psize);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
decode_embedded_bp_compressed(bp, data);
abd_return_buf_copy(zio->io_abd, data, psize);
} else {
ASSERT(!BP_IS_EMBEDDED(bp));
+ ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
}
if (!DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) && BP_GET_LEVEL(bp) == 0)
zio->io_flags |= ZIO_FLAG_DONT_CACHE;
if (BP_GET_TYPE(bp) == DMU_OT_DDT_ZAP)
zio->io_flags |= ZIO_FLAG_DONT_CACHE;
if (BP_GET_DEDUP(bp) && zio->io_child_type == ZIO_CHILD_LOGICAL)
zio->io_pipeline = ZIO_DDT_READ_PIPELINE;
return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_write_bp_init(zio_t *zio)
{
if (!IO_IS_ALLOCATING(zio))
return (ZIO_PIPELINE_CONTINUE);
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
if (zio->io_bp_override) {
blkptr_t *bp = zio->io_bp;
zio_prop_t *zp = &zio->io_prop;
ASSERT(bp->blk_birth != zio->io_txg);
ASSERT(BP_GET_DEDUP(zio->io_bp_override) == 0);
*bp = *zio->io_bp_override;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (BP_IS_EMBEDDED(bp))
return (ZIO_PIPELINE_CONTINUE);
/*
* If we've been overridden and nopwrite is set then
* set the flag accordingly to indicate that a nopwrite
* has already occurred.
*/
if (!BP_IS_HOLE(bp) && zp->zp_nopwrite) {
ASSERT(!zp->zp_dedup);
ASSERT3U(BP_GET_CHECKSUM(bp), ==, zp->zp_checksum);
zio->io_flags |= ZIO_FLAG_NOPWRITE;
return (ZIO_PIPELINE_CONTINUE);
}
ASSERT(!zp->zp_nopwrite);
if (BP_IS_HOLE(bp) || !zp->zp_dedup)
return (ZIO_PIPELINE_CONTINUE);
ASSERT((zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP) || zp->zp_dedup_verify);
if (BP_GET_CHECKSUM(bp) == zp->zp_checksum &&
!zp->zp_encrypt) {
BP_SET_DEDUP(bp, 1);
zio->io_pipeline |= ZIO_STAGE_DDT_WRITE;
return (ZIO_PIPELINE_CONTINUE);
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
}
return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_write_compress(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_prop_t *zp = &zio->io_prop;
enum zio_compress compress = zp->zp_compress;
blkptr_t *bp = zio->io_bp;
uint64_t lsize = zio->io_lsize;
uint64_t psize = zio->io_size;
int pass = 1;
/*
* If our children haven't all reached the ready stage,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_LOGICAL_BIT |
ZIO_CHILD_GANG_BIT, ZIO_WAIT_READY)) {
return (ZIO_PIPELINE_STOP);
}
if (!IO_IS_ALLOCATING(zio))
return (ZIO_PIPELINE_CONTINUE);
if (zio->io_children_ready != NULL) {
/*
* Now that all our children are ready, run the callback
* associated with this zio in case it wants to modify the
* data to be written.
*/
ASSERT3U(zp->zp_level, >, 0);
zio->io_children_ready(zio);
}
ASSERT(zio->io_child_type != ZIO_CHILD_DDT);
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg) {
/*
* We're rewriting an existing block, which means we're
* working on behalf of spa_sync(). For spa_sync() to
* converge, it must eventually be the case that we don't
* have to allocate new blocks. But compression changes
* the blocksize, which forces a reallocate, and makes
* convergence take longer. Therefore, after the first
* few passes, stop compressing to ensure convergence.
*/
pass = spa_sync_pass(spa);
ASSERT(zio->io_txg == spa_syncing_txg(spa));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!BP_GET_DEDUP(bp));
if (pass >= zfs_sync_pass_dont_compress)
compress = ZIO_COMPRESS_OFF;
/* Make sure someone doesn't change their mind on overwrites */
ASSERT(BP_IS_EMBEDDED(bp) || MIN(zp->zp_copies + BP_IS_GANG(bp),
spa_max_replication(spa)) == BP_GET_NDVAS(bp));
}
/* If it's a compressed write that is not raw, compress the buffer. */
if (compress != ZIO_COMPRESS_OFF &&
!(zio->io_flags & ZIO_FLAG_RAW_COMPRESS)) {
void *cbuf = zio_buf_alloc(lsize);
psize = zio_compress_data(compress, zio->io_abd, cbuf, lsize);
if (psize == 0 || psize == lsize) {
compress = ZIO_COMPRESS_OFF;
zio_buf_free(cbuf, lsize);
} else if (!zp->zp_dedup && !zp->zp_encrypt &&
psize <= BPE_PAYLOAD_SIZE &&
zp->zp_level == 0 && !DMU_OT_HAS_FILL(zp->zp_type) &&
spa_feature_is_enabled(spa, SPA_FEATURE_EMBEDDED_DATA)) {
encode_embedded_bp_compressed(bp,
cbuf, compress, lsize, psize);
BPE_SET_ETYPE(bp, BP_EMBEDDED_TYPE_DATA);
BP_SET_TYPE(bp, zio->io_prop.zp_type);
BP_SET_LEVEL(bp, zio->io_prop.zp_level);
zio_buf_free(cbuf, lsize);
bp->blk_birth = zio->io_txg;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
ASSERT(spa_feature_is_active(spa,
SPA_FEATURE_EMBEDDED_DATA));
return (ZIO_PIPELINE_CONTINUE);
} else {
/*
* Round the compressed size up to the ashift
* of the smallest-ashift device, and zero the tail.
* This ensures that the compressed size of the BP
* (and thus the compressratio property) is correct,
* in that we charge for the padding used to fill out
* the last sector.
*/
ASSERT3U(spa->spa_min_ashift, >=, SPA_MINBLOCKSHIFT);
size_t rounded = (size_t)P2ROUNDUP(psize,
1ULL << spa->spa_min_ashift);
if (rounded >= lsize) {
compress = ZIO_COMPRESS_OFF;
zio_buf_free(cbuf, lsize);
psize = lsize;
} else {
abd_t *cdata = abd_get_from_buf(cbuf, lsize);
abd_take_ownership_of_buf(cdata, B_TRUE);
abd_zero_off(cdata, psize, rounded - psize);
psize = rounded;
zio_push_transform(zio, cdata,
psize, lsize, NULL);
}
}
/*
* We were unable to handle this as an override bp, treat
* it as a regular write I/O.
*/
zio->io_bp_override = NULL;
*bp = zio->io_bp_orig;
zio->io_pipeline = zio->io_orig_pipeline;
} else if ((zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) != 0 &&
zp->zp_type == DMU_OT_DNODE) {
/*
* The DMU actually relies on the zio layer's compression
* to free metadnode blocks that have had all contained
* dnodes freed. As a result, even when doing a raw
* receive, we must check whether the block can be compressed
* to a hole.
*/
psize = zio_compress_data(ZIO_COMPRESS_EMPTY,
zio->io_abd, NULL, lsize);
if (psize == 0)
compress = ZIO_COMPRESS_OFF;
} else {
ASSERT3U(psize, !=, 0);
}
/*
* The final pass of spa_sync() must be all rewrites, but the first
* few passes offer a trade-off: allocating blocks defers convergence,
* but newly allocated blocks are sequential, so they can be written
* to disk faster. Therefore, we allow the first few passes of
* spa_sync() to allocate new blocks, but force rewrites after that.
* There should only be a handful of blocks after pass 1 in any case.
*/
if (!BP_IS_HOLE(bp) && bp->blk_birth == zio->io_txg &&
BP_GET_PSIZE(bp) == psize &&
pass >= zfs_sync_pass_rewrite) {
ASSERT(psize != 0);
enum zio_stage gang_stages = zio->io_pipeline & ZIO_GANG_STAGES;
zio->io_pipeline = ZIO_REWRITE_PIPELINE | gang_stages;
zio->io_flags |= ZIO_FLAG_IO_REWRITE;
} else {
BP_ZERO(bp);
zio->io_pipeline = ZIO_WRITE_PIPELINE;
}
if (psize == 0) {
if (zio->io_bp_orig.blk_birth != 0 &&
spa_feature_is_active(spa, SPA_FEATURE_HOLE_BIRTH)) {
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_BIRTH(bp, zio->io_txg, 0);
}
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
} else {
ASSERT(zp->zp_checksum != ZIO_CHECKSUM_GANG_HEADER);
BP_SET_LSIZE(bp, lsize);
BP_SET_TYPE(bp, zp->zp_type);
BP_SET_LEVEL(bp, zp->zp_level);
BP_SET_PSIZE(bp, psize);
BP_SET_COMPRESS(bp, compress);
BP_SET_CHECKSUM(bp, zp->zp_checksum);
BP_SET_DEDUP(bp, zp->zp_dedup);
BP_SET_BYTEORDER(bp, ZFS_HOST_BYTEORDER);
if (zp->zp_dedup) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!zp->zp_encrypt ||
DMU_OT_IS_ENCRYPTED(zp->zp_type));
zio->io_pipeline = ZIO_DDT_WRITE_PIPELINE;
}
if (zp->zp_nopwrite) {
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
zio->io_pipeline |= ZIO_STAGE_NOP_WRITE;
}
}
return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_free_bp_init(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio->io_child_type == ZIO_CHILD_LOGICAL) {
if (BP_GET_DEDUP(bp))
zio->io_pipeline = ZIO_DDT_FREE_PIPELINE;
}
+ ASSERT3P(zio->io_bp, ==, &zio->io_bp_copy);
+
return (ZIO_PIPELINE_CONTINUE);
}
/*
* ==========================================================================
* Execute the I/O pipeline
* ==========================================================================
*/
static void
zio_taskq_dispatch(zio_t *zio, zio_taskq_type_t q, boolean_t cutinline)
{
spa_t *spa = zio->io_spa;
zio_type_t t = zio->io_type;
int flags = (cutinline ? TQ_FRONT : 0);
/*
* If we're a config writer or a probe, the normal issue and
* interrupt threads may all be blocked waiting for the config lock.
* In this case, select the otherwise-unused taskq for ZIO_TYPE_NULL.
*/
if (zio->io_flags & (ZIO_FLAG_CONFIG_WRITER | ZIO_FLAG_PROBE))
t = ZIO_TYPE_NULL;
/*
* A similar issue exists for the L2ARC write thread until L2ARC 2.0.
*/
if (t == ZIO_TYPE_WRITE && zio->io_vd && zio->io_vd->vdev_aux)
t = ZIO_TYPE_NULL;
/*
* If this is a high priority I/O, then use the high priority taskq if
* available.
*/
if (zio->io_priority == ZIO_PRIORITY_NOW &&
spa->spa_zio_taskq[t][q + 1].stqs_count != 0)
q++;
ASSERT3U(q, <, ZIO_TASKQ_TYPES);
/*
* NB: We are assuming that the zio can only be dispatched
* to a single taskq at a time. It would be a grievous error
* to dispatch the zio to another taskq at the same time.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(spa, t, q, (task_func_t *)zio_execute, zio,
flags, &zio->io_tqent);
}
static boolean_t
zio_taskq_member(zio_t *zio, zio_taskq_type_t q)
{
kthread_t *executor = zio->io_executor;
spa_t *spa = zio->io_spa;
for (zio_type_t t = 0; t < ZIO_TYPES; t++) {
spa_taskqs_t *tqs = &spa->spa_zio_taskq[t][q];
uint_t i;
for (i = 0; i < tqs->stqs_count; i++) {
if (taskq_member(tqs->stqs_taskq[i], executor))
return (B_TRUE);
}
}
return (B_FALSE);
}
static int
zio_issue_async(zio_t *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (ZIO_PIPELINE_STOP);
}
void
zio_interrupt(zio_t *zio)
{
zio_taskq_dispatch(zio, ZIO_TASKQ_INTERRUPT, B_FALSE);
}
void
zio_delay_interrupt(zio_t *zio)
{
/*
* The timeout_generic() function isn't defined in userspace, so
* rather than trying to implement the function, the zio delay
* functionality has been disabled for userspace builds.
*/
#ifdef _KERNEL
/*
* If io_target_timestamp is zero, then no delay has been registered
* for this I/O, so jump to the end of this function and "skip" the
* delay, issuing it directly to the zio layer.
*/
if (zio->io_target_timestamp != 0) {
hrtime_t now = gethrtime();
if (now >= zio->io_target_timestamp) {
/*
* This IO has already taken longer than the target
* delay to complete, so we don't want to delay it
* any longer; we "miss" the delay and issue it
* directly to the zio layer. This is likely due to
* the target latency being set to a value less than
* the underlying hardware can satisfy (e.g. delay
* set to 1ms, but the disks take 10ms to complete an
* IO request).
*/
DTRACE_PROBE2(zio__delay__miss, zio_t *, zio,
hrtime_t, now);
zio_interrupt(zio);
} else {
taskqid_t tid;
hrtime_t diff = zio->io_target_timestamp - now;
clock_t expire_at_tick = ddi_get_lbolt() +
NSEC_TO_TICK(diff);
DTRACE_PROBE3(zio__delay__hit, zio_t *, zio,
hrtime_t, now, hrtime_t, diff);
if (NSEC_TO_TICK(diff) == 0) {
/* Our delay is less than a jiffy - just spin */
zfs_sleep_until(zio->io_target_timestamp);
} else {
/*
* Use taskq_dispatch_delay() in the place of
* OpenZFS's timeout_generic().
*/
tid = taskq_dispatch_delay(system_taskq,
(task_func_t *)zio_interrupt,
zio, TQ_NOSLEEP, expire_at_tick);
if (tid == TASKQID_INVALID) {
/*
* Couldn't allocate a task. Just
* finish the zio without a delay.
*/
zio_interrupt(zio);
}
}
}
return;
}
#endif
DTRACE_PROBE1(zio__delay__skip, zio_t *, zio);
zio_interrupt(zio);
}
static void
zio_deadman_impl(zio_t *pio)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
vdev_t *vd = pio->io_vd;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf) {
vdev_queue_t *vq = &vd->vdev_queue;
zbookmark_phys_t *zb = &pio->io_bookmark;
uint64_t delta = gethrtime() - pio->io_timestamp;
uint64_t failmode = spa_get_deadman_failmode(pio->io_spa);
zfs_dbgmsg("slow zio: zio=%p timestamp=%llu "
"delta=%llu queued=%llu io=%llu "
"path=%s last=%llu "
"type=%d priority=%d flags=0x%x "
"stage=0x%x pipeline=0x%x pipeline-trace=0x%x "
"objset=%llu object=%llu level=%llu blkid=%llu "
"offset=%llu size=%llu error=%d",
pio, pio->io_timestamp,
delta, pio->io_delta, pio->io_delay,
vd->vdev_path, vq->vq_io_complete_ts,
pio->io_type, pio->io_priority, pio->io_flags,
pio->io_stage, pio->io_pipeline, pio->io_pipeline_trace,
zb->zb_objset, zb->zb_object, zb->zb_level, zb->zb_blkid,
pio->io_offset, pio->io_size, pio->io_error);
zfs_ereport_post(FM_EREPORT_ZFS_DEADMAN,
pio->io_spa, vd, zb, pio, 0, 0);
if (failmode == ZIO_FAILURE_MODE_CONTINUE &&
taskq_empty_ent(&pio->io_tqent)) {
zio_interrupt(pio);
}
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_deadman_impl(cio);
}
mutex_exit(&pio->io_lock);
}
/*
* Log the critical information describing this zio and all of its children
* using the zfs_dbgmsg() interface then post deadman event for the ZED.
*/
void
zio_deadman(zio_t *pio, char *tag)
{
spa_t *spa = pio->io_spa;
char *name = spa_name(spa);
if (!zfs_deadman_enabled || spa_suspended(spa))
return;
zio_deadman_impl(pio);
switch (spa_get_deadman_failmode(spa)) {
case ZIO_FAILURE_MODE_WAIT:
zfs_dbgmsg("%s waiting for hung I/O to pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_CONTINUE:
zfs_dbgmsg("%s restarting hung I/O for pool '%s'", tag, name);
break;
case ZIO_FAILURE_MODE_PANIC:
fm_panic("%s determined I/O to pool '%s' is hung.", tag, name);
break;
}
}
/*
* Execute the I/O pipeline until one of the following occurs:
* (1) the I/O completes; (2) the pipeline stalls waiting for
* dependent child I/Os; (3) the I/O issues, so we're waiting
* for an I/O completion interrupt; (4) the I/O is delegated by
* vdev-level caching or aggregation; (5) the I/O is deferred
* due to vdev-level queueing; (6) the I/O is handed off to
* another thread. In all cases, the pipeline stops whenever
* there's no CPU work; it never burns a thread in cv_wait_io().
*
* There's no locking on io_stage because there's no legitimate way
* for multiple threads to be attempting to process the same I/O.
*/
static zio_pipe_stage_t *zio_pipeline[];
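/*
 * Illustrative sketch: how __zio_execute() below finds the next enabled
 * stage.  Stages are one-hot bits in ascending pipeline order, so
 * advancing is a shift loop over the pipeline mask.  The helper name is
 * hypothetical.
 */
#if 0
static enum zio_stage
example_next_stage(enum zio_stage cur, enum zio_stage pipeline)
{
	enum zio_stage stage = cur;

	do {
		stage <<= 1;
	} while ((stage & pipeline) == 0);

	return (stage);
}
#endif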
/*
* zio_execute() is a wrapper around the static function
* __zio_execute() so that we can force __zio_execute() to be
* inlined. This reduces stack overhead which is important
* because __zio_execute() is called recursively in several zio
* code paths. zio_execute() itself cannot be inlined because
* it is externally visible.
*/
void
zio_execute(zio_t *zio)
{
fstrans_cookie_t cookie;
cookie = spl_fstrans_mark();
__zio_execute(zio);
spl_fstrans_unmark(cookie);
}
/*
* Used to determine if in the current context the stack is sized large
* enough to allow zio_execute() to be called recursively. A minimum
* stack size of 16K is required to avoid needing to re-dispatch the zio.
*/
boolean_t
zio_execute_stack_check(zio_t *zio)
{
#if !defined(HAVE_LARGE_STACKS)
dsl_pool_t *dp = spa_get_dsl(zio->io_spa);
/* Executing in txg_sync_thread() context. */
if (dp && curthread == dp->dp_tx.tx_sync_thread)
return (B_TRUE);
/* Pool initialization outside of zio_taskq context. */
if (dp && spa_is_initializing(dp->dp_spa) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE) &&
!zio_taskq_member(zio, ZIO_TASKQ_ISSUE_HIGH))
return (B_TRUE);
#endif /* HAVE_LARGE_STACKS */
return (B_FALSE);
}
__attribute__((always_inline))
static inline void
__zio_execute(zio_t *zio)
{
zio->io_executor = curthread;
ASSERT3U(zio->io_queued_timestamp, >, 0);
while (zio->io_stage < ZIO_STAGE_DONE) {
enum zio_stage pipeline = zio->io_pipeline;
enum zio_stage stage = zio->io_stage;
int rv;
ASSERT(!MUTEX_HELD(&zio->io_lock));
ASSERT(ISP2(stage));
ASSERT(zio->io_stall == NULL);
do {
stage <<= 1;
} while ((stage & pipeline) == 0);
ASSERT(stage <= ZIO_STAGE_DONE);
/*
* If we are in interrupt context and this pipeline stage
* will grab a config lock that is held across I/O,
* or may wait for an I/O that needs an interrupt thread
* to complete, issue async to avoid deadlock.
*
* For VDEV_IO_START, we cut in line so that the io will
* be sent to disk promptly.
*/
if ((stage & ZIO_BLOCKING_STAGES) && zio->io_vd == NULL &&
zio_taskq_member(zio, ZIO_TASKQ_INTERRUPT)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
/*
* If the current context doesn't have large enough stacks
* the zio must be issued asynchronously to prevent overflow.
*/
if (zio_execute_stack_check(zio)) {
boolean_t cut = (stage == ZIO_STAGE_VDEV_IO_START) ?
zio_requeue_io_start_cut_in_line : B_FALSE;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, cut);
return;
}
zio->io_stage = stage;
zio->io_pipeline_trace |= zio->io_stage;
rv = zio_pipeline[highbit64(stage) - 1](zio);
if (rv == ZIO_PIPELINE_STOP)
return;
ASSERT(rv == ZIO_PIPELINE_CONTINUE);
}
}
/*
* ==========================================================================
* Initiate I/O, either sync or async
* ==========================================================================
*/
int
zio_wait(zio_t *zio)
{
long timeout = MSEC_TO_TICK(zfs_deadman_ziotime_ms);
int error;
ASSERT3S(zio->io_stage, ==, ZIO_STAGE_OPEN);
ASSERT3P(zio->io_executor, ==, NULL);
zio->io_waiter = curthread;
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
mutex_enter(&zio->io_lock);
while (zio->io_executor != NULL) {
error = cv_timedwait_io(&zio->io_cv, &zio->io_lock,
ddi_get_lbolt() + timeout);
if (zfs_deadman_enabled && error == -1 &&
gethrtime() - zio->io_queued_timestamp >
spa_deadman_ziotime(zio->io_spa)) {
mutex_exit(&zio->io_lock);
timeout = MSEC_TO_TICK(zfs_deadman_checktime_ms);
zio_deadman(zio, FTAG);
mutex_enter(&zio->io_lock);
}
}
mutex_exit(&zio->io_lock);
error = zio->io_error;
zio_destroy(zio);
return (error);
}
void
zio_nowait(zio_t *zio)
{
ASSERT3P(zio->io_executor, ==, NULL);
if (zio->io_child_type == ZIO_CHILD_LOGICAL &&
zio_unique_parent(zio) == NULL) {
zio_t *pio;
/*
* This is a logical async I/O with no parent to wait for it.
* We add it to the spa_async_root_zio "Godfather" I/O which
* will ensure they complete prior to unloading the pool.
*/
spa_t *spa = zio->io_spa;
kpreempt_disable();
pio = spa->spa_async_zio_root[CPU_SEQID];
kpreempt_enable();
zio_add_child(pio, zio);
}
ASSERT0(zio->io_queued_timestamp);
zio->io_queued_timestamp = gethrtime();
__zio_execute(zio);
}
/*
* ==========================================================================
* Reexecute, cancel, or suspend/resume failed I/O
* ==========================================================================
*/
static void
zio_reexecute(zio_t *pio)
{
zio_t *cio, *cio_next;
ASSERT(pio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(pio->io_orig_stage == ZIO_STAGE_OPEN);
ASSERT(pio->io_gang_leader == NULL);
ASSERT(pio->io_gang_tree == NULL);
pio->io_flags = pio->io_orig_flags;
pio->io_stage = pio->io_orig_stage;
pio->io_pipeline = pio->io_orig_pipeline;
pio->io_reexecute = 0;
pio->io_flags |= ZIO_FLAG_REEXECUTED;
pio->io_pipeline_trace = 0;
pio->io_error = 0;
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_state[w] = 0;
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
pio->io_child_error[c] = 0;
if (IO_IS_ALLOCATING(pio))
BP_ZERO(pio->io_bp);
/*
* As we reexecute pio's children, new children could be created.
* New children go to the head of pio's io_child_list, however,
* so we will (correctly) not reexecute them. The key is that
* the remainder of pio's io_child_list, from 'cio_next' onward,
* cannot be affected by any side effects of reexecuting 'cio'.
*/
zio_link_t *zl = NULL;
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
pio->io_children[cio->io_child_type][w]++;
mutex_exit(&pio->io_lock);
zio_reexecute(cio);
mutex_enter(&pio->io_lock);
}
mutex_exit(&pio->io_lock);
/*
* Now that all children have been reexecuted, execute the parent.
* We don't reexecute "The Godfather" I/O here as it's the
* responsibility of the caller to wait on it.
*/
if (!(pio->io_flags & ZIO_FLAG_GODFATHER)) {
pio->io_queued_timestamp = gethrtime();
__zio_execute(pio);
}
}
void
zio_suspend(spa_t *spa, zio_t *zio, zio_suspend_reason_t reason)
{
if (spa_get_failmode(spa) == ZIO_FAILURE_MODE_PANIC)
fm_panic("Pool '%s' has encountered an uncorrectable I/O "
"failure and the failure mode property for this pool "
"is set to panic.", spa_name(spa));
cmn_err(CE_WARN, "Pool '%s' has encountered an uncorrectable I/O "
"failure and has been suspended.\n", spa_name(spa));
zfs_ereport_post(FM_EREPORT_ZFS_IO_FAILURE, spa, NULL,
NULL, NULL, 0, 0);
mutex_enter(&spa->spa_suspend_lock);
if (spa->spa_suspend_zio_root == NULL)
spa->spa_suspend_zio_root = zio_root(spa, NULL, NULL,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_GODFATHER);
spa->spa_suspended = reason;
if (zio != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
ASSERT(zio != spa->spa_suspend_zio_root);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ASSERT(zio_unique_parent(zio) == NULL);
ASSERT(zio->io_stage == ZIO_STAGE_DONE);
zio_add_child(spa->spa_suspend_zio_root, zio);
}
mutex_exit(&spa->spa_suspend_lock);
}
int
zio_resume(spa_t *spa)
{
zio_t *pio;
/*
* Reexecute all previously suspended i/o.
*/
mutex_enter(&spa->spa_suspend_lock);
spa->spa_suspended = ZIO_SUSPEND_NONE;
cv_broadcast(&spa->spa_suspend_cv);
pio = spa->spa_suspend_zio_root;
spa->spa_suspend_zio_root = NULL;
mutex_exit(&spa->spa_suspend_lock);
if (pio == NULL)
return (0);
zio_reexecute(pio);
return (zio_wait(pio));
}
void
zio_resume_wait(spa_t *spa)
{
mutex_enter(&spa->spa_suspend_lock);
while (spa_suspended(spa))
cv_wait(&spa->spa_suspend_cv, &spa->spa_suspend_lock);
mutex_exit(&spa->spa_suspend_lock);
}
/*
* ==========================================================================
* Gang blocks.
*
* A gang block is a collection of small blocks that looks to the DMU
* like one large block. When zio_dva_allocate() cannot find a block
* of the requested size, due to either severe fragmentation or the pool
* being nearly full, it calls zio_write_gang_block() to construct the
* block from smaller fragments.
*
* A gang block consists of a gang header (zio_gbh_phys_t) and up to
* three (SPA_GBH_NBLKPTRS) gang members. The gang header is just like
* an indirect block: it's an array of block pointers. It consumes
* only one sector and hence is allocatable regardless of fragmentation.
* The gang header's bps point to its gang members, which hold the data.
*
* Gang blocks are self-checksumming, using the bp's <vdev, offset, txg>
* as the verifier to ensure uniqueness of the SHA256 checksum.
* Critically, the gang block bp's blk_cksum is the checksum of the data,
* not the gang header. This ensures that data block signatures (needed for
* deduplication) are independent of how the block is physically stored.
*
* Gang blocks can be nested: a gang member may itself be a gang block.
* Thus every gang block is a tree in which root and all interior nodes are
* gang headers, and the leaves are normal blocks that contain user data.
* The root of the gang tree is called the gang leader.
*
* To perform any operation (read, rewrite, free, claim) on a gang block,
* zio_gang_assemble() first assembles the gang tree (minus data leaves)
* in the io_gang_tree field of the original logical i/o by recursively
* reading the gang leader and all gang headers below it. This yields
* an in-core tree containing the contents of every gang header and the
* bps for every constituent of the gang block.
*
* With the gang tree now assembled, zio_gang_issue() just walks the gang tree
* and invokes a callback on each bp. To free a gang block, zio_gang_issue()
* calls zio_free_gang() -- a trivial wrapper around zio_free() -- for each bp.
* zio_claim_gang() provides a similarly trivial wrapper for zio_claim().
* zio_read_gang() is a wrapper around zio_read() that omits reading gang
* headers, since we already have those in io_gang_tree. zio_rewrite_gang()
* performs a zio_rewrite() of the data or, for gang headers, a zio_rewrite()
* of the gang header plus zio_checksum_compute() of the data to update the
* gang header's blk_cksum as described above.
*
* The two-phase assemble/issue model solves the problem of partial failure --
* what if you'd freed part of a gang block but then couldn't read the
* gang header for another part? Assembling the entire gang tree first
* ensures that all the necessary gang header I/O has succeeded before
* starting the actual work of free, claim, or write. Once the gang tree
* is assembled, free and claim are in-memory operations that cannot fail.
*
* In the event that a gang write fails, zio_dva_unallocate() walks the
* gang tree to immediately free (i.e. insert back into the space map)
* everything we've allocated. This ensures that we don't get ENOSPC
* errors during repeated suspend/resume cycles due to a flaky device.
*
* Gang rewrites only happen during sync-to-convergence. If we can't assemble
* the gang tree, we won't modify the block, so we can safely defer the free
* (knowing that the block is still intact). If we *can* assemble the gang
* tree, then even if some of the rewrites fail, zio_dva_unallocate() will free
* each constituent bp and we can allocate a new block on the next sync pass.
*
* In all cases, the gang tree allows complete recovery from partial failure.
* ==========================================================================
*/
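/*
 * Illustrative sketch: one level of a gang tree is just the zg_blkptr[]
 * array inside the one-sector gang header.  Walking it -- and recursing
 * where BP_IS_GANG() is set -- is exactly what zio_gang_tree_assemble()
 * and zio_gang_tree_issue() below do.  This helper, which merely sums
 * the members' physical sizes, is hypothetical.
 */
#if 0
static uint64_t
example_gang_level_psize(const zio_gbh_phys_t *gbh)
{
	uint64_t psize = 0;

	for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
		const blkptr_t *gbp = &gbh->zg_blkptr[g];

		if (!BP_IS_HOLE(gbp))
			psize += BP_GET_PSIZE(gbp);
	}

	return (psize);
}
#endif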
static void
zio_gang_issue_func_done(zio_t *zio)
{
abd_put(zio->io_abd);
}
static zio_t *
zio_read_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
if (gn != NULL)
return (pio);
return (zio_read(pio, pio->io_spa, bp, abd_get_offset(data, offset),
BP_GET_PSIZE(bp), zio_gang_issue_func_done,
NULL, pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark));
}
static zio_t *
zio_rewrite_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
zio_t *zio;
if (gn != NULL) {
abd_t *gbh_abd =
abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
gbh_abd, SPA_GANGBLOCKSIZE, zio_gang_issue_func_done, NULL,
pio->io_priority, ZIO_GANG_CHILD_FLAGS(pio),
&pio->io_bookmark);
/*
* As we rewrite each gang header, the pipeline will compute
* a new gang block header checksum for it; but no one will
* compute a new data checksum, so we do that here. The one
* exception is the gang leader: the pipeline already computed
* its data checksum because that stage precedes gang assembly.
* (Presently, nothing actually uses interior data checksums;
* this is just good hygiene.)
*/
if (gn != pio->io_gang_leader->io_gang_tree) {
abd_t *buf = abd_get_offset(data, offset);
zio_checksum_compute(zio, BP_GET_CHECKSUM(bp),
buf, BP_GET_PSIZE(bp));
abd_put(buf);
}
/*
* If we are here to damage data for testing purposes,
* leave the GBH alone so that we can detect the damage.
*/
if (pio->io_gang_leader->io_flags & ZIO_FLAG_INDUCE_DAMAGE)
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
} else {
zio = zio_rewrite(pio, pio->io_spa, pio->io_txg, bp,
abd_get_offset(data, offset), BP_GET_PSIZE(bp),
zio_gang_issue_func_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
}
return (zio);
}
/* ARGSUSED */
static zio_t *
zio_free_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
return (zio_free_sync(pio, pio->io_spa, pio->io_txg, bp,
ZIO_GANG_CHILD_FLAGS(pio)));
}
/* ARGSUSED */
static zio_t *
zio_claim_gang(zio_t *pio, blkptr_t *bp, zio_gang_node_t *gn, abd_t *data,
uint64_t offset)
{
return (zio_claim(pio, pio->io_spa, pio->io_txg, bp,
NULL, NULL, ZIO_GANG_CHILD_FLAGS(pio)));
}
static zio_gang_issue_func_t *zio_gang_issue_func[ZIO_TYPES] = {
NULL,
zio_read_gang,
zio_rewrite_gang,
zio_free_gang,
zio_claim_gang,
NULL
};
static void zio_gang_tree_assemble_done(zio_t *zio);
static zio_gang_node_t *
zio_gang_node_alloc(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn;
ASSERT(*gnpp == NULL);
gn = kmem_zalloc(sizeof (*gn), KM_SLEEP);
gn->gn_gbh = zio_buf_alloc(SPA_GANGBLOCKSIZE);
*gnpp = gn;
return (gn);
}
static void
zio_gang_node_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
ASSERT(gn->gn_child[g] == NULL);
zio_buf_free(gn->gn_gbh, SPA_GANGBLOCKSIZE);
kmem_free(gn, sizeof (*gn));
*gnpp = NULL;
}
static void
zio_gang_tree_free(zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = *gnpp;
if (gn == NULL)
return;
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++)
zio_gang_tree_free(&gn->gn_child[g]);
zio_gang_node_free(gnpp);
}
static void
zio_gang_tree_assemble(zio_t *gio, blkptr_t *bp, zio_gang_node_t **gnpp)
{
zio_gang_node_t *gn = zio_gang_node_alloc(gnpp);
abd_t *gbh_abd = abd_get_from_buf(gn->gn_gbh, SPA_GANGBLOCKSIZE);
ASSERT(gio->io_gang_leader == gio);
ASSERT(BP_IS_GANG(bp));
zio_nowait(zio_read(gio, gio->io_spa, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_gang_tree_assemble_done, gn, gio->io_priority,
ZIO_GANG_CHILD_FLAGS(gio), &gio->io_bookmark));
}
static void
zio_gang_tree_assemble_done(zio_t *zio)
{
zio_t *gio = zio->io_gang_leader;
zio_gang_node_t *gn = zio->io_private;
blkptr_t *bp = zio->io_bp;
ASSERT(gio == zio_unique_parent(zio));
ASSERT(zio->io_child_count == 0);
if (zio->io_error)
return;
/* this ABD was created from a linear buf in zio_gang_tree_assemble */
if (BP_SHOULD_BYTESWAP(bp))
byteswap_uint64_array(abd_to_buf(zio->io_abd), zio->io_size);
ASSERT3P(abd_to_buf(zio->io_abd), ==, gn->gn_gbh);
ASSERT(zio->io_size == SPA_GANGBLOCKSIZE);
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
abd_put(zio->io_abd);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (!BP_IS_GANG(gbp))
continue;
zio_gang_tree_assemble(gio, gbp, &gn->gn_child[g]);
}
}
static void
zio_gang_tree_issue(zio_t *pio, zio_gang_node_t *gn, blkptr_t *bp, abd_t *data,
uint64_t offset)
{
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
ASSERT(BP_IS_GANG(bp) == !!gn);
ASSERT(BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(gio->io_bp));
ASSERT(BP_GET_LSIZE(bp) == BP_GET_PSIZE(bp) || gn == gio->io_gang_tree);
/*
* If you're a gang header, your data is in gn->gn_gbh.
* If you're a gang member, your data is in 'data' and gn == NULL.
*/
zio = zio_gang_issue_func[gio->io_type](pio, bp, gn, data, offset);
if (gn != NULL) {
ASSERT(gn->gn_gbh->zg_tail.zec_magic == ZEC_MAGIC);
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
blkptr_t *gbp = &gn->gn_gbh->zg_blkptr[g];
if (BP_IS_HOLE(gbp))
continue;
zio_gang_tree_issue(zio, gn->gn_child[g], gbp, data,
offset);
offset += BP_GET_PSIZE(gbp);
}
}
if (gn == gio->io_gang_tree)
ASSERT3U(gio->io_size, ==, offset);
if (zio != pio)
zio_nowait(zio);
}
static int
zio_gang_assemble(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == NULL);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
zio_gang_tree_assemble(zio, bp, &zio->io_gang_tree);
return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_gang_issue(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
}
ASSERT(BP_IS_GANG(bp) && zio->io_gang_leader == zio);
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
if (zio->io_child_error[ZIO_CHILD_GANG] == 0)
zio_gang_tree_issue(zio, zio->io_gang_tree, bp, zio->io_abd,
0);
else
zio_gang_tree_free(&zio->io_gang_tree);
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
return (ZIO_PIPELINE_CONTINUE);
}
static void
zio_write_gang_member_ready(zio_t *zio)
{
zio_t *pio = zio_unique_parent(zio);
dva_t *cdva = zio->io_bp->blk_dva;
dva_t *pdva = pio->io_bp->blk_dva;
uint64_t asize;
ASSERTV(zio_t *gio = zio->io_gang_leader);
if (BP_IS_HOLE(zio->io_bp))
return;
ASSERT(BP_IS_HOLE(&zio->io_bp_orig));
ASSERT(zio->io_child_type == ZIO_CHILD_GANG);
ASSERT3U(zio->io_prop.zp_copies, ==, gio->io_prop.zp_copies);
ASSERT3U(zio->io_prop.zp_copies, <=, BP_GET_NDVAS(zio->io_bp));
ASSERT3U(pio->io_prop.zp_copies, <=, BP_GET_NDVAS(pio->io_bp));
ASSERT3U(BP_GET_NDVAS(zio->io_bp), <=, BP_GET_NDVAS(pio->io_bp));
mutex_enter(&pio->io_lock);
for (int d = 0; d < BP_GET_NDVAS(zio->io_bp); d++) {
ASSERT(DVA_GET_GANG(&pdva[d]));
asize = DVA_GET_ASIZE(&pdva[d]);
asize += DVA_GET_ASIZE(&cdva[d]);
DVA_SET_ASIZE(&pdva[d], asize);
}
mutex_exit(&pio->io_lock);
}
static void
zio_write_gang_done(zio_t *zio)
{
abd_put(zio->io_abd);
}
static int
zio_write_gang_block(zio_t *pio)
{
spa_t *spa = pio->io_spa;
metaslab_class_t *mc = spa_normal_class(spa);
blkptr_t *bp = pio->io_bp;
zio_t *gio = pio->io_gang_leader;
zio_t *zio;
zio_gang_node_t *gn, **gnpp;
zio_gbh_phys_t *gbh;
abd_t *gbh_abd;
uint64_t txg = pio->io_txg;
uint64_t resid = pio->io_size;
uint64_t lsize;
int copies = gio->io_prop.zp_copies;
int gbh_copies;
zio_prop_t zp;
int error;
/*
* encrypted blocks need DVA[2] free so encrypted gang headers can't
* have a third copy.
*/
gbh_copies = MIN(copies + 1, spa_max_replication(spa));
if (gio->io_prop.zp_encrypt && gbh_copies >= SPA_DVAS_PER_BP)
gbh_copies = SPA_DVAS_PER_BP - 1;
int flags = METASLAB_HINTBP_FAVOR | METASLAB_GANG_HEADER;
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
flags |= METASLAB_ASYNC_ALLOC;
VERIFY(refcount_held(&mc->mc_alloc_slots, pio));
/*
* The logical zio has already placed a reservation for
* 'copies' allocation slots but gang blocks may require
* additional copies. These additional copies
* (i.e. gbh_copies - copies) are guaranteed to succeed
* since metaslab_class_throttle_reserve() always allows
* additional reservations for gang blocks.
*/
VERIFY(metaslab_class_throttle_reserve(mc, gbh_copies - copies,
pio, flags));
}
error = metaslab_alloc(spa, mc, SPA_GANGBLOCKSIZE,
bp, gbh_copies, txg, pio == gio ? NULL : gio->io_bp, flags,
&pio->io_alloc_list, pio);
if (error) {
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
/*
* If we failed to allocate the gang block header then
* we remove any additional allocation reservations that
* we placed here. The original reservation will
* be removed when the logical I/O goes to the ready
* stage.
*/
metaslab_class_throttle_unreserve(mc,
gbh_copies - copies, pio);
}
pio->io_error = error;
return (ZIO_PIPELINE_CONTINUE);
}
if (pio == gio) {
gnpp = &gio->io_gang_tree;
} else {
gnpp = pio->io_private;
ASSERT(pio->io_ready == zio_write_gang_member_ready);
}
gn = zio_gang_node_alloc(gnpp);
gbh = gn->gn_gbh;
bzero(gbh, SPA_GANGBLOCKSIZE);
gbh_abd = abd_get_from_buf(gbh, SPA_GANGBLOCKSIZE);
/*
* Create the gang header.
*/
zio = zio_rewrite(pio, spa, txg, bp, gbh_abd, SPA_GANGBLOCKSIZE,
zio_write_gang_done, NULL, pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
/*
* Create and nowait the gang children.
*/
for (int g = 0; resid != 0; resid -= lsize, g++) {
lsize = P2ROUNDUP(resid / (SPA_GBH_NBLKPTRS - g),
SPA_MINBLOCKSIZE);
ASSERT(lsize >= SPA_MINBLOCKSIZE && lsize <= resid);
zp.zp_checksum = gio->io_prop.zp_checksum;
zp.zp_compress = ZIO_COMPRESS_OFF;
zp.zp_type = DMU_OT_NONE;
zp.zp_level = 0;
zp.zp_copies = gio->io_prop.zp_copies;
zp.zp_dedup = B_FALSE;
zp.zp_dedup_verify = B_FALSE;
zp.zp_nopwrite = B_FALSE;
zp.zp_encrypt = gio->io_prop.zp_encrypt;
zp.zp_byteorder = gio->io_prop.zp_byteorder;
bzero(zp.zp_salt, ZIO_DATA_SALT_LEN);
bzero(zp.zp_iv, ZIO_DATA_IV_LEN);
bzero(zp.zp_mac, ZIO_DATA_MAC_LEN);
zio_t *cio = zio_write(zio, spa, txg, &gbh->zg_blkptr[g],
abd_get_offset(pio->io_abd, pio->io_size - resid), lsize,
lsize, &zp, zio_write_gang_member_ready, NULL, NULL,
zio_write_gang_done, &gn->gn_child[g], pio->io_priority,
ZIO_GANG_CHILD_FLAGS(pio), &pio->io_bookmark);
if (pio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(pio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(!(pio->io_flags & ZIO_FLAG_NODATA));
/*
* Gang children won't throttle but we should
* account for their work, so reserve an allocation
* slot for them here.
*/
VERIFY(metaslab_class_throttle_reserve(mc,
zp.zp_copies, cio, flags));
}
zio_nowait(cio);
}
/*
* Set pio's pipeline to just wait for zio to finish.
*/
pio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
/*
* We didn't allocate this bp, so make sure it doesn't get unmarked.
*/
pio->io_flags &= ~ZIO_FLAG_FASTWRITE;
zio_nowait(zio);
return (ZIO_PIPELINE_CONTINUE);
}
/*
* The zio_nop_write stage in the pipeline determines if allocating a
* new bp is necessary. The nopwrite feature can handle writes in
* either syncing or open context (i.e. zil writes) and as a result is
* mutually exclusive with dedup.
*
* By leveraging a cryptographically secure checksum, such as SHA256, we
* can compare the checksums of the new data and the old to determine if
* allocating a new block is required. Note that our requirements for
* cryptographic strength are fairly weak: there can't be any accidental
* hash collisions, but we don't need to be secure against intentional
* (malicious) collisions. To trigger a nopwrite, you have to be able
* to write the file to begin with, and triggering an incorrect (hash
* collision) nopwrite is no worse than simply writing to the file.
* That said, there are no known attacks against the checksum algorithms
* used for nopwrite, assuming that the salt and the checksums
* themselves remain secret.
*/
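/*
 * Illustrative sketch: the core nopwrite decision distilled from
 * zio_nop_write() below -- the old bp must exist, the storage-relevant
 * properties must match, and the (cryptographically strong) checksums
 * of the old and new data must be equal.  The helper is hypothetical.
 */
#if 0
static boolean_t
example_nopwrite_possible(const blkptr_t *bp, const blkptr_t *bp_orig)
{
	return (!BP_IS_HOLE(bp_orig) &&
	    BP_GET_CHECKSUM(bp) == BP_GET_CHECKSUM(bp_orig) &&
	    BP_GET_COMPRESS(bp) == BP_GET_COMPRESS(bp_orig) &&
	    BP_GET_DEDUP(bp) == BP_GET_DEDUP(bp_orig) &&
	    ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum));
}
#endif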
static int
zio_nop_write(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
blkptr_t *bp_orig = &zio->io_bp_orig;
zio_prop_t *zp = &zio->io_prop;
ASSERT(BP_GET_LEVEL(bp) == 0);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(zp->zp_nopwrite);
ASSERT(!zp->zp_dedup);
ASSERT(zio->io_bp_override == NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Check to see if the original bp and the new bp have matching
* characteristics (i.e. same checksum, compression algorithms, etc).
* If they don't then just continue with the pipeline which will
* allocate a new bp.
*/
if (BP_IS_HOLE(bp_orig) ||
!(zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE) ||
BP_IS_ENCRYPTED(bp) || BP_IS_ENCRYPTED(bp_orig) ||
BP_GET_CHECKSUM(bp) != BP_GET_CHECKSUM(bp_orig) ||
BP_GET_COMPRESS(bp) != BP_GET_COMPRESS(bp_orig) ||
BP_GET_DEDUP(bp) != BP_GET_DEDUP(bp_orig) ||
zp->zp_copies != BP_GET_NDVAS(bp_orig))
return (ZIO_PIPELINE_CONTINUE);
/*
* If the checksums match then reset the pipeline so that we
* avoid allocating a new bp and issuing any I/O.
*/
if (ZIO_CHECKSUM_EQUAL(bp->blk_cksum, bp_orig->blk_cksum)) {
ASSERT(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_NOPWRITE);
ASSERT3U(BP_GET_PSIZE(bp), ==, BP_GET_PSIZE(bp_orig));
ASSERT3U(BP_GET_LSIZE(bp), ==, BP_GET_LSIZE(bp_orig));
ASSERT(zp->zp_compress != ZIO_COMPRESS_OFF);
ASSERT(bcmp(&bp->blk_prop, &bp_orig->blk_prop,
sizeof (uint64_t)) == 0);
*bp = *bp_orig;
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
zio->io_flags |= ZIO_FLAG_NOPWRITE;
}
return (ZIO_PIPELINE_CONTINUE);
}
/*
* ==========================================================================
* Dedup
* ==========================================================================
*/
static void
zio_ddt_child_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp;
zio_t *pio = zio_unique_parent(zio);
mutex_enter(&pio->io_lock);
ddp = ddt_phys_select(dde, bp);
if (zio->io_error == 0)
ddt_phys_clear(ddp); /* this ddp doesn't need repair */
if (zio->io_error == 0 && dde->dde_repair_abd == NULL)
dde->dde_repair_abd = zio->io_abd;
else
abd_free(zio->io_abd);
mutex_exit(&pio->io_lock);
}
static int
zio_ddt_read_start(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = ddt_repair_start(ddt, bp);
ddt_phys_t *ddp = dde->dde_phys;
ddt_phys_t *ddp_self = ddt_phys_select(dde, bp);
blkptr_t blk;
ASSERT(zio->io_vsd == NULL);
zio->io_vsd = dde;
if (ddp_self == NULL)
return (ZIO_PIPELINE_CONTINUE);
for (int p = 0; p < DDT_PHYS_TYPES; p++, ddp++) {
if (ddp->ddp_phys_birth == 0 || ddp == ddp_self)
continue;
ddt_bp_create(ddt->ddt_checksum, &dde->dde_key, ddp,
&blk);
zio_nowait(zio_read(zio, zio->io_spa, &blk,
abd_alloc_for_io(zio->io_size, B_TRUE),
zio->io_size, zio_ddt_child_read_done, dde,
zio->io_priority, ZIO_DDT_CHILD_FLAGS(zio) |
ZIO_FLAG_DONT_PROPAGATE, &zio->io_bookmark));
}
return (ZIO_PIPELINE_CONTINUE);
}
zio_nowait(zio_read(zio, zio->io_spa, bp,
zio->io_abd, zio->io_size, NULL, NULL, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark));
return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_ddt_read_done(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
if (zio_wait_for_children(zio, ZIO_CHILD_DDT_BIT, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
}
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_PSIZE(bp) == zio->io_size);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (zio->io_child_error[ZIO_CHILD_DDT]) {
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = zio->io_vsd;
if (ddt == NULL) {
ASSERT(spa_load_state(zio->io_spa) != SPA_LOAD_NONE);
return (ZIO_PIPELINE_CONTINUE);
}
if (dde == NULL) {
zio->io_stage = ZIO_STAGE_DDT_READ_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_FALSE);
return (ZIO_PIPELINE_STOP);
}
if (dde->dde_repair_abd != NULL) {
abd_copy(zio->io_abd, dde->dde_repair_abd,
zio->io_size);
zio->io_child_error[ZIO_CHILD_DDT] = 0;
}
ddt_repair_done(ddt, dde);
zio->io_vsd = NULL;
}
ASSERT(zio->io_vsd == NULL);
return (ZIO_PIPELINE_CONTINUE);
}
static boolean_t
zio_ddt_collision(zio_t *zio, ddt_t *ddt, ddt_entry_t *dde)
{
spa_t *spa = zio->io_spa;
boolean_t do_raw = !!(zio->io_flags & ZIO_FLAG_RAW);
ASSERT(!(zio->io_bp_override && do_raw));
/*
* Note: we compare the original data, not the transformed data,
* because when zio->io_bp is an override bp, we will not have
* pushed the I/O transforms. That's an important optimization
* because otherwise we'd compress/encrypt all dmu_sync() data twice.
* However, we should never get a raw, override zio so in these
* cases we can compare the io_abd directly. This is useful because
* it allows us to do dedup verification even if we don't have access
* to the original data (for instance, if the encryption keys aren't
* loaded).
*/
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
zio_t *lio = dde->dde_lead_zio[p];
if (lio != NULL && do_raw) {
return (lio->io_size != zio->io_size ||
abd_cmp(zio->io_abd, lio->io_abd) != 0);
} else if (lio != NULL) {
return (lio->io_orig_size != zio->io_orig_size ||
abd_cmp(zio->io_orig_abd, lio->io_orig_abd) != 0);
}
}
for (int p = DDT_PHYS_SINGLE; p <= DDT_PHYS_TRIPLE; p++) {
ddt_phys_t *ddp = &dde->dde_phys[p];
if (ddp->ddp_phys_birth != 0 && do_raw) {
blkptr_t blk = *zio->io_bp;
uint64_t psize;
abd_t *tmpabd;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
psize = BP_GET_PSIZE(&blk);
if (psize != zio->io_size)
return (B_TRUE);
ddt_exit(ddt);
tmpabd = abd_alloc_for_io(psize, B_TRUE);
error = zio_wait(zio_read(NULL, spa, &blk, tmpabd,
psize, NULL, NULL, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE |
ZIO_FLAG_RAW, &zio->io_bookmark));
if (error == 0) {
if (abd_cmp(tmpabd, zio->io_abd) != 0)
error = SET_ERROR(ENOENT);
}
abd_free(tmpabd);
ddt_enter(ddt);
return (error != 0);
} else if (ddp->ddp_phys_birth != 0) {
arc_buf_t *abuf = NULL;
arc_flags_t aflags = ARC_FLAG_WAIT;
blkptr_t blk = *zio->io_bp;
int error;
ddt_bp_fill(ddp, &blk, ddp->ddp_phys_birth);
if (BP_GET_LSIZE(&blk) != zio->io_orig_size)
return (B_TRUE);
ddt_exit(ddt);
error = arc_read(NULL, spa, &blk,
arc_getbuf_func, &abuf, ZIO_PRIORITY_SYNC_READ,
ZIO_FLAG_CANFAIL | ZIO_FLAG_SPECULATIVE,
&aflags, &zio->io_bookmark);
if (error == 0) {
if (abd_cmp_buf(zio->io_orig_abd, abuf->b_data,
zio->io_orig_size) != 0)
error = SET_ERROR(ENOENT);
arc_buf_destroy(abuf, &abuf);
}
ddt_enter(ddt);
return (error != 0);
}
}
return (B_FALSE);
}
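/*
 * Illustrative, standalone sketch of the buffer selection used by the
 * dedup-verify comparison above: a raw zio carries already-transformed
 * (compressed/encrypted) data in io_abd, so that is what must match,
 * while a non-raw zio compares the untransformed io_orig_abd.  The types
 * and helper below are simplified stand-ins, not the real zio_t/abd_t API.
 */
#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <stddef.h>

struct fake_zio {
	const void *abd;	/* transformed data (stand-in for io_abd) */
	size_t size;
	const void *orig_abd;	/* untransformed data (io_orig_abd) */
	size_t orig_size;
	bool raw;		/* stand-in for ZIO_FLAG_RAW */
};

/*
 * Return true if the candidate write differs from the existing copy,
 * choosing buffers the same way zio_ddt_collision() does above.
 */
static bool
ddt_collides(const struct fake_zio *zio, const struct fake_zio *lead)
{
	if (zio->raw) {
		return (zio->size != lead->size ||
		    memcmp(zio->abd, lead->abd, zio->size) != 0);
	}
	return (zio->orig_size != lead->orig_size ||
	    memcmp(zio->orig_abd, lead->orig_abd, zio->orig_size) != 0);
}

int
main(void)
{
	struct fake_zio a = { "CIPHERTEXT", 10, "plaintext", 9, true };
	struct fake_zio b = { "CIPHERTEXT", 10, "different", 9, true };

	/* raw zios: only the transformed data is compared, so no collision */
	printf("collision: %d\n", ddt_collides(&a, &b));
	return (0);
}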
static void
zio_ddt_child_write_ready(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
zio_t *pio;
if (zio->io_error)
return;
ddt_enter(ddt);
ASSERT(dde->dde_lead_zio[p] == zio);
ddt_phys_fill(ddp, zio->io_bp);
zio_link_t *zl = NULL;
while ((pio = zio_walk_parents(zio, &zl)) != NULL)
ddt_bp_fill(ddp, pio->io_bp, zio->io_txg);
ddt_exit(ddt);
}
static void
zio_ddt_child_write_done(zio_t *zio)
{
int p = zio->io_prop.zp_copies;
ddt_t *ddt = ddt_select(zio->io_spa, zio->io_bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
ddt_enter(ddt);
ASSERT(ddp->ddp_refcnt == 0);
ASSERT(dde->dde_lead_zio[p] == zio);
dde->dde_lead_zio[p] = NULL;
if (zio->io_error == 0) {
zio_link_t *zl = NULL;
while (zio_walk_parents(zio, &zl) != NULL)
ddt_phys_addref(ddp);
} else {
ddt_phys_clear(ddp);
}
ddt_exit(ddt);
}
static void
zio_ddt_ditto_write_done(zio_t *zio)
{
int p = DDT_PHYS_DITTO;
ASSERTV(zio_prop_t *zp = &zio->io_prop);
blkptr_t *bp = zio->io_bp;
ddt_t *ddt = ddt_select(zio->io_spa, bp);
ddt_entry_t *dde = zio->io_private;
ddt_phys_t *ddp = &dde->dde_phys[p];
ddt_key_t *ddk = &dde->dde_key;
ddt_enter(ddt);
ASSERT(ddp->ddp_refcnt == 0);
ASSERT(dde->dde_lead_zio[p] == zio);
dde->dde_lead_zio[p] = NULL;
if (zio->io_error == 0) {
ASSERT(ZIO_CHECKSUM_EQUAL(bp->blk_cksum, ddk->ddk_cksum));
ASSERT(zp->zp_copies < SPA_DVAS_PER_BP);
ASSERT(zp->zp_copies == BP_GET_NDVAS(bp) - BP_IS_GANG(bp));
if (ddp->ddp_phys_birth != 0)
ddt_phys_free(ddt, ddk, ddp, zio->io_txg);
ddt_phys_fill(ddp, bp);
}
ddt_exit(ddt);
}
static int
zio_ddt_write(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t txg = zio->io_txg;
zio_prop_t *zp = &zio->io_prop;
int p = zp->zp_copies;
int ditto_copies;
zio_t *cio = NULL;
zio_t *dio = NULL;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(BP_GET_CHECKSUM(bp) == zp->zp_checksum);
ASSERT(BP_IS_HOLE(bp) || zio->io_bp_override);
ASSERT(!(zio->io_bp_override && (zio->io_flags & ZIO_FLAG_RAW)));
ddt_enter(ddt);
dde = ddt_lookup(ddt, bp, B_TRUE);
ddp = &dde->dde_phys[p];
if (zp->zp_dedup_verify && zio_ddt_collision(zio, ddt, dde)) {
/*
* If we're using a weak checksum, upgrade to a strong checksum
* and try again. If we're already using a strong checksum,
* we can't resolve it, so just convert to an ordinary write.
* (And automatically e-mail a paper to Nature?)
*/
if (!(zio_checksum_table[zp->zp_checksum].ci_flags &
ZCHECKSUM_FLAG_DEDUP)) {
zp->zp_checksum = spa_dedup_checksum(spa);
zio_pop_transforms(zio);
zio->io_stage = ZIO_STAGE_OPEN;
BP_ZERO(bp);
} else {
zp->zp_dedup = B_FALSE;
}
zio->io_pipeline = ZIO_WRITE_PIPELINE;
ddt_exit(ddt);
return (ZIO_PIPELINE_CONTINUE);
}
ditto_copies = ddt_ditto_copies_needed(ddt, dde, ddp);
ASSERT(ditto_copies < SPA_DVAS_PER_BP);
if (ditto_copies > ddt_ditto_copies_present(dde) &&
dde->dde_lead_zio[DDT_PHYS_DITTO] == NULL) {
zio_prop_t czp = *zp;
czp.zp_copies = ditto_copies;
/*
* If we arrived here with an override bp, we won't have run
* the transform stack, so we won't have the data we need to
* generate a child i/o. So, toss the override bp and restart.
* This is safe, because using the override bp is just an
* optimization; and it's rare, so the cost doesn't matter.
*/
if (zio->io_bp_override) {
zio_pop_transforms(zio);
zio->io_stage = ZIO_STAGE_OPEN;
zio->io_pipeline = ZIO_WRITE_PIPELINE;
zio->io_bp_override = NULL;
BP_ZERO(bp);
ddt_exit(ddt);
return (ZIO_PIPELINE_CONTINUE);
}
dio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
zio->io_orig_size, zio->io_orig_size, &czp, NULL, NULL,
NULL, zio_ddt_ditto_write_done, dde, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
zio_push_transform(dio, zio->io_abd, zio->io_size, 0, NULL);
dde->dde_lead_zio[DDT_PHYS_DITTO] = dio;
}
if (ddp->ddp_phys_birth != 0 || dde->dde_lead_zio[p] != NULL) {
if (ddp->ddp_phys_birth != 0)
ddt_bp_fill(ddp, bp, txg);
if (dde->dde_lead_zio[p] != NULL)
zio_add_child(zio, dde->dde_lead_zio[p]);
else
ddt_phys_addref(ddp);
} else if (zio->io_bp_override) {
ASSERT(bp->blk_birth == txg);
ASSERT(BP_EQUAL(bp, zio->io_bp_override));
ddt_phys_fill(ddp, bp);
ddt_phys_addref(ddp);
} else {
cio = zio_write(zio, spa, txg, bp, zio->io_orig_abd,
zio->io_orig_size, zio->io_orig_size, zp,
zio_ddt_child_write_ready, NULL, NULL,
zio_ddt_child_write_done, dde, zio->io_priority,
ZIO_DDT_CHILD_FLAGS(zio), &zio->io_bookmark);
zio_push_transform(cio, zio->io_abd, zio->io_size, 0, NULL);
dde->dde_lead_zio[p] = cio;
}
ddt_exit(ddt);
if (cio)
zio_nowait(cio);
if (dio)
zio_nowait(dio);
return (ZIO_PIPELINE_CONTINUE);
}
ddt_entry_t *freedde; /* for debugging */
static int
zio_ddt_free(zio_t *zio)
{
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
ddt_t *ddt = ddt_select(spa, bp);
ddt_entry_t *dde;
ddt_phys_t *ddp;
ASSERT(BP_GET_DEDUP(bp));
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
ddt_enter(ddt);
freedde = dde = ddt_lookup(ddt, bp, B_TRUE);
if (dde) {
ddp = ddt_phys_select(dde, bp);
if (ddp)
ddt_phys_decref(ddp);
}
ddt_exit(ddt);
return (ZIO_PIPELINE_CONTINUE);
}
/*
* ==========================================================================
* Allocate and free blocks
* ==========================================================================
*/
static zio_t *
zio_io_to_allocate(spa_t *spa)
{
zio_t *zio;
ASSERT(MUTEX_HELD(&spa->spa_alloc_lock));
zio = avl_first(&spa->spa_alloc_tree);
if (zio == NULL)
return (NULL);
ASSERT(IO_IS_ALLOCATING(zio));
/*
* Try to place a reservation for this zio. If we're unable to
* reserve then we throttle.
*/
if (!metaslab_class_throttle_reserve(spa_normal_class(spa),
zio->io_prop.zp_copies, zio, 0)) {
return (NULL);
}
avl_remove(&spa->spa_alloc_tree, zio);
ASSERT3U(zio->io_stage, <, ZIO_STAGE_DVA_ALLOCATE);
return (zio);
}
static int
zio_dva_throttle(zio_t *zio)
{
spa_t *spa = zio->io_spa;
zio_t *nio;
if (zio->io_priority == ZIO_PRIORITY_SYNC_WRITE ||
!spa_normal_class(zio->io_spa)->mc_alloc_throttle_enabled ||
zio->io_child_type == ZIO_CHILD_GANG ||
zio->io_flags & ZIO_FLAG_NODATA) {
return (ZIO_PIPELINE_CONTINUE);
}
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
ASSERT3U(zio->io_queued_timestamp, >, 0);
ASSERT(zio->io_stage == ZIO_STAGE_DVA_THROTTLE);
mutex_enter(&spa->spa_alloc_lock);
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
avl_add(&spa->spa_alloc_tree, zio);
nio = zio_io_to_allocate(zio->io_spa);
mutex_exit(&spa->spa_alloc_lock);
if (nio == zio)
return (ZIO_PIPELINE_CONTINUE);
if (nio != NULL) {
ASSERT(nio->io_stage == ZIO_STAGE_DVA_THROTTLE);
/*
* We are passing control to a new zio so make sure that
* it is processed by a different thread. We do this to
* avoid stack overflows that can occur when parents are
* throttled and children are making progress. We allow
* it to go to the head of the taskq since it's already
* been waiting.
*/
zio_taskq_dispatch(nio, ZIO_TASKQ_ISSUE, B_TRUE);
}
return (ZIO_PIPELINE_STOP);
}
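/*
 * Illustrative, standalone sketch of the reserve-or-queue pattern used by
 * the allocation throttle above: an I/O either reserves an allocation slot
 * and proceeds, or it is parked and re-issued later when a finishing
 * allocation releases its slot (the role of zio_allocate_dispatch()).
 * A plain counter and FIFO stand in for the metaslab reservation
 * machinery; the names here are hypothetical.
 */
#include <stdio.h>

#define THROTTLE_SLOTS	2
#define QUEUE_MAX	8

static int slots_in_use;
static int queue[QUEUE_MAX], qhead, qtail;

/* Try to reserve a slot; if none is free, park the request and throttle. */
static int
throttle_issue(int id)
{
	if (slots_in_use < THROTTLE_SLOTS) {
		slots_in_use++;
		printf("io %d: reserved, issuing\n", id);
		return (1);
	}
	queue[qtail++ % QUEUE_MAX] = id;
	printf("io %d: throttled\n", id);
	return (0);
}

/* A finishing allocation releases its slot and dispatches the next waiter. */
static void
throttle_done(int id)
{
	slots_in_use--;
	printf("io %d: done\n", id);
	if (qhead != qtail)
		throttle_issue(queue[qhead++ % QUEUE_MAX]);
}

int
main(void)
{
	for (int i = 1; i <= 4; i++)
		throttle_issue(i);	/* 1 and 2 issue; 3 and 4 throttle */
	throttle_done(1);		/* releasing a slot re-issues io 3 */
	throttle_done(2);		/* ...and then io 4 */
	return (0);
}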
void
zio_allocate_dispatch(spa_t *spa)
{
zio_t *zio;
mutex_enter(&spa->spa_alloc_lock);
zio = zio_io_to_allocate(spa);
mutex_exit(&spa->spa_alloc_lock);
if (zio == NULL)
return;
ASSERT3U(zio->io_stage, ==, ZIO_STAGE_DVA_THROTTLE);
ASSERT0(zio->io_error);
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE, B_TRUE);
}
static int
zio_dva_allocate(zio_t *zio)
{
spa_t *spa = zio->io_spa;
metaslab_class_t *mc = spa_normal_class(spa);
blkptr_t *bp = zio->io_bp;
int error;
int flags = 0;
if (zio->io_gang_leader == NULL) {
ASSERT(zio->io_child_type > ZIO_CHILD_GANG);
zio->io_gang_leader = zio;
}
ASSERT(BP_IS_HOLE(bp));
ASSERT0(BP_GET_NDVAS(bp));
ASSERT3U(zio->io_prop.zp_copies, >, 0);
ASSERT3U(zio->io_prop.zp_copies, <=, spa_max_replication(spa));
ASSERT3U(zio->io_size, ==, BP_GET_PSIZE(bp));
flags |= (zio->io_flags & ZIO_FLAG_FASTWRITE) ? METASLAB_FASTWRITE : 0;
if (zio->io_flags & ZIO_FLAG_NODATA)
flags |= METASLAB_DONT_THROTTLE;
if (zio->io_flags & ZIO_FLAG_GANG_CHILD)
flags |= METASLAB_GANG_CHILD;
if (zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE)
flags |= METASLAB_ASYNC_ALLOC;
error = metaslab_alloc(spa, mc, zio->io_size, bp,
zio->io_prop.zp_copies, zio->io_txg, NULL, flags,
&zio->io_alloc_list, zio);
if (error != 0) {
spa_dbgmsg(spa, "%s: metaslab allocation failure: zio %p, "
"size %llu, error %d", spa_name(spa), zio, zio->io_size,
error);
if (error == ENOSPC && zio->io_size > SPA_MINBLOCKSIZE)
return (zio_write_gang_block(zio));
zio->io_error = error;
}
return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_dva_free(zio_t *zio)
{
metaslab_free(zio->io_spa, zio->io_bp, zio->io_txg, B_FALSE);
return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_dva_claim(zio_t *zio)
{
int error;
error = metaslab_claim(zio->io_spa, zio->io_bp, zio->io_txg);
if (error)
zio->io_error = error;
return (ZIO_PIPELINE_CONTINUE);
}
/*
* Undo an allocation. This is used by zio_done() when an I/O fails
* and we want to give back the block we just allocated.
* This handles both normal blocks and gang blocks.
*/
static void
zio_dva_unallocate(zio_t *zio, zio_gang_node_t *gn, blkptr_t *bp)
{
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp));
ASSERT(zio->io_bp_override == NULL);
if (!BP_IS_HOLE(bp))
metaslab_free(zio->io_spa, bp, bp->blk_birth, B_TRUE);
if (gn != NULL) {
for (int g = 0; g < SPA_GBH_NBLKPTRS; g++) {
zio_dva_unallocate(zio, gn->gn_child[g],
&gn->gn_gbh->zg_blkptr[g]);
}
}
}
/*
* Try to allocate an intent log block. Return 0 on success, errno on failure.
*/
int
zio_alloc_zil(spa_t *spa, objset_t *os, uint64_t txg, blkptr_t *new_bp,
uint64_t size, boolean_t *slog)
{
int error = 1;
zio_alloc_list_t io_alloc_list;
ASSERT(txg > spa_syncing_txg(spa));
metaslab_trace_init(&io_alloc_list);
error = metaslab_alloc(spa, spa_log_class(spa), size, new_bp, 1,
txg, NULL, METASLAB_FASTWRITE, &io_alloc_list, NULL);
if (error == 0) {
*slog = TRUE;
} else {
error = metaslab_alloc(spa, spa_normal_class(spa), size,
new_bp, 1, txg, NULL, METASLAB_FASTWRITE,
&io_alloc_list, NULL);
if (error == 0)
*slog = FALSE;
}
metaslab_trace_fini(&io_alloc_list);
if (error == 0) {
BP_SET_LSIZE(new_bp, size);
BP_SET_PSIZE(new_bp, size);
BP_SET_COMPRESS(new_bp, ZIO_COMPRESS_OFF);
BP_SET_CHECKSUM(new_bp,
spa_version(spa) >= SPA_VERSION_SLIM_ZIL
? ZIO_CHECKSUM_ZILOG2 : ZIO_CHECKSUM_ZILOG);
BP_SET_TYPE(new_bp, DMU_OT_INTENT_LOG);
BP_SET_LEVEL(new_bp, 0);
BP_SET_DEDUP(new_bp, 0);
BP_SET_BYTEORDER(new_bp, ZFS_HOST_BYTEORDER);
/*
* encrypted blocks will require an IV and salt. We generate
* these now since we will not be rewriting the bp at
* rewrite time.
*/
if (os->os_encrypted) {
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t salt[ZIO_DATA_SALT_LEN];
BP_SET_CRYPT(new_bp, B_TRUE);
VERIFY0(spa_crypt_get_salt(spa,
dmu_objset_id(os), salt));
VERIFY0(zio_crypt_generate_iv(iv));
zio_crypt_encode_params_bp(new_bp, salt, iv);
}
} else {
zfs_dbgmsg("%s: zil block allocation failure: "
"size %llu, error %d", spa_name(spa), size, error);
}
return (error);
}
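/*
 * Illustrative, standalone sketch of the try-preferred-then-fall-back
 * allocation done by zio_alloc_zil() above: ask the dedicated log (slog)
 * class for space first, fall back to the normal class only when that
 * fails, and report through *slog which class satisfied the request.
 * The allocator and pool sizes below are hypothetical stand-ins, not the
 * real metaslab_alloc() interface.
 */
#include <stdio.h>
#include <errno.h>
#include <stdbool.h>
#include <stdint.h>

static uint64_t slog_free = 4096;	/* small, fast log device */
static uint64_t normal_free = 1 << 20;	/* main pool */

static int
class_alloc(uint64_t *free_space, uint64_t size)
{
	if (size > *free_space)
		return (ENOSPC);
	*free_space -= size;
	return (0);
}

/* Allocate a log block, preferring the slog; report which class was used. */
static int
alloc_zil_block(uint64_t size, bool *slog)
{
	if (class_alloc(&slog_free, size) == 0) {
		*slog = true;
		return (0);
	}
	*slog = false;
	return (class_alloc(&normal_free, size));
}

int
main(void)
{
	bool slog;

	for (int i = 0; i < 3; i++) {
		int err = alloc_zil_block(2048, &slog);
		printf("alloc %d: err=%d slog=%d\n", i, err, slog);
	}
	/* the first two fit on the slog, the third falls back to the pool */
	return (0);
}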
/*
* Free an intent log block.
*/
void
zio_free_zil(spa_t *spa, uint64_t txg, blkptr_t *bp)
{
ASSERT(BP_GET_TYPE(bp) == DMU_OT_INTENT_LOG);
ASSERT(!BP_IS_GANG(bp));
zio_free(spa, txg, bp);
}
/*
* ==========================================================================
* Read and write to physical devices
* ==========================================================================
*/
/*
* Issue an I/O to the underlying vdev. Typically the issue pipeline
* stops after this stage and will resume upon I/O completion.
* However, there are instances where the vdev layer may need to
* continue the pipeline when an I/O was not issued. Since the I/O
* that was sent to the vdev layer might be different than the one
* currently active in the pipeline (see vdev_queue_io()), we explicitly
* force the underlying vdev layers to call either zio_execute() or
* zio_interrupt() to ensure that the pipeline continues with the correct I/O.
*/
static int
zio_vdev_io_start(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
uint64_t align;
spa_t *spa = zio->io_spa;
zio->io_delay = 0;
ASSERT(zio->io_error == 0);
ASSERT(zio->io_child_error[ZIO_CHILD_VDEV] == 0);
if (vd == NULL) {
if (!(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_enter(spa, SCL_ZIO, zio, RW_READER);
/*
* The mirror_ops handle multiple DVAs in a single BP.
*/
vdev_mirror_ops.vdev_op_io_start(zio);
return (ZIO_PIPELINE_STOP);
}
ASSERT3P(zio->io_logical, !=, zio);
+ if (zio->io_type == ZIO_TYPE_WRITE && zio->io_vd->vdev_removing) {
+ /*
+ * Note: the code can handle other kinds of writes,
+ * but we don't expect them.
+ */
+ ASSERT(zio->io_flags &
+ (ZIO_FLAG_PHYSICAL | ZIO_FLAG_SELF_HEAL |
+ ZIO_FLAG_INDUCE_DAMAGE));
+ }
align = 1ULL << vd->vdev_top->vdev_ashift;
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL) &&
P2PHASE(zio->io_size, align) != 0) {
/* Transform logical writes to be a full physical block size. */
uint64_t asize = P2ROUNDUP(zio->io_size, align);
abd_t *abuf = abd_alloc_sametype(zio->io_abd, asize);
ASSERT(vd == vd->vdev_top);
if (zio->io_type == ZIO_TYPE_WRITE) {
abd_copy(abuf, zio->io_abd, zio->io_size);
abd_zero_off(abuf, zio->io_size, asize - zio->io_size);
}
zio_push_transform(zio, abuf, asize, asize, zio_subblock);
}
/*
* If this is not a physical io, make sure that it is properly aligned
* before proceeding.
*/
if (!(zio->io_flags & ZIO_FLAG_PHYSICAL)) {
ASSERT0(P2PHASE(zio->io_offset, align));
ASSERT0(P2PHASE(zio->io_size, align));
} else {
/*
* For physical writes, we allow 512b aligned writes and assume
* the device will perform a read-modify-write as necessary.
*/
ASSERT0(P2PHASE(zio->io_offset, SPA_MINBLOCKSIZE));
ASSERT0(P2PHASE(zio->io_size, SPA_MINBLOCKSIZE));
}
VERIFY(zio->io_type != ZIO_TYPE_WRITE || spa_writeable(spa));
/*
* If this is a repair I/O, and there's no self-healing involved --
* that is, we're just resilvering what we expect to resilver --
* then don't do the I/O unless zio's txg is actually in vd's DTL.
* This prevents spurious resilvering with nested replication.
* For example, given a mirror of mirrors, (A+B)+(C+D), if only
* A is out of date, we'll read from C+D, then use the data to
* resilver A+B -- but we don't actually want to resilver B, just A.
* The top-level mirror has no way to know this, so instead we just
* discard unnecessary repairs as we work our way down the vdev tree.
* The same logic applies to any form of nested replication:
* ditto + mirror, RAID-Z + replacing, etc. This covers them all.
*/
if ((zio->io_flags & ZIO_FLAG_IO_REPAIR) &&
!(zio->io_flags & ZIO_FLAG_SELF_HEAL) &&
zio->io_txg != 0 && /* not a delegated i/o */
!vdev_dtl_contains(vd, DTL_PARTIAL, zio->io_txg, 1)) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
zio_vdev_io_bypass(zio);
return (ZIO_PIPELINE_CONTINUE);
}
if (vd->vdev_ops->vdev_op_leaf &&
(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE)) {
if (zio->io_type == ZIO_TYPE_READ && vdev_cache_read(zio))
return (ZIO_PIPELINE_CONTINUE);
if ((zio = vdev_queue_io(zio)) == NULL)
return (ZIO_PIPELINE_STOP);
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
zio_interrupt(zio);
return (ZIO_PIPELINE_STOP);
}
zio->io_delay = gethrtime();
}
vd->vdev_ops->vdev_op_io_start(zio);
return (ZIO_PIPELINE_STOP);
}
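/*
 * Illustrative, standalone sketch of the alignment transform in
 * zio_vdev_io_start() above: a logical write whose size is not a multiple
 * of the top-level vdev's 1 << ashift block size is copied into a buffer
 * rounded up with P2ROUNDUP() and the tail is zero-filled.  The macros are
 * re-defined locally with the usual power-of-two semantics so the
 * arithmetic can be run on its own.
 */
#include <stdio.h>
#include <string.h>
#include <stdlib.h>
#include <stdint.h>

#define P2PHASE(x, align)	((x) & ((align) - 1))
#define P2ROUNDUP(x, align)	(((x) + (align) - 1) & ~((uint64_t)(align) - 1))

int
main(void)
{
	uint64_t ashift = 12;		/* 4K physical blocks */
	uint64_t align = 1ULL << ashift;
	uint64_t io_size = 5632;	/* 5.5K logical write */

	if (P2PHASE(io_size, align) != 0) {
		uint64_t asize = P2ROUNDUP(io_size, align);
		unsigned char *abuf = calloc(1, asize);

		/* copy the logical data, leave the tail zero-filled */
		memset(abuf, 0xab, io_size);
		printf("padded %llu -> %llu bytes (%llu zero bytes of tail)\n",
		    (unsigned long long)io_size, (unsigned long long)asize,
		    (unsigned long long)(asize - io_size));
		free(abuf);
	}
	return (0);
}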
static int
zio_vdev_io_done(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
vdev_ops_t *ops = vd ? vd->vdev_ops : &vdev_mirror_ops;
boolean_t unexpected_error = B_FALSE;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
}
ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);
if (zio->io_delay)
zio->io_delay = gethrtime() - zio->io_delay;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf) {
vdev_queue_io_done(zio);
if (zio->io_type == ZIO_TYPE_WRITE)
vdev_cache_write(zio);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_device_injections(vd, zio,
EIO, EILSEQ);
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_label_injection(zio, EIO);
if (zio->io_error) {
if (!vdev_accessible(vd, zio)) {
zio->io_error = SET_ERROR(ENXIO);
} else {
unexpected_error = B_TRUE;
}
}
}
ops->vdev_op_io_done(zio);
if (unexpected_error)
VERIFY(vdev_probe(vd, zio) == NULL);
return (ZIO_PIPELINE_CONTINUE);
}
/*
* This function is used to change the priority of an existing zio that is
* currently in-flight. This is used by the arc to upgrade priority in the
* event that a demand read is made for a block that is currently queued
* as a scrub or async read IO. Otherwise, the high priority read request
* would end up having to wait for the lower priority IO.
*/
void
zio_change_priority(zio_t *pio, zio_priority_t priority)
{
zio_t *cio, *cio_next;
zio_link_t *zl = NULL;
ASSERT3U(priority, <, ZIO_PRIORITY_NUM_QUEUEABLE);
if (pio->io_vd != NULL && pio->io_vd->vdev_ops->vdev_op_leaf) {
vdev_queue_change_io_priority(pio, priority);
} else {
pio->io_priority = priority;
}
mutex_enter(&pio->io_lock);
for (cio = zio_walk_children(pio, &zl); cio != NULL; cio = cio_next) {
cio_next = zio_walk_children(pio, &zl);
zio_change_priority(cio, priority);
}
mutex_exit(&pio->io_lock);
}
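/*
 * Illustrative, standalone sketch of the recursive priority propagation
 * performed by zio_change_priority() above: the in-flight I/O is bumped
 * and the same change is applied to each child so a demand read does not
 * wait behind its own lower-priority child I/Os.  The tree below is a
 * simplified stand-in for the zio parent/child graph, not the real API.
 */
#include <stdio.h>

#define MAX_KIDS 4

struct fake_io {
	const char *name;
	int priority;			/* lower value = more urgent */
	struct fake_io *child[MAX_KIDS];
};

static void
change_priority(struct fake_io *io, int priority)
{
	io->priority = priority;
	for (int i = 0; i < MAX_KIDS && io->child[i] != NULL; i++)
		change_priority(io->child[i], priority);
}

int
main(void)
{
	struct fake_io leaf1 = { "vdev read A", 5, { NULL } };
	struct fake_io leaf2 = { "vdev read B", 5, { NULL } };
	struct fake_io root = { "logical read", 5, { &leaf1, &leaf2 } };

	change_priority(&root, 1);	/* e.g. scrub read upgraded to demand */
	printf("%s=%d %s=%d %s=%d\n", root.name, root.priority,
	    leaf1.name, leaf1.priority, leaf2.name, leaf2.priority);
	return (0);
}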
/*
* For non-raidz ZIOs, we can just copy aside the bad data read from the
* disk, and use that to finish the checksum ereport later.
*/
static void
zio_vsd_default_cksum_finish(zio_cksum_report_t *zcr,
const abd_t *good_buf)
{
/* no processing needed */
zfs_ereport_finish_checksum(zcr, good_buf, zcr->zcr_cbdata, B_FALSE);
}
/*ARGSUSED*/
void
zio_vsd_default_cksum_report(zio_t *zio, zio_cksum_report_t *zcr, void *ignored)
{
void *abd = abd_alloc_sametype(zio->io_abd, zio->io_size);
abd_copy(abd, zio->io_abd, zio->io_size);
zcr->zcr_cbinfo = zio->io_size;
zcr->zcr_cbdata = abd;
zcr->zcr_finish = zio_vsd_default_cksum_finish;
zcr->zcr_free = zio_abd_free;
}
static int
zio_vdev_io_assess(zio_t *zio)
{
vdev_t *vd = zio->io_vd;
if (zio_wait_for_children(zio, ZIO_CHILD_VDEV_BIT, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
}
if (vd == NULL && !(zio->io_flags & ZIO_FLAG_CONFIG_WRITER))
spa_config_exit(zio->io_spa, SCL_ZIO, zio);
if (zio->io_vsd != NULL) {
zio->io_vsd_ops->vsd_free(zio);
zio->io_vsd = NULL;
}
if (zio_injection_enabled && zio->io_error == 0)
zio->io_error = zio_handle_fault_injection(zio, EIO);
/*
* If the I/O failed, determine whether we should attempt to retry it.
*
* On retry, we cut in line in the issue queue, since we don't want
* compression/checksumming/etc. work to prevent our (cheap) IO reissue.
*/
if (zio->io_error && vd == NULL &&
!(zio->io_flags & (ZIO_FLAG_DONT_RETRY | ZIO_FLAG_IO_RETRY))) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DONT_QUEUE)); /* not a leaf */
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_BYPASS)); /* not a leaf */
zio->io_error = 0;
zio->io_flags |= ZIO_FLAG_IO_RETRY |
ZIO_FLAG_DONT_CACHE | ZIO_FLAG_DONT_AGGREGATE;
zio->io_stage = ZIO_STAGE_VDEV_IO_START >> 1;
zio_taskq_dispatch(zio, ZIO_TASKQ_ISSUE,
zio_requeue_io_start_cut_in_line);
return (ZIO_PIPELINE_STOP);
}
/*
* If we got an error on a leaf device, convert it to ENXIO
* if the device is not accessible at all.
*/
if (zio->io_error && vd != NULL && vd->vdev_ops->vdev_op_leaf &&
!vdev_accessible(vd, zio))
zio->io_error = SET_ERROR(ENXIO);
/*
* If we can't write to an interior vdev (mirror or RAID-Z),
* set vdev_cant_write so that we stop trying to allocate from it.
*/
if (zio->io_error == ENXIO && zio->io_type == ZIO_TYPE_WRITE &&
vd != NULL && !vd->vdev_ops->vdev_op_leaf) {
vd->vdev_cant_write = B_TRUE;
}
/*
* If a cache flush returns ENOTSUP or ENOTTY, we know that no future
* attempts will ever succeed. In this case we set a persistent bit so
* that we don't bother with it in the future.
*/
if ((zio->io_error == ENOTSUP || zio->io_error == ENOTTY) &&
zio->io_type == ZIO_TYPE_IOCTL &&
zio->io_cmd == DKIOCFLUSHWRITECACHE && vd != NULL)
vd->vdev_nowritecache = B_TRUE;
if (zio->io_error)
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (vd != NULL && vd->vdev_ops->vdev_op_leaf &&
zio->io_physdone != NULL) {
ASSERT(!(zio->io_flags & ZIO_FLAG_DELEGATED));
ASSERT(zio->io_child_type == ZIO_CHILD_VDEV);
zio->io_physdone(zio->io_logical);
}
return (ZIO_PIPELINE_CONTINUE);
}
void
zio_vdev_io_reissue(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_stage >>= 1;
}
void
zio_vdev_io_redone(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_DONE);
zio->io_stage >>= 1;
}
void
zio_vdev_io_bypass(zio_t *zio)
{
ASSERT(zio->io_stage == ZIO_STAGE_VDEV_IO_START);
ASSERT(zio->io_error == 0);
zio->io_flags |= ZIO_FLAG_IO_BYPASS;
zio->io_stage = ZIO_STAGE_VDEV_IO_ASSESS >> 1;
}
/*
* ==========================================================================
* Encrypt and store encryption parameters
* ==========================================================================
*/
/*
* This function is used for ZIO_STAGE_ENCRYPT. It is responsible for
* managing the storage of encryption parameters and passing them to the
* lower-level encryption functions.
*/
static int
zio_encrypt(zio_t *zio)
{
zio_prop_t *zp = &zio->io_prop;
spa_t *spa = zio->io_spa;
blkptr_t *bp = zio->io_bp;
uint64_t psize = BP_GET_PSIZE(bp);
uint64_t dsobj = zio->io_bookmark.zb_objset;
dmu_object_type_t ot = BP_GET_TYPE(bp);
void *enc_buf = NULL;
abd_t *eabd = NULL;
uint8_t salt[ZIO_DATA_SALT_LEN];
uint8_t iv[ZIO_DATA_IV_LEN];
uint8_t mac[ZIO_DATA_MAC_LEN];
boolean_t no_crypt = B_FALSE;
/* the root zio already encrypted the data */
if (zio->io_child_type == ZIO_CHILD_GANG)
return (ZIO_PIPELINE_CONTINUE);
/* only ZIL blocks are re-encrypted on rewrite */
if (!IO_IS_ALLOCATING(zio) && ot != DMU_OT_INTENT_LOG)
return (ZIO_PIPELINE_CONTINUE);
if (!(zp->zp_encrypt || BP_IS_ENCRYPTED(bp))) {
BP_SET_CRYPT(bp, B_FALSE);
return (ZIO_PIPELINE_CONTINUE);
}
/* if we are doing raw encryption set the provided encryption params */
if (zio->io_flags & ZIO_FLAG_RAW_ENCRYPT) {
ASSERT0(BP_GET_LEVEL(bp));
BP_SET_CRYPT(bp, B_TRUE);
BP_SET_BYTEORDER(bp, zp->zp_byteorder);
if (ot != DMU_OT_OBJSET)
zio_crypt_encode_mac_bp(bp, zp->zp_mac);
/* dnode blocks must be written out in the provided byteorder */
if (zp->zp_byteorder != ZFS_HOST_BYTEORDER &&
ot == DMU_OT_DNODE) {
void *bswap_buf = zio_buf_alloc(psize);
abd_t *babd = abd_get_from_buf(bswap_buf, psize);
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
abd_copy_to_buf(bswap_buf, zio->io_abd, psize);
dmu_ot_byteswap[DMU_OT_BYTESWAP(ot)].ob_func(bswap_buf,
psize);
abd_take_ownership_of_buf(babd, B_TRUE);
zio_push_transform(zio, babd, psize, psize, NULL);
}
if (DMU_OT_IS_ENCRYPTED(ot))
zio_crypt_encode_params_bp(bp, zp->zp_salt, zp->zp_iv);
return (ZIO_PIPELINE_CONTINUE);
}
/* indirect blocks only maintain a cksum of the lower level MACs */
if (BP_GET_LEVEL(bp) > 0) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(zio_crypt_do_indirect_mac_checksum_abd(B_TRUE,
zio->io_orig_abd, BP_GET_LSIZE(bp), BP_SHOULD_BYTESWAP(bp),
mac));
zio_crypt_encode_mac_bp(bp, mac);
return (ZIO_PIPELINE_CONTINUE);
}
/*
* Objset blocks are a special case since they have 2 256-bit MACs
* embedded within them.
*/
if (ot == DMU_OT_OBJSET) {
ASSERT0(DMU_OT_IS_ENCRYPTED(ot));
ASSERT3U(BP_GET_COMPRESS(bp), ==, ZIO_COMPRESS_OFF);
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_objset_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, BP_SHOULD_BYTESWAP(bp)));
return (ZIO_PIPELINE_CONTINUE);
}
/* unencrypted object types are only authenticated with a MAC */
if (!DMU_OT_IS_ENCRYPTED(ot)) {
BP_SET_CRYPT(bp, B_TRUE);
VERIFY0(spa_do_crypt_mac_abd(B_TRUE, spa, dsobj,
zio->io_abd, psize, mac));
zio_crypt_encode_mac_bp(bp, mac);
return (ZIO_PIPELINE_CONTINUE);
}
/*
* Later passes of sync-to-convergence may decide to rewrite data
* in place to avoid more disk reallocations. This presents a problem
* for encryption because this constitutes rewriting the new data with
* the same encryption key and IV. However, this only applies to blocks
* in the MOS (particularly the spacemaps) and we do not encrypt the
* MOS. We assert that the zio is allocating or an intent log write
* to enforce this.
*/
ASSERT(IO_IS_ALLOCATING(zio) || ot == DMU_OT_INTENT_LOG);
ASSERT(BP_GET_LEVEL(bp) == 0 || ot == DMU_OT_INTENT_LOG);
ASSERT(spa_feature_is_active(spa, SPA_FEATURE_ENCRYPTION));
ASSERT3U(psize, !=, 0);
enc_buf = zio_buf_alloc(psize);
eabd = abd_get_from_buf(enc_buf, psize);
abd_take_ownership_of_buf(eabd, B_TRUE);
/*
* For an explanation of what encryption parameters are stored
* where, see the block comment in zio_crypt.c.
*/
if (ot == DMU_OT_INTENT_LOG) {
zio_crypt_decode_params_bp(bp, salt, iv);
} else {
BP_SET_CRYPT(bp, B_TRUE);
}
/* Perform the encryption. This should not fail */
VERIFY0(spa_do_crypt_abd(B_TRUE, spa, dsobj, bp, zio->io_txg,
psize, zio->io_abd, eabd, iv, mac, salt, &no_crypt));
/* encode encryption metadata into the bp */
if (ot == DMU_OT_INTENT_LOG) {
/*
* ZIL blocks store the MAC in the embedded checksum, so the
* transform must always be applied.
*/
zio_crypt_encode_mac_zil(enc_buf, mac);
zio_push_transform(zio, eabd, psize, psize, NULL);
} else {
BP_SET_CRYPT(bp, B_TRUE);
zio_crypt_encode_params_bp(bp, salt, iv);
zio_crypt_encode_mac_bp(bp, mac);
if (no_crypt) {
ASSERT3U(ot, ==, DMU_OT_DNODE);
abd_free(eabd);
} else {
zio_push_transform(zio, eabd, psize, psize, NULL);
}
}
return (ZIO_PIPELINE_CONTINUE);
}
/*
* ==========================================================================
* Generate and verify checksums
* ==========================================================================
*/
static int
zio_checksum_generate(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
enum zio_checksum checksum;
if (bp == NULL) {
/*
* This is zio_write_phys().
* We're either generating a label checksum, or none at all.
*/
checksum = zio->io_prop.zp_checksum;
if (checksum == ZIO_CHECKSUM_OFF)
return (ZIO_PIPELINE_CONTINUE);
ASSERT(checksum == ZIO_CHECKSUM_LABEL);
} else {
if (BP_IS_GANG(bp) && zio->io_child_type == ZIO_CHILD_GANG) {
ASSERT(!IO_IS_ALLOCATING(zio));
checksum = ZIO_CHECKSUM_GANG_HEADER;
} else {
checksum = BP_GET_CHECKSUM(bp);
}
}
zio_checksum_compute(zio, checksum, zio->io_abd, zio->io_size);
return (ZIO_PIPELINE_CONTINUE);
}
static int
zio_checksum_verify(zio_t *zio)
{
zio_bad_cksum_t info;
blkptr_t *bp = zio->io_bp;
int error;
ASSERT(zio->io_vd != NULL);
if (bp == NULL) {
/*
* This is zio_read_phys().
* We're either verifying a label checksum, or nothing at all.
*/
if (zio->io_prop.zp_checksum == ZIO_CHECKSUM_OFF)
return (ZIO_PIPELINE_CONTINUE);
ASSERT(zio->io_prop.zp_checksum == ZIO_CHECKSUM_LABEL);
}
if ((error = zio_checksum_error(zio, &info)) != 0) {
zio->io_error = error;
if (error == ECKSUM &&
!(zio->io_flags & ZIO_FLAG_SPECULATIVE)) {
zfs_ereport_start_checksum(zio->io_spa,
zio->io_vd, &zio->io_bookmark, zio,
zio->io_offset, zio->io_size, NULL, &info);
}
}
return (ZIO_PIPELINE_CONTINUE);
}
/*
* Called by RAID-Z to ensure we don't compute the checksum twice.
*/
void
zio_checksum_verified(zio_t *zio)
{
zio->io_pipeline &= ~ZIO_STAGE_CHECKSUM_VERIFY;
}
/*
* ==========================================================================
* Error rank. Errors are ranked in the order 0, ENXIO, ECKSUM, EIO, other.
* An error of 0 indicates success. ENXIO indicates whole-device failure,
* which may be transient (e.g. unplugged) or permanent. ECKSUM and EIO
* indicate errors that are specific to one I/O, and most likely permanent.
* Any other error is presumed to be worse because we weren't expecting it.
* ==========================================================================
*/
int
zio_worst_error(int e1, int e2)
{
static int zio_error_rank[] = { 0, ENXIO, ECKSUM, EIO };
int r1, r2;
for (r1 = 0; r1 < sizeof (zio_error_rank) / sizeof (int); r1++)
if (e1 == zio_error_rank[r1])
break;
for (r2 = 0; r2 < sizeof (zio_error_rank) / sizeof (int); r2++)
if (e2 == zio_error_rank[r2])
break;
return (r1 > r2 ? e1 : e2);
}
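/*
 * Illustrative, standalone copy of the ranking logic above, showing that
 * an error later in the table -- or an errno not in the table at all --
 * wins: zio_worst_error(ENXIO, ECKSUM) returns ECKSUM, and an unexpected
 * errno outranks everything.  ECKSUM is normally defined by ZFS; EBADE is
 * used here only as a stand-in so the example compiles on its own.
 */
#include <stdio.h>
#include <errno.h>

#ifndef ECKSUM
#define ECKSUM EBADE	/* stand-in for the ZFS definition */
#endif

static int
worst_error(int e1, int e2)
{
	static const int rank[] = { 0, ENXIO, ECKSUM, EIO };
	int r1, r2;

	for (r1 = 0; r1 < 4; r1++)
		if (e1 == rank[r1])
			break;
	for (r2 = 0; r2 < 4; r2++)
		if (e2 == rank[r2])
			break;
	return (r1 > r2 ? e1 : e2);
}

int
main(void)
{
	printf("%d\n", worst_error(ENXIO, ECKSUM) == ECKSUM);	/* 1 */
	printf("%d\n", worst_error(EIO, EINVAL) == EINVAL);	/* 1: unknown is worst */
	return (0);
}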
/*
* ==========================================================================
* I/O completion
* ==========================================================================
*/
static int
zio_ready(zio_t *zio)
{
blkptr_t *bp = zio->io_bp;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
if (zio_wait_for_children(zio, ZIO_CHILD_GANG_BIT | ZIO_CHILD_DDT_BIT,
ZIO_WAIT_READY)) {
return (ZIO_PIPELINE_STOP);
}
if (zio->io_ready) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(bp->blk_birth == zio->io_txg || BP_IS_HOLE(bp) ||
(zio->io_flags & ZIO_FLAG_NOPWRITE));
ASSERT(zio->io_children[ZIO_CHILD_GANG][ZIO_WAIT_READY] == 0);
zio->io_ready(zio);
}
if (bp != NULL && bp != &zio->io_bp_copy)
zio->io_bp_copy = *bp;
if (zio->io_error != 0) {
zio->io_pipeline = ZIO_INTERLOCK_PIPELINE;
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(IO_IS_ALLOCATING(zio));
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
/*
* We were unable to allocate anything, unreserve and
* issue the next I/O to allocate.
*/
metaslab_class_throttle_unreserve(
spa_normal_class(zio->io_spa),
zio->io_prop.zp_copies, zio);
zio_allocate_dispatch(zio->io_spa);
}
}
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_READY] = 1;
pio = zio_walk_parents(zio, &zl);
mutex_exit(&zio->io_lock);
/*
* As we notify zio's parents, new parents could be added.
* New parents go to the head of zio's io_parent_list, however,
* so we will (correctly) not notify them. The remainder of zio's
* io_parent_list, from 'pio_next' onward, cannot change because
* all parents must wait for us to be done before they can be done.
*/
for (; pio != NULL; pio = pio_next) {
pio_next = zio_walk_parents(zio, &zl);
zio_notify_parent(pio, zio, ZIO_WAIT_READY);
}
if (zio->io_flags & ZIO_FLAG_NODATA) {
if (BP_IS_GANG(bp)) {
zio->io_flags &= ~ZIO_FLAG_NODATA;
} else {
ASSERT((uintptr_t)zio->io_abd < SPA_MAXBLOCKSIZE);
zio->io_pipeline &= ~ZIO_VDEV_IO_STAGES;
}
}
if (zio_injection_enabled &&
zio->io_spa->spa_syncing_txg == zio->io_txg)
zio_handle_ignored_writes(zio);
return (ZIO_PIPELINE_CONTINUE);
}
/*
* Update the allocation throttle accounting.
*/
static void
zio_dva_throttle_done(zio_t *zio)
{
ASSERTV(zio_t *lio = zio->io_logical);
zio_t *pio = zio_unique_parent(zio);
vdev_t *vd = zio->io_vd;
int flags = METASLAB_ASYNC_ALLOC;
ASSERT3P(zio->io_bp, !=, NULL);
ASSERT3U(zio->io_type, ==, ZIO_TYPE_WRITE);
ASSERT3U(zio->io_priority, ==, ZIO_PRIORITY_ASYNC_WRITE);
ASSERT3U(zio->io_child_type, ==, ZIO_CHILD_VDEV);
ASSERT(vd != NULL);
ASSERT3P(vd, ==, vd->vdev_top);
ASSERT(zio_injection_enabled || !(zio->io_flags & ZIO_FLAG_IO_RETRY));
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT(zio->io_flags & ZIO_FLAG_IO_ALLOCATING);
ASSERT(!(lio->io_flags & ZIO_FLAG_IO_REWRITE));
ASSERT(!(lio->io_orig_flags & ZIO_FLAG_NODATA));
/*
* Parents of gang children can have two flavors -- ones that
* allocated the gang header (will have ZIO_FLAG_IO_REWRITE set)
* and ones that allocated the constituent blocks. The allocation
* throttle needs to know the allocating parent zio so we must find
* it here.
*/
if (pio->io_child_type == ZIO_CHILD_GANG) {
/*
* If our parent is a rewrite gang child then our grandparent
* would have been the one that performed the allocation.
*/
if (pio->io_flags & ZIO_FLAG_IO_REWRITE)
pio = zio_unique_parent(pio);
flags |= METASLAB_GANG_CHILD;
}
ASSERT(IO_IS_ALLOCATING(pio));
ASSERT3P(zio, !=, zio->io_logical);
ASSERT(zio->io_logical != NULL);
ASSERT(!(zio->io_flags & ZIO_FLAG_IO_REPAIR));
ASSERT0(zio->io_flags & ZIO_FLAG_NOPWRITE);
mutex_enter(&pio->io_lock);
metaslab_group_alloc_decrement(zio->io_spa, vd->vdev_id, pio, flags);
mutex_exit(&pio->io_lock);
metaslab_class_throttle_unreserve(spa_normal_class(zio->io_spa),
1, pio);
/*
* Call into the pipeline to see if there is more work that
* needs to be done. If there is work to be done it will be
* dispatched to another taskq thread.
*/
zio_allocate_dispatch(zio->io_spa);
}
static int
zio_done(zio_t *zio)
{
/*
* Always attempt to keep stack usage minimal here since
* we can be called recursively up to 19 levels deep.
*/
const uint64_t psize = zio->io_size;
zio_t *pio, *pio_next;
zio_link_t *zl = NULL;
/*
* If our children haven't all completed,
* wait for them and then repeat this pipeline stage.
*/
if (zio_wait_for_children(zio, ZIO_CHILD_ALL_BITS, ZIO_WAIT_DONE)) {
return (ZIO_PIPELINE_STOP);
}
/*
* If the allocation throttle is enabled, then update the accounting.
* We only track child I/Os that are part of an allocating async
* write. We must do this since the allocation is performed
* by the logical I/O but the actual write is done by child I/Os.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING &&
zio->io_child_type == ZIO_CHILD_VDEV) {
ASSERT(spa_normal_class(
zio->io_spa)->mc_alloc_throttle_enabled);
zio_dva_throttle_done(zio);
}
/*
* If the allocation throttle is enabled, verify that
* we have decremented the refcounts for every I/O that was throttled.
*/
if (zio->io_flags & ZIO_FLAG_IO_ALLOCATING) {
ASSERT(zio->io_type == ZIO_TYPE_WRITE);
ASSERT(zio->io_priority == ZIO_PRIORITY_ASYNC_WRITE);
ASSERT(zio->io_bp != NULL);
metaslab_group_alloc_verify(zio->io_spa, zio->io_bp, zio);
VERIFY(refcount_not_held(
&(spa_normal_class(zio->io_spa)->mc_alloc_slots), zio));
}
for (int c = 0; c < ZIO_CHILD_TYPES; c++)
for (int w = 0; w < ZIO_WAIT_TYPES; w++)
ASSERT(zio->io_children[c][w] == 0);
if (zio->io_bp != NULL && !BP_IS_EMBEDDED(zio->io_bp)) {
ASSERT(zio->io_bp->blk_pad[0] == 0);
ASSERT(zio->io_bp->blk_pad[1] == 0);
ASSERT(bcmp(zio->io_bp, &zio->io_bp_copy,
sizeof (blkptr_t)) == 0 ||
(zio->io_bp == zio_unique_parent(zio)->io_bp));
if (zio->io_type == ZIO_TYPE_WRITE && !BP_IS_HOLE(zio->io_bp) &&
zio->io_bp_override == NULL &&
!(zio->io_flags & ZIO_FLAG_IO_REPAIR)) {
ASSERT3U(zio->io_prop.zp_copies, <=,
BP_GET_NDVAS(zio->io_bp));
ASSERT(BP_COUNT_GANG(zio->io_bp) == 0 ||
(BP_COUNT_GANG(zio->io_bp) ==
BP_GET_NDVAS(zio->io_bp)));
}
if (zio->io_flags & ZIO_FLAG_NOPWRITE)
VERIFY(BP_EQUAL(zio->io_bp, &zio->io_bp_orig));
}
/*
* If there were child vdev/gang/ddt errors, they apply to us now.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_VDEV);
zio_inherit_child_errors(zio, ZIO_CHILD_GANG);
zio_inherit_child_errors(zio, ZIO_CHILD_DDT);
/*
* If the I/O on the transformed data was successful, generate any
* checksum reports now while we still have the transformed data.
*/
if (zio->io_error == 0) {
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
uint64_t align = zcr->zcr_align;
uint64_t asize = P2ROUNDUP(psize, align);
abd_t *adata = zio->io_abd;
if (asize != psize) {
adata = abd_alloc(asize, B_TRUE);
abd_copy(adata, zio->io_abd, psize);
abd_zero_off(adata, psize, asize - psize);
}
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, adata);
zfs_ereport_free_checksum(zcr);
if (asize != psize)
abd_free(adata);
}
}
zio_pop_transforms(zio); /* note: may set zio->io_error */
vdev_stat_update(zio, psize);
/*
* If this I/O is attached to a particular vdev and is slow, exceeding
* 30 seconds to complete, post an error describing the I/O delay.
* We ignore these errors if the device is currently unavailable.
*/
if (zio->io_delay >= MSEC2NSEC(zio_delay_max)) {
if (zio->io_vd != NULL && !vdev_is_dead(zio->io_vd))
zfs_ereport_post(FM_EREPORT_ZFS_DELAY, zio->io_spa,
zio->io_vd, &zio->io_bookmark, zio, 0, 0);
}
if (zio->io_error) {
/*
* If this I/O is attached to a particular vdev,
* generate an error message describing the I/O failure
* at the block level. We ignore these errors if the
* device is currently unavailable.
*/
if (zio->io_error != ECKSUM && zio->io_vd != NULL &&
!vdev_is_dead(zio->io_vd))
zfs_ereport_post(FM_EREPORT_ZFS_IO, zio->io_spa,
zio->io_vd, &zio->io_bookmark, zio, 0, 0);
if ((zio->io_error == EIO || !(zio->io_flags &
(ZIO_FLAG_SPECULATIVE | ZIO_FLAG_DONT_PROPAGATE))) &&
zio == zio->io_logical) {
/*
* For logical I/O requests, tell the SPA to log the
* error and generate a logical data ereport.
*/
spa_log_error(zio->io_spa, &zio->io_bookmark);
zfs_ereport_post(FM_EREPORT_ZFS_DATA, zio->io_spa,
NULL, &zio->io_bookmark, zio, 0, 0);
}
}
if (zio->io_error && zio == zio->io_logical) {
/*
* Determine whether zio should be reexecuted. This will
* propagate all the way to the root via zio_notify_parent().
*/
ASSERT(zio->io_vd == NULL && zio->io_bp != NULL);
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
if (IO_IS_ALLOCATING(zio) &&
!(zio->io_flags & ZIO_FLAG_CANFAIL)) {
if (zio->io_error != ENOSPC)
zio->io_reexecute |= ZIO_REEXECUTE_NOW;
else
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
}
if ((zio->io_type == ZIO_TYPE_READ ||
zio->io_type == ZIO_TYPE_FREE) &&
!(zio->io_flags & ZIO_FLAG_SCAN_THREAD) &&
zio->io_error == ENXIO &&
spa_load_state(zio->io_spa) == SPA_LOAD_NONE &&
spa_get_failmode(zio->io_spa) != ZIO_FAILURE_MODE_CONTINUE)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
if (!(zio->io_flags & ZIO_FLAG_CANFAIL) && !zio->io_reexecute)
zio->io_reexecute |= ZIO_REEXECUTE_SUSPEND;
/*
* Here is a possibly good place to attempt to do
* either combinatorial reconstruction or error correction
* based on checksums. It also might be a good place
* to send out preliminary ereports before we suspend
* processing.
*/
}
/*
* If there were logical child errors, they apply to us now.
* We defer this until now to avoid conflating logical child
* errors with errors that happened to the zio itself when
* updating vdev stats and reporting FMA events above.
*/
zio_inherit_child_errors(zio, ZIO_CHILD_LOGICAL);
if ((zio->io_error || zio->io_reexecute) &&
IO_IS_ALLOCATING(zio) && zio->io_gang_leader == zio &&
!(zio->io_flags & (ZIO_FLAG_IO_REWRITE | ZIO_FLAG_NOPWRITE)))
zio_dva_unallocate(zio, zio->io_gang_tree, zio->io_bp);
zio_gang_tree_free(&zio->io_gang_tree);
/*
* Godfather I/Os should never suspend.
*/
if ((zio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND))
zio->io_reexecute &= ~ZIO_REEXECUTE_SUSPEND;
if (zio->io_reexecute) {
/*
* This is a logical I/O that wants to reexecute.
*
* Reexecute is top-down. When an i/o fails, if it's not
* the root, it simply notifies its parent and sticks around.
* The parent, seeing that it still has children in zio_done(),
* does the same. This percolates all the way up to the root.
* The root i/o will reexecute or suspend the entire tree.
*
* This approach ensures that zio_reexecute() honors
* all the original i/o dependency relationships, e.g.
* parents not executing until children are ready.
*/
ASSERT(zio->io_child_type == ZIO_CHILD_LOGICAL);
zio->io_gang_leader = NULL;
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
/*
* "The Godfather" I/O monitors its children but is
* not a true parent to them. It will track them through
* the pipeline but severs its ties whenever they get into
* trouble (e.g. suspended). This allows "The Godfather"
* I/O to return status without blocking.
*/
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL;
pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
if ((pio->io_flags & ZIO_FLAG_GODFATHER) &&
(zio->io_reexecute & ZIO_REEXECUTE_SUSPEND)) {
zio_remove_child(pio, zio, remove_zl);
zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
}
}
if ((pio = zio_unique_parent(zio)) != NULL) {
/*
* We're not a root i/o, so there's nothing to do
* but notify our parent. Don't propagate errors
* upward since we haven't permanently failed yet.
*/
ASSERT(!(zio->io_flags & ZIO_FLAG_GODFATHER));
zio->io_flags |= ZIO_FLAG_DONT_PROPAGATE;
zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
} else if (zio->io_reexecute & ZIO_REEXECUTE_SUSPEND) {
/*
* We'd fail again if we reexecuted now, so suspend
* until conditions improve (e.g. device comes online).
*/
zio_suspend(zio->io_spa, zio, ZIO_SUSPEND_IOERR);
} else {
/*
* Reexecution is potentially a huge amount of work.
* Hand it off to the otherwise-unused claim taskq.
*/
ASSERT(taskq_empty_ent(&zio->io_tqent));
spa_taskq_dispatch_ent(zio->io_spa,
ZIO_TYPE_CLAIM, ZIO_TASKQ_ISSUE,
(task_func_t *)zio_reexecute, zio, 0,
&zio->io_tqent);
}
return (ZIO_PIPELINE_STOP);
}
ASSERT(zio->io_child_count == 0);
ASSERT(zio->io_reexecute == 0);
ASSERT(zio->io_error == 0 || (zio->io_flags & ZIO_FLAG_CANFAIL));
/*
* Report any checksum errors, since the I/O is complete.
*/
while (zio->io_cksum_report != NULL) {
zio_cksum_report_t *zcr = zio->io_cksum_report;
zio->io_cksum_report = zcr->zcr_next;
zcr->zcr_next = NULL;
zcr->zcr_finish(zcr, NULL);
zfs_ereport_free_checksum(zcr);
}
if (zio->io_flags & ZIO_FLAG_FASTWRITE && zio->io_bp &&
!BP_IS_HOLE(zio->io_bp) && !BP_IS_EMBEDDED(zio->io_bp) &&
!(zio->io_flags & ZIO_FLAG_NOPWRITE)) {
metaslab_fastwrite_unmark(zio->io_spa, zio->io_bp);
}
/*
* It is the responsibility of the done callback to ensure that this
* particular zio is no longer discoverable for adoption, and as
* such, cannot acquire any new parents.
*/
if (zio->io_done)
zio->io_done(zio);
mutex_enter(&zio->io_lock);
zio->io_state[ZIO_WAIT_DONE] = 1;
mutex_exit(&zio->io_lock);
zl = NULL;
for (pio = zio_walk_parents(zio, &zl); pio != NULL; pio = pio_next) {
zio_link_t *remove_zl = zl;
pio_next = zio_walk_parents(zio, &zl);
zio_remove_child(pio, zio, remove_zl);
zio_notify_parent(pio, zio, ZIO_WAIT_DONE);
}
if (zio->io_waiter != NULL) {
mutex_enter(&zio->io_lock);
zio->io_executor = NULL;
cv_broadcast(&zio->io_cv);
mutex_exit(&zio->io_lock);
} else {
zio_destroy(zio);
}
return (ZIO_PIPELINE_STOP);
}
/*
* ==========================================================================
* I/O pipeline definition
* ==========================================================================
*/
static zio_pipe_stage_t *zio_pipeline[] = {
NULL,
zio_read_bp_init,
zio_write_bp_init,
zio_free_bp_init,
zio_issue_async,
zio_write_compress,
zio_encrypt,
zio_checksum_generate,
zio_nop_write,
zio_ddt_read_start,
zio_ddt_read_done,
zio_ddt_write,
zio_ddt_free,
zio_gang_assemble,
zio_gang_issue,
zio_dva_throttle,
zio_dva_allocate,
zio_dva_free,
zio_dva_claim,
zio_ready,
zio_vdev_io_start,
zio_vdev_io_done,
zio_vdev_io_assess,
zio_checksum_verify,
zio_done
};
/*
* Compare two zbookmark_phys_t's to see which we would reach first in a
* pre-order traversal of the object tree.
*
* This is simple in every case aside from the meta-dnode object. For all other
* objects, we traverse them in order (object 1 before object 2, and so on).
* However, all of these objects are traversed while traversing object 0, since
* the data it points to is the list of objects. Thus, we need to convert to a
* canonical representation so we can compare meta-dnode bookmarks to
* non-meta-dnode bookmarks.
*
* We do this by calculating "equivalents" for each field of the zbookmark.
* zbookmarks outside of the meta-dnode use their own object and level, and
* calculate the level 0 equivalent (the first L0 blkid that is contained in the
* blocks this bookmark refers to) by multiplying their blkid by their span
* (the number of L0 blocks contained within one block at their level).
* zbookmarks inside the meta-dnode calculate their object equivalent
* (which is L0equiv * dnodes per data block), use 0 for their L0equiv, and use
* level + 1<<31 (any value larger than a level could ever be) for their level.
* This causes them to always compare before a bookmark in their object
* equivalent, compare appropriately to bookmarks in other objects, and to
* compare appropriately to other bookmarks in the meta-dnode.
*/
int
zbookmark_compare(uint16_t dbss1, uint8_t ibs1, uint16_t dbss2, uint8_t ibs2,
const zbookmark_phys_t *zb1, const zbookmark_phys_t *zb2)
{
/*
* These variables represent the "equivalent" values for the zbookmark,
* after converting zbookmarks inside the meta dnode to their
* normal-object equivalents.
*/
uint64_t zb1obj, zb2obj;
uint64_t zb1L0, zb2L0;
uint64_t zb1level, zb2level;
if (zb1->zb_object == zb2->zb_object &&
zb1->zb_level == zb2->zb_level &&
zb1->zb_blkid == zb2->zb_blkid)
return (0);
/*
* BP_SPANB calculates the span in blocks.
*/
zb1L0 = (zb1->zb_blkid) * BP_SPANB(ibs1, zb1->zb_level);
zb2L0 = (zb2->zb_blkid) * BP_SPANB(ibs2, zb2->zb_level);
if (zb1->zb_object == DMU_META_DNODE_OBJECT) {
zb1obj = zb1L0 * (dbss1 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb1L0 = 0;
zb1level = zb1->zb_level + COMPARE_META_LEVEL;
} else {
zb1obj = zb1->zb_object;
zb1level = zb1->zb_level;
}
if (zb2->zb_object == DMU_META_DNODE_OBJECT) {
zb2obj = zb2L0 * (dbss2 << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
zb2L0 = 0;
zb2level = zb2->zb_level + COMPARE_META_LEVEL;
} else {
zb2obj = zb2->zb_object;
zb2level = zb2->zb_level;
}
/* Now that we have a canonical representation, do the comparison. */
if (zb1obj != zb2obj)
return (zb1obj < zb2obj ? -1 : 1);
else if (zb1L0 != zb2L0)
return (zb1L0 < zb2L0 ? -1 : 1);
else if (zb1level != zb2level)
return (zb1level > zb2level ? -1 : 1);
/*
* This can (theoretically) happen if the bookmarks have the same object
* and level, but different blkids, which can only occur if the block
* sizes are not the same. There is presently no way to change the
* indirect block sizes.
*/
return (0);
}
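/*
 * Illustrative, standalone walk-through of the canonicalization described
 * above: a level-0 meta-dnode bookmark for dnode block 5 (with 16K dnode
 * blocks, i.e. 32 dnodes per block) becomes "object 160, L0 0,
 * level 0 + 2^31", so it sorts after a plain bookmark for object 100,
 * which lives in an earlier dnode block.  The constants are mirrored here
 * only for the example and may differ from the real headers.
 */
#include <stdio.h>
#include <stdint.h>

#define SPA_MINBLOCKSHIFT	9	/* 512-byte sectors */
#define DNODE_SHIFT		9	/* 512-byte dnodes */
#define SPA_BLKPTRSHIFT		7	/* 128-byte block pointers */
#define COMPARE_META_LEVEL	(1ULL << 31)
#define BP_SPANB(ibs, level)	(1ULL << ((level) * ((ibs) - SPA_BLKPTRSHIFT)))

struct equiv { uint64_t obj, l0, level; };

static struct equiv
canonicalize(uint64_t object, uint64_t level, uint64_t blkid,
    uint16_t dbss, uint8_t ibs, int is_meta_dnode)
{
	struct equiv e;

	e.l0 = blkid * BP_SPANB(ibs, level);
	if (is_meta_dnode) {
		e.obj = e.l0 * (dbss << (SPA_MINBLOCKSHIFT - DNODE_SHIFT));
		e.l0 = 0;
		e.level = level + COMPARE_META_LEVEL;
	} else {
		e.obj = object;
		e.level = level;
	}
	return (e);
}

int
main(void)
{
	/* meta-dnode bookmark: level 0, blkid 5, 16K dnode blocks (dbss=32) */
	struct equiv meta = canonicalize(0, 0, 5, 32, 17, 1);
	/* ordinary bookmark: object 100, level 0, blkid 0 */
	struct equiv obj = canonicalize(100, 0, 0, 32, 17, 0);

	printf("meta equiv: obj=%llu l0=%llu\n",
	    (unsigned long long)meta.obj, (unsigned long long)meta.l0);
	printf("object 100 sorts %s\n", obj.obj < meta.obj ? "first" : "later");
	return (0);
}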
/*
* This function checks the following: given that last_block is the place that
* our traversal stopped last time, does that guarantee that we've visited
* every node under subtree_root? Therefore, we can't just use the raw output
* of zbookmark_compare. We have to pass in a modified version of
* subtree_root; by incrementing the block id, and then checking whether
* last_block is before or equal to that, we can tell whether or not having
* visited last_block implies that all of subtree_root's children have been
* visited.
*/
boolean_t
zbookmark_subtree_completed(const dnode_phys_t *dnp,
const zbookmark_phys_t *subtree_root, const zbookmark_phys_t *last_block)
{
zbookmark_phys_t mod_zb = *subtree_root;
mod_zb.zb_blkid++;
ASSERT(last_block->zb_level == 0);
/* The objset_phys_t isn't before anything. */
if (dnp == NULL)
return (B_FALSE);
/*
* We pass in 1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT) for the
* data block size in sectors, because that variable is only used if
* the bookmark refers to a block in the meta-dnode. Since we don't
* know without examining it what object it refers to, and there's no
* harm in passing in this value in other cases, we always pass it in.
*
* We pass in 0 for the indirect block size shift because zb2 must be
* level 0. The indirect block size is only used to calculate the span
* of the bookmark, but since the bookmark must be level 0, the span is
* always 1, so the math works out.
*
* If you make changes to how the zbookmark_compare code works, be sure
* that this code still works afterwards.
*/
return (zbookmark_compare(dnp->dn_datablkszsec, dnp->dn_indblkshift,
1ULL << (DNODE_BLOCK_SHIFT - SPA_MINBLOCKSHIFT), 0, &mod_zb,
last_block) <= 0);
}
#if defined(_KERNEL) && defined(HAVE_SPL)
EXPORT_SYMBOL(zio_type_name);
EXPORT_SYMBOL(zio_buf_alloc);
EXPORT_SYMBOL(zio_data_buf_alloc);
EXPORT_SYMBOL(zio_buf_free);
EXPORT_SYMBOL(zio_data_buf_free);
module_param(zio_delay_max, int, 0644);
MODULE_PARM_DESC(zio_delay_max, "Max zio millisec delay before posting event");
module_param(zio_requeue_io_start_cut_in_line, int, 0644);
MODULE_PARM_DESC(zio_requeue_io_start_cut_in_line, "Prioritize requeued I/O");
module_param(zfs_sync_pass_deferred_free, int, 0644);
MODULE_PARM_DESC(zfs_sync_pass_deferred_free,
"Defer frees starting in this pass");
module_param(zfs_sync_pass_dont_compress, int, 0644);
MODULE_PARM_DESC(zfs_sync_pass_dont_compress,
"Don't compress starting in this pass");
module_param(zfs_sync_pass_rewrite, int, 0644);
MODULE_PARM_DESC(zfs_sync_pass_rewrite,
"Rewrite new bps starting in this pass");
module_param(zio_dva_throttle_enabled, int, 0644);
MODULE_PARM_DESC(zio_dva_throttle_enabled,
"Throttle block allocations in the ZIO pipeline");
#endif
diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run
index fb489d9fe437..7b51ef47ba5a 100644
--- a/tests/runfiles/linux.run
+++ b/tests/runfiles/linux.run
@@ -1,810 +1,824 @@
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
[DEFAULT]
pre = setup
quiet = False
pre_user = root
user = root
timeout = 600
post_user = root
post = cleanup
outputdir = /var/tmp/test_results
tags = ['functional']
# Update to use ZFS_ACL_* variables and user_run helper.
# posix_001_pos
# posix_002_pos
[tests/functional/acl/posix]
tests = ['posix_003_pos']
tags = ['functional', 'acl', 'posix']
[tests/functional/arc]
tests = ['dbufstats_001_pos', 'dbufstats_002_pos']
tags = ['functional', 'arc']
[tests/functional/atime]
tests = ['atime_001_pos', 'atime_002_neg', 'atime_003_pos']
tags = ['functional', 'atime']
[tests/functional/bootfs]
tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos',
'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_006_pos', 'bootfs_007_pos',
'bootfs_008_pos']
tags = ['functional', 'bootfs']
[tests/functional/cache]
tests = ['cache_001_pos', 'cache_002_pos', 'cache_003_pos', 'cache_004_neg',
'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg',
'cache_009_pos', 'cache_010_neg', 'cache_011_pos']
tags = ['functional', 'cache']
[tests/functional/cachefile]
tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos',
'cachefile_004_pos']
tags = ['functional', 'cachefile']
# 'sensitive_none_lookup', 'sensitive_none_delete',
# 'sensitive_formd_lookup', 'sensitive_formd_delete',
# 'insensitive_none_lookup', 'insensitive_none_delete',
# 'insensitive_formd_lookup', 'insensitive_formd_delete',
# 'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete',
# 'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete']
[tests/functional/casenorm]
tests = ['case_all_values', 'norm_all_values']
tags = ['functional', 'casenorm']
[tests/functional/channel_program/lua_core]
tests = ['tst.args_to_lua', 'tst.divide_by_zero', 'tst.exists',
'tst.integer_illegal', 'tst.integer_overflow', 'tst.language_functions_neg',
'tst.language_functions_pos', 'tst.large_prog', 'tst.libraries',
'tst.memory_limit', 'tst.nested_neg', 'tst.nested_pos', 'tst.nvlist_to_lua',
'tst.recursive_neg', 'tst.recursive_pos', 'tst.return_large',
'tst.return_nvlist_neg', 'tst.return_nvlist_pos',
'tst.return_recursive_table', 'tst.timeout']
tags = ['functional', 'channel_program', 'lua_core']
[tests/functional/channel_program/synctask_core]
tests = ['tst.destroy_fs', 'tst.destroy_snap', 'tst.get_count_and_limit',
'tst.get_index_props', 'tst.get_mountpoint', 'tst.get_neg',
'tst.get_number_props', 'tst.get_string_props', 'tst.get_type',
'tst.get_userquota', 'tst.get_written', 'tst.list_children',
'tst.list_clones', 'tst.list_snapshots', 'tst.list_system_props',
'tst.list_user_props', 'tst.parse_args_neg','tst.promote_conflict',
'tst.promote_multiple', 'tst.promote_simple', 'tst.rollback_mult',
'tst.rollback_one', 'tst.snapshot_destroy', 'tst.snapshot_neg',
'tst.snapshot_recursive', 'tst.snapshot_simple']
tags = ['functional', 'channel_program', 'synctask_core']
[tests/functional/chattr]
tests = ['chattr_001_pos', 'chattr_002_neg']
tags = ['functional', 'chattr']
[tests/functional/checksum]
tests = ['run_edonr_test', 'run_sha2_test', 'run_skein_test', 'filetest_001_pos']
tags = ['functional', 'checksum']
[tests/functional/clean_mirror]
tests = [ 'clean_mirror_001_pos', 'clean_mirror_002_pos',
'clean_mirror_003_pos', 'clean_mirror_004_pos']
tags = ['functional', 'clean_mirror']
[tests/functional/cli_root/zdb]
tests = ['zdb_001_neg', 'zdb_002_pos', 'zdb_003_pos', 'zdb_004_pos',
'zdb_005_pos', 'zdb_006_pos']
pre =
post =
tags = ['functional', 'cli_root', 'zdb']
[tests/functional/cli_root/zfs]
tests = ['zfs_001_neg', 'zfs_002_pos', 'zfs_003_neg']
tags = ['functional', 'cli_root', 'zfs']
[tests/functional/cli_root/zfs_bookmark]
tests = ['zfs_bookmark_cliargs']
tags = ['functional', 'cli_root', 'zfs_bookmark']
[tests/functional/cli_root/zfs_change-key]
tests = ['zfs_change-key', 'zfs_change-key_child', 'zfs_change-key_format',
'zfs_change-key_inherit', 'zfs_change-key_load', 'zfs_change-key_location',
'zfs_change-key_pbkdf2iters']
tags = ['functional', 'cli_root', 'zfs_change-key']
[tests/functional/cli_root/zfs_clone]
tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos',
'zfs_clone_004_pos', 'zfs_clone_005_pos', 'zfs_clone_006_pos',
'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg',
'zfs_clone_010_pos', 'zfs_clone_encrypted']
tags = ['functional', 'cli_root', 'zfs_clone']
[tests/functional/cli_root/zfs_copies]
tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_003_pos',
'zfs_copies_004_neg', 'zfs_copies_005_neg', 'zfs_copies_006_pos']
tags = ['functional', 'cli_root', 'zfs_copies']
[tests/functional/cli_root/zfs_create]
tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos',
'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos',
'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg',
'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos',
'zfs_create_013_pos', 'zfs_create_014_pos', 'zfs_create_encrypted',
'zfs_create_crypt_combos']
tags = ['functional', 'cli_root', 'zfs_create']
[tests/functional/cli_root/zfs_destroy]
tests = ['zfs_destroy_001_pos', 'zfs_destroy_002_pos', 'zfs_destroy_003_pos',
'zfs_destroy_004_pos', 'zfs_destroy_005_neg', 'zfs_destroy_006_neg',
'zfs_destroy_007_neg', 'zfs_destroy_008_pos', 'zfs_destroy_009_pos',
'zfs_destroy_010_pos', 'zfs_destroy_011_pos', 'zfs_destroy_012_pos',
'zfs_destroy_013_neg', 'zfs_destroy_014_pos', 'zfs_destroy_015_pos',
'zfs_destroy_016_pos']
tags = ['functional', 'cli_root', 'zfs_destroy']
[tests/functional/cli_root/zfs_diff]
tests = ['zfs_diff_changes', 'zfs_diff_cliargs', 'zfs_diff_timestamp',
'zfs_diff_types']
tags = ['functional', 'cli_root', 'zfs_diff']
[tests/functional/cli_root/zfs_get]
tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos',
'zfs_get_004_pos', 'zfs_get_005_neg', 'zfs_get_006_neg', 'zfs_get_007_neg',
'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg']
tags = ['functional', 'cli_root', 'zfs_get']
[tests/functional/cli_root/zfs_inherit]
tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos']
tags = ['functional', 'cli_root', 'zfs_inherit']
[tests/functional/cli_root/zfs_load-key]
tests = ['zfs_load-key', 'zfs_load-key_all', 'zfs_load-key_file',
'zfs_load-key_location', 'zfs_load-key_noop', 'zfs_load-key_recursive']
tags = ['functional', 'cli_root', 'zfs_load-key']
# zfs_mount_006_pos - https://github.com/zfsonlinux/zfs/issues/4990
[tests/functional/cli_root/zfs_mount]
tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos',
'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_007_pos',
'zfs_mount_008_pos', 'zfs_mount_009_neg', 'zfs_mount_010_neg',
'zfs_mount_011_neg', 'zfs_mount_012_neg', 'zfs_mount_all_001_pos',
'zfs_mount_encrypted', 'zfs_mount_remount', 'zfs_multi_mount']
tags = ['functional', 'cli_root', 'zfs_mount']
[tests/functional/cli_root/zfs_program]
tests = ['zfs_program_json']
tags = ['functional', 'cli_root', 'zfs_program']
[tests/functional/cli_root/zfs_promote]
tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos',
'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg',
'zfs_promote_007_neg', 'zfs_promote_008_pos', 'zfs_promote_encryptionroot']
tags = ['functional', 'cli_root', 'zfs_promote']
[tests/functional/cli_root/zfs_property]
tests = ['zfs_written_property_001_pos']
tags = ['functional', 'cli_root', 'zfs_property']
# zfs_receive_004_neg - Fails for OpenZFS on illumos
[tests/functional/cli_root/zfs_receive]
tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos',
'zfs_receive_005_neg', 'zfs_receive_006_pos',
'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg',
'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos',
'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos',
'receive-o-x_props_override', 'zfs_receive_from_encrypted',
'zfs_receive_to_encrypted', 'zfs_receive_raw',
'zfs_receive_raw_incremental']
tags = ['functional', 'cli_root', 'zfs_receive']
# zfs_rename_006_pos - https://github.com/zfsonlinux/zfs/issues/5647
# zfs_rename_009_neg - https://github.com/zfsonlinux/zfs/issues/5648
[tests/functional/cli_root/zfs_rename]
tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos',
'zfs_rename_004_neg', 'zfs_rename_005_neg',
'zfs_rename_007_pos', 'zfs_rename_008_pos',
'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg',
'zfs_rename_013_pos', 'zfs_rename_encrypted_child',
'zfs_rename_to_encrypted']
tags = ['functional', 'cli_root', 'zfs_rename']
[tests/functional/cli_root/zfs_reservation]
tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos']
tags = ['functional', 'cli_root', 'zfs_reservation']
[tests/functional/cli_root/zfs_rollback]
tests = ['zfs_rollback_001_pos', 'zfs_rollback_002_pos',
'zfs_rollback_003_neg', 'zfs_rollback_004_neg']
tags = ['functional', 'cli_root', 'zfs_rollback']
[tests/functional/cli_root/zfs_send]
tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos',
'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos',
'zfs_send_007_pos', 'zfs_send_encrypted', 'zfs_send_raw',
'zfs_send_sparse', 'zfs_send-b']
tags = ['functional', 'cli_root', 'zfs_send']
[tests/functional/cli_root/zfs_set]
tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos',
'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos',
'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos',
'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos',
'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos',
'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos',
'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg',
'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos',
'mountpoint_003_pos', 'ro_props_001_pos', 'zfs_set_keylocation']
tags = ['functional', 'cli_root', 'zfs_set']
[tests/functional/cli_root/zfs_share]
tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos',
'zfs_share_004_pos', 'zfs_share_005_pos', 'zfs_share_006_pos',
'zfs_share_007_neg', 'zfs_share_008_neg', 'zfs_share_009_neg',
'zfs_share_010_neg', 'zfs_share_011_pos']
tags = ['functional', 'cli_root', 'zfs_share']
[tests/functional/cli_root/zfs_snapshot]
tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg',
'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg',
'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 'zfs_snapshot_008_neg',
'zfs_snapshot_009_pos']
tags = ['functional', 'cli_root', 'zfs_snapshot']
[tests/functional/cli_root/zfs_unload-key]
tests = ['zfs_unload-key', 'zfs_unload-key_all', 'zfs_unload-key_recursive']
tags = ['functional', 'cli_root', 'zfs_unload-key']
[tests/functional/cli_root/zfs_unmount]
tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos',
'zfs_unmount_004_pos', 'zfs_unmount_005_pos', 'zfs_unmount_006_pos',
'zfs_unmount_007_neg', 'zfs_unmount_008_neg', 'zfs_unmount_009_pos',
'zfs_unmount_all_001_pos']
tags = ['functional', 'cli_root', 'zfs_unmount']
[tests/functional/cli_root/zfs_unshare]
tests = ['zfs_unshare_001_pos', 'zfs_unshare_002_pos', 'zfs_unshare_003_pos',
'zfs_unshare_004_neg', 'zfs_unshare_005_neg', 'zfs_unshare_006_pos']
tags = ['functional', 'cli_root', 'zfs_unshare']
[tests/functional/cli_root/zfs_upgrade]
tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos',
'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg',
'zfs_upgrade_007_neg']
tags = ['functional', 'cli_root', 'zfs_upgrade']
[tests/functional/cli_root/zpool]
tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos']
tags = ['functional', 'cli_root', 'zpool']
[tests/functional/cli_root/zpool_add]
tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos',
'zpool_add_004_pos', 'zpool_add_005_pos', 'zpool_add_006_pos',
'zpool_add_007_neg', 'zpool_add_008_neg', 'zpool_add_009_neg',
'zpool_add_010_pos',
'add-o_ashift', 'add_prop_ashift', 'add_nested_replacing_spare']
tags = ['functional', 'cli_root', 'zpool_add']
[tests/functional/cli_root/zpool_attach]
tests = ['zpool_attach_001_neg', 'attach-o_ashift']
tags = ['functional', 'cli_root', 'zpool_attach']
[tests/functional/cli_root/zpool_clear]
tests = ['zpool_clear_001_pos', 'zpool_clear_002_neg', 'zpool_clear_003_neg',
'zpool_clear_readonly']
tags = ['functional', 'cli_root', 'zpool_clear']
[tests/functional/cli_root/zpool_create]
tests = ['zpool_create_001_pos', 'zpool_create_002_pos',
'zpool_create_003_pos', 'zpool_create_004_pos', 'zpool_create_005_pos',
'zpool_create_006_pos', 'zpool_create_007_neg', 'zpool_create_008_pos',
'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_011_neg',
'zpool_create_012_neg', 'zpool_create_014_neg',
'zpool_create_015_neg', 'zpool_create_016_pos', 'zpool_create_017_neg',
'zpool_create_018_pos', 'zpool_create_019_pos', 'zpool_create_020_pos',
'zpool_create_021_pos', 'zpool_create_022_pos', 'zpool_create_023_neg',
'zpool_create_024_pos',
'zpool_create_encrypted', 'zpool_create_crypt_combos',
'zpool_create_features_001_pos', 'zpool_create_features_002_pos',
'zpool_create_features_003_pos', 'zpool_create_features_004_neg',
'zpool_create_features_005_pos',
'create-o_ashift']
tags = ['functional', 'cli_root', 'zpool_create']
[tests/functional/cli_root/zpool_destroy]
tests = ['zpool_destroy_001_pos', 'zpool_destroy_002_pos',
'zpool_destroy_003_neg']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_destroy']
[tests/functional/cli_root/zpool_detach]
tests = ['zpool_detach_001_neg']
tags = ['functional', 'cli_root', 'zpool_detach']
[tests/functional/cli_root/zpool_events]
tests = ['zpool_events_clear', 'zpool_events_cliargs', 'zpool_events_follow',
'zpool_events_poolname']
tags = ['functional', 'cli_root', 'zpool_events']
[tests/functional/cli_root/zpool_expand]
tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos',
'zpool_expand_003_neg', 'zpool_expand_004_pos']
tags = ['functional', 'cli_root', 'zpool_expand']
[tests/functional/cli_root/zpool_export]
tests = ['zpool_export_001_pos', 'zpool_export_002_pos',
'zpool_export_003_neg', 'zpool_export_004_pos']
tags = ['functional', 'cli_root', 'zpool_export']
[tests/functional/cli_root/zpool_get]
tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos',
'zpool_get_004_neg']
tags = ['functional', 'cli_root', 'zpool_get']
[tests/functional/cli_root/zpool_history]
tests = ['zpool_history_001_neg', 'zpool_history_002_pos']
tags = ['functional', 'cli_root', 'zpool_history']
# zpool_import_missing_003_pos - https://github.com/zfsonlinux/zfs/issues/6839
[tests/functional/cli_root/zpool_import]
tests = ['zpool_import_001_pos', 'zpool_import_002_pos',
'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos',
'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos',
'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg',
'zpool_import_012_pos', 'zpool_import_013_neg', 'zpool_import_014_pos',
'zpool_import_015_pos',
'zpool_import_features_001_pos', 'zpool_import_features_002_neg',
'zpool_import_features_003_pos','zpool_import_missing_001_pos',
'zpool_import_missing_002_pos',
'zpool_import_rename_001_pos', 'zpool_import_all_001_pos',
'zpool_import_encrypted', 'zpool_import_encrypted_load',
'zpool_import_errata3']
tags = ['functional', 'cli_root', 'zpool_import']
[tests/functional/cli_root/zpool_labelclear]
tests = ['zpool_labelclear_active', 'zpool_labelclear_exported']
pre =
post =
tags = ['functional', 'cli_root', 'zpool_labelclear']
[tests/functional/cli_root/zpool_offline]
tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg', 'zpool_offline_003_pos']
tags = ['functional', 'cli_root', 'zpool_offline']
[tests/functional/cli_root/zpool_online]
tests = ['zpool_online_001_pos', 'zpool_online_002_neg']
tags = ['functional', 'cli_root', 'zpool_online']
[tests/functional/cli_root/zpool_remove]
tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos',
'zpool_remove_003_pos']
tags = ['functional', 'cli_root', 'zpool_remove']
[tests/functional/cli_root/zpool_reopen]
tests = ['zpool_reopen_001_pos', 'zpool_reopen_002_pos',
'zpool_reopen_003_pos', 'zpool_reopen_004_pos', 'zpool_reopen_005_pos',
'zpool_reopen_006_neg']
tags = ['functional', 'cli_root', 'zpool_reopen']
[tests/functional/cli_root/zpool_replace]
tests = ['zpool_replace_001_neg', 'replace-o_ashift', 'replace_prop_ashift']
tags = ['functional', 'cli_root', 'zpool_replace']
[tests/functional/cli_root/zpool_scrub]
tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos',
'zpool_scrub_004_pos', 'zpool_scrub_005_pos',
'zpool_scrub_encrypted_unloaded', 'zpool_scrub_offline_device']
tags = ['functional', 'cli_root', 'zpool_scrub']
[tests/functional/cli_root/zpool_set]
tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg',
'zpool_set_ashift', 'zpool_set_features']
tags = ['functional', 'cli_root', 'zpool_set']
[tests/functional/cli_root/zpool_split]
tests = ['zpool_split_cliargs', 'zpool_split_devices',
'zpool_split_encryption', 'zpool_split_props', 'zpool_split_vdevs']
tags = ['functional', 'cli_root', 'zpool_split']
[tests/functional/cli_root/zpool_status]
tests = ['zpool_status_001_pos', 'zpool_status_002_pos','zpool_status_003_pos',
'zpool_status_-c_disable', 'zpool_status_-c_homedir',
'zpool_status_-c_searchpath']
user =
tags = ['functional', 'cli_root', 'zpool_status']
[tests/functional/cli_root/zpool_sync]
tests = ['zpool_sync_001_pos', 'zpool_sync_002_neg']
tags = ['functional', 'cli_root', 'zpool_sync']
[tests/functional/cli_root/zpool_upgrade]
tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_002_pos',
'zpool_upgrade_003_pos', 'zpool_upgrade_004_pos',
'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg',
'zpool_upgrade_007_pos', 'zpool_upgrade_008_pos',
'zpool_upgrade_009_neg']
tags = ['functional', 'cli_root', 'zpool_upgrade']
[tests/functional/cli_user/misc]
tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg',
'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg',
'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg',
'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg',
'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg',
'zfs_share_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg',
'zfs_unmount_001_neg', 'zfs_unshare_001_neg', 'zfs_upgrade_001_neg',
'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg',
'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg',
'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg',
'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg',
'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg',
'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg',
'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos',
'arc_summary_001_pos', 'arc_summary_002_neg',
'arc_summary3_001_pos', 'dbufstat_001_pos']
user =
tags = ['functional', 'cli_user', 'misc']
[tests/functional/cli_user/zfs_list]
tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos',
'zfs_list_004_neg', 'zfs_list_007_pos', 'zfs_list_008_neg']
user =
tags = ['functional', 'cli_user', 'zfs_list']
[tests/functional/cli_user/zpool_iostat]
tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos',
'zpool_iostat_003_neg', 'zpool_iostat_004_pos',
'zpool_iostat_005_pos', 'zpool_iostat_-c_disable',
'zpool_iostat_-c_homedir', 'zpool_iostat_-c_searchpath']
user =
tags = ['functional', 'cli_user', 'zpool_iostat']
[tests/functional/cli_user/zpool_list]
tests = ['zpool_list_001_pos', 'zpool_list_002_neg']
user =
tags = ['functional', 'cli_user', 'zpool_list']
[tests/functional/compression]
tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos',
'compress_004_pos']
tags = ['functional', 'compression']
[tests/functional/ctime]
tests = ['ctime_001_pos' ]
tags = ['functional', 'ctime']
[tests/functional/deadman]
tests = ['deadman_sync', 'deadman_zio']
pre =
post =
tags = ['functional', 'deadman']
[tests/functional/delegate]
tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos',
'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos',
'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg',
'zfs_allow_010_pos', 'zfs_allow_011_neg', 'zfs_allow_012_neg',
'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos',
'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos',
'zfs_unallow_007_neg', 'zfs_unallow_008_neg']
tags = ['functional', 'delegate']
[tests/functional/devices]
tests = ['devices_001_pos', 'devices_002_neg', 'devices_003_pos']
tags = ['functional', 'devices']
[tests/functional/events]
tests = ['events_001_pos', 'events_002_pos', 'zed_rc_filter']
tags = ['functional', 'events']
[tests/functional/exec]
tests = ['exec_001_pos', 'exec_002_neg']
tags = ['functional', 'exec']
[tests/functional/fault]
tests = ['auto_online_001_pos', 'auto_replace_001_pos', 'auto_spare_001_pos',
'auto_spare_002_pos', 'auto_spare_ashift', 'auto_spare_multiple',
'scrub_after_resilver']
tags = ['functional', 'fault']
[tests/functional/features/async_destroy]
tests = ['async_destroy_001_pos']
tags = ['functional', 'features', 'async_destroy']
[tests/functional/features/large_dnode]
tests = ['large_dnode_001_pos', 'large_dnode_002_pos', 'large_dnode_003_pos',
'large_dnode_004_neg', 'large_dnode_005_pos', 'large_dnode_006_pos',
'large_dnode_007_neg', 'large_dnode_008_pos', 'large_dnode_009_pos']
tags = ['functional', 'features', 'large_dnode']
[tests/functional/grow_pool]
tests = ['grow_pool_001_pos']
tags = ['functional', 'grow_pool']
[tests/functional/grow_replicas]
tests = ['grow_replicas_001_pos']
pre =
post =
tags = ['functional', 'grow_replicas']
[tests/functional/history]
tests = ['history_001_pos', 'history_002_pos', 'history_003_pos',
'history_004_pos', 'history_005_neg', 'history_006_neg',
'history_007_pos', 'history_008_pos', 'history_009_pos',
'history_010_pos']
tags = ['functional', 'history']
[tests/functional/hkdf]
tests = ['run_hkdf_test']
tags = ['functional', 'hkdf']
[tests/functional/inheritance]
tests = ['inherit_001_pos']
pre =
tags = ['functional', 'inheritance']
[tests/functional/inuse]
tests = ['inuse_001_pos', 'inuse_003_pos', 'inuse_004_pos',
'inuse_005_pos', 'inuse_006_pos', 'inuse_007_pos', 'inuse_008_pos',
'inuse_009_pos']
post =
tags = ['functional', 'inuse']
[tests/functional/large_files]
tests = ['large_files_001_pos', 'large_files_002_pos']
tags = ['functional', 'large_files']
[tests/functional/largest_pool]
tests = ['largest_pool_001_pos']
pre =
post =
tags = ['functional', 'largest_pool']
[tests/functional/link_count]
tests = ['link_count_001']
tags = ['functional', 'link_count']
[tests/functional/migration]
tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos',
'migration_004_pos', 'migration_005_pos', 'migration_006_pos',
'migration_007_pos', 'migration_008_pos', 'migration_009_pos',
'migration_010_pos', 'migration_011_pos', 'migration_012_pos']
tags = ['functional', 'migration']
[tests/functional/mmap]
tests = ['mmap_write_001_pos', 'mmap_read_001_pos', 'mmap_libaio_001_pos']
tags = ['functional', 'mmap']
[tests/functional/mmp]
tests = ['mmp_on_thread', 'mmp_on_uberblocks', 'mmp_on_off', 'mmp_interval',
'mmp_active_import', 'mmp_inactive_import', 'mmp_exported_import',
'mmp_write_uberblocks', 'mmp_reset_interval', 'multihost_history']
tags = ['functional', 'mmp']
[tests/functional/mount]
tests = ['umount_001', 'umountall_001']
tags = ['functional', 'mount']
[tests/functional/mv_files]
tests = ['mv_files_001_pos', 'mv_files_002_pos']
tags = ['functional', 'mv_files']
[tests/functional/nestedfs]
tests = ['nestedfs_001_pos']
tags = ['functional', 'nestedfs']
[tests/functional/no_space]
tests = ['enospc_001_pos', 'enospc_002_pos', 'enospc_003_pos']
tags = ['functional', 'no_space']
[tests/functional/nopwrite]
tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative',
'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync',
'nopwrite_varying_compression', 'nopwrite_volume']
tags = ['functional', 'nopwrite']
[tests/functional/online_offline]
tests = ['online_offline_001_pos', 'online_offline_002_neg',
'online_offline_003_neg']
tags = ['functional', 'online_offline']
[tests/functional/pool_names]
tests = ['pool_names_001_pos', 'pool_names_002_neg']
pre =
post =
tags = ['functional', 'pool_names']
[tests/functional/poolversion]
tests = ['poolversion_001_pos', 'poolversion_002_pos']
tags = ['functional', 'poolversion']
[tests/functional/privilege]
tests = ['privilege_001_pos', 'privilege_002_pos']
tags = ['functional', 'privilege']
[tests/functional/projectquota]
tests = ['projectid_001_pos', 'projectid_002_pos', 'projectid_003_pos',
'projectquota_001_pos', 'projectquota_002_pos', 'projectquota_003_pos',
'projectquota_004_neg', 'projectquota_005_pos', 'projectquota_006_pos',
'projectquota_007_pos', 'projectquota_008_pos', 'projectquota_009_pos',
'projectspace_001_pos', 'projectspace_002_pos', 'projectspace_003_pos',
'projectspace_004_pos',
'projecttree_001_pos', 'projecttree_002_pos', 'projecttree_003_neg' ]
tags = ['functional', 'projectquota']
[tests/functional/quota]
tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos',
'quota_004_pos', 'quota_005_pos', 'quota_006_neg']
tags = ['functional', 'quota']
[tests/functional/raidz]
tests = ['raidz_001_neg', 'raidz_002_pos']
tags = ['functional', 'raidz']
[tests/functional/redundancy]
tests = ['redundancy_001_pos', 'redundancy_002_pos', 'redundancy_003_pos',
'redundancy_004_neg']
tags = ['functional', 'redundancy']
[tests/functional/refquota]
tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos',
'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg']
tags = ['functional', 'refquota']
# refreserv_004_pos - Fails for OpenZFS on illumos
[tests/functional/refreserv]
tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos',
'refreserv_005_pos']
tags = ['functional', 'refreserv']
+[tests/functional/removal]
+pre =
+tests = ['removal_sanity', 'removal_all_vdev', 'removal_check_space',
+ 'removal_condense_export',
+ 'removal_multiple_indirection', 'removal_remap',
+ 'removal_remap_deadlists',
+ 'removal_with_add', 'removal_with_create_fs', 'removal_with_dedup',
+ 'removal_with_export', 'removal_with_ganging', 'removal_with_remap',
+ 'removal_with_remove', 'removal_with_scrub', 'removal_with_send',
+ 'removal_with_send_recv', 'removal_with_snapshot', 'removal_with_write',
+ 'removal_with_zdb', 'removal_resume_export',
+ 'remove_mirror', 'remove_mirror_sanity', 'remove_raidz']
+tags = ['functional', 'removal']
+
[tests/functional/rename_dirs]
tests = ['rename_dirs_001_pos']
tags = ['functional', 'rename_dirs']
[tests/functional/replacement]
tests = ['replacement_001_pos', 'replacement_002_pos', 'replacement_003_pos']
tags = ['functional', 'replacement']
# reservation_001_pos - https://github.com/zfsonlinux/zfs/issues/4445
# reservation_013_pos - https://github.com/zfsonlinux/zfs/issues/4444
# reservation_018_pos - https://github.com/zfsonlinux/zfs/issues/5642
[tests/functional/reservation]
tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos',
'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos',
'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos',
'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos',
'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos',
'reservation_016_pos', 'reservation_017_pos', 'reservation_019_pos',
'reservation_020_pos', 'reservation_021_neg', 'reservation_022_pos']
tags = ['functional', 'reservation']
[tests/functional/rootpool]
tests = ['rootpool_002_neg', 'rootpool_003_neg', 'rootpool_007_pos']
tags = ['functional', 'rootpool']
# rsend_008_pos - https://github.com/zfsonlinux/zfs/issues/6066
[tests/functional/rsend]
tests = ['rsend_001_pos', 'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos',
'rsend_005_pos', 'rsend_006_pos', 'rsend_007_pos', 'rsend_009_pos',
'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos',
'rsend_013_pos', 'rsend_014_pos',
'rsend_019_pos', 'rsend_020_pos',
'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos',
'send-c_verify_ratio', 'send-c_verify_contents', 'send-c_props',
'send-c_incremental', 'send-c_volume', 'send-c_zstreamdump',
'send-c_lz4_disabled', 'send-c_recv_lz4_disabled',
'send-c_mixed_compression', 'send-c_stream_size_estimate', 'send-cD',
'send-c_embedded_blocks', 'send-c_resume', 'send-cpL_varied_recsize',
'send-c_recv_dedup', 'send_encrypted_files', 'send_encrypted_heirarchy',
'send_freeobjects', 'send_realloc_dnode_size']
tags = ['functional', 'rsend']
[tests/functional/scrub_mirror]
tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos',
'scrub_mirror_003_pos', 'scrub_mirror_004_pos']
tags = ['functional', 'scrub_mirror']
[tests/functional/slog]
tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos',
'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg',
'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_012_neg',
'slog_013_pos', 'slog_014_pos', 'slog_015_neg', 'slog_replay_fs',
'slog_replay_volume']
tags = ['functional', 'slog']
[tests/functional/snapshot]
tests = ['clone_001_pos', 'rollback_001_pos', 'rollback_002_pos',
'rollback_003_pos', 'snapshot_001_pos', 'snapshot_002_pos',
'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos',
'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos',
'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos',
'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos',
'snapshot_015_pos', 'snapshot_016_pos', 'snapshot_017_pos']
tags = ['functional', 'snapshot']
# snapused_004_pos - https://github.com/zfsonlinux/zfs/issues/5513
[tests/functional/snapused]
tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos',
'snapused_005_pos']
tags = ['functional', 'snapused']
[tests/functional/sparse]
tests = ['sparse_001_pos']
tags = ['functional', 'sparse']
[tests/functional/threadsappend]
tests = ['threadsappend_001_pos']
tags = ['functional', 'threadsappend']
[tests/functional/tmpfile]
tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos']
tags = ['functional', 'tmpfile']
[tests/functional/truncate]
tests = ['truncate_001_pos', 'truncate_002_pos', 'truncate_timestamps']
tags = ['functional', 'truncate']
[tests/functional/upgrade]
tests = ['upgrade_userobj_001_pos', 'upgrade_projectquota_001_pos']
tags = ['functional', 'upgrade']
[tests/functional/user_namespace]
tests = ['user_namespace_001']
tags = ['functional', 'user_namespace']
[tests/functional/userquota]
tests = [
'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos',
'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos',
'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos',
'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg',
'userquota_013_pos',
'userspace_001_pos', 'userspace_002_pos', 'userspace_003_pos',
'groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos' ]
tags = ['functional', 'userquota']
# vdev_zaps_007_pos -- fails due to a pre-existing issue with zpool split
[tests/functional/vdev_zaps]
tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos',
'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos']
tags = ['functional', 'vdev_zaps']
[tests/functional/write_dirs]
tests = ['write_dirs_001_pos', 'write_dirs_002_pos']
tags = ['functional', 'write_dirs']
[tests/functional/xattr]
tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos',
'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg', 'xattr_008_pos',
'xattr_009_neg', 'xattr_010_neg', 'xattr_011_pos', 'xattr_012_pos',
'xattr_013_pos']
tags = ['functional', 'xattr']
[tests/functional/zvol/zvol_ENOSPC]
tests = ['zvol_ENOSPC_001_pos']
tags = ['functional', 'zvol', 'zvol_ENOSPC']
[tests/functional/zvol/zvol_cli]
tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg']
tags = ['functional', 'zvol', 'zvol_cli']
[tests/functional/zvol/zvol_misc]
tests = ['zvol_misc_001_neg', 'zvol_misc_002_pos', 'zvol_misc_003_neg',
'zvol_misc_004_pos', 'zvol_misc_005_neg', 'zvol_misc_006_pos',
'zvol_misc_snapdev', 'zvol_misc_volmode', 'zvol_misc_zil']
tags = ['functional', 'zvol', 'zvol_misc']
[tests/functional/zvol/zvol_swap]
tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_003_pos',
'zvol_swap_004_pos', 'zvol_swap_005_pos', 'zvol_swap_006_pos']
tags = ['functional', 'zvol', 'zvol_swap']
[tests/functional/libzfs]
tests = ['many_fds']
tags = ['functional', 'libzfs']
diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib
index 327da2b9fad5..00326dcdc407 100644
--- a/tests/zfs-tests/include/libtest.shlib
+++ b/tests/zfs-tests/include/libtest.shlib
@@ -1,3424 +1,3481 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc.
# Copyright (c) 2017 Lawrence Livermore National Security, LLC.
# Copyright (c) 2017 Datto Inc.
# Copyright (c) 2017 Open-E, Inc. All Rights Reserved.
#
. ${STF_TOOLS}/include/logapi.shlib
. ${STF_SUITE}/include/math.shlib
. ${STF_SUITE}/include/blkdev.shlib
#
# Apply constrained path when available. This is required since the
# PATH may have been modified by sudo's secure_path behavior.
#
if [ -n "$STF_PATH" ]; then
PATH="$STF_PATH"
fi
# Linux kernel version comparison function
#
# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
#
# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
#
function linux_version
{
typeset ver="$1"
[[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+")
typeset version=$(echo $ver | cut -d '.' -f 1)
typeset major=$(echo $ver | cut -d '.' -f 2)
typeset minor=$(echo $ver | cut -d '.' -f 3)
[[ -z "$version" ]] && version=0
[[ -z "$major" ]] && major=0
[[ -z "$minor" ]] && minor=0
echo $((version * 10000 + major * 100 + minor))
}
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise
function is_linux
{
if [[ $(uname -o) == "GNU/Linux" ]]; then
return 0
else
return 1
fi
}
# Determine if this is a 32-bit system
#
# Return 0 if platform is 32-bit, 1 if otherwise
function is_32bit
{
if [[ $(getconf LONG_BIT) == "32" ]]; then
return 0
else
return 1
fi
}
# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 if otherwise
function is_kmemleak
{
if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then
return 0
else
return 1
fi
}
# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
function ismounted
{
typeset fstype=$2
[[ -z $fstype ]] && fstype=zfs
typeset out dir name ret
case $fstype in
zfs)
if [[ "$1" == "/"* ]] ; then
for out in $(zfs mount | awk '{print $2}'); do
[[ $1 == $out ]] && return 0
done
else
for out in $(zfs mount | awk '{print $1}'); do
[[ $1 == $out ]] && return 0
done
fi
;;
ufs|nfs)
out=$(df -F $fstype $1 2>/dev/null)
ret=$?
(($ret != 0)) && return $ret
dir=${out%%\(*}
dir=${dir%% *}
name=${out##*\(}
name=${name%%\)*}
name=${name%% *}
[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
;;
ext*)
out=$(df -t $fstype $1 2>/dev/null)
return $?
;;
zvol)
if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
link=$(readlink -f $ZVOL_DEVDIR/$1)
[[ -n "$link" ]] && \
mount | grep -q "^$link" && \
return 0
fi
;;
esac
return 1
}
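# E.g. usage of ismounted (illustrative, assuming the suite's default
# $TESTPOOL/$TESTFS):
# ismounted $TESTPOOL/$TESTFS && echo "already mounted"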
# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function mounted
{
ismounted $1 $2
(($? == 0)) && return 0
return 1
}
# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function unmounted
{
ismounted $1 $2
(($? == 1)) && return 0
return 1
}
# split line on ","
#
# $1 - line to split
function splitline
{
echo $1 | sed "s/,/ /g"
}
function default_setup
{
default_setup_noexit "$@"
log_pass
}
#
# Given a list of disks, set up storage pools and datasets.
#
function default_setup_noexit
{
typeset disklist=$1
typeset container=$2
typeset volume=$3
log_note begin default_setup_noexit
if is_global_zone; then
if poolexists $TESTPOOL ; then
destroy_pool $TESTPOOL
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL $disklist
else
reexport_pool
fi
rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
if [[ -n $container ]]; then
rm -rf $TESTDIR1 || \
log_unresolved Could not remove $TESTDIR1
mkdir -p $TESTDIR1 || \
log_unresolved Could not create $TESTDIR1
log_must zfs create $TESTPOOL/$TESTCTR
log_must zfs set canmount=off $TESTPOOL/$TESTCTR
log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
log_must zfs set mountpoint=$TESTDIR1 \
$TESTPOOL/$TESTCTR/$TESTFS1
fi
if [[ -n $volume ]]; then
if is_global_zone ; then
log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
block_device_wait
else
log_must zfs create $TESTPOOL/$TESTVOL
fi
fi
}
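# E.g. usage of default_setup_noexit (illustrative, assuming $DISKS holds the
# suite's disk list): default_setup_noexit "$DISKS" creates the pool and file
# system only, while default_setup_noexit "$DISKS" "true" "true" also creates
# the container and volume.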
#
# Given a list of disks, set up a storage pool, file system and
# a container.
#
function default_container_setup
{
typeset disklist=$1
default_setup "$disklist" "true"
}
#
# Given a list of disks, set up a storage pool, file system
# and a volume.
#
function default_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "" "true"
}
#
# Given a list of disks, set up a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
typeset disklist=$1
default_setup "$disklist" "true" "true"
}
#
# Create a snapshot of a filesystem or volume. By default, create a snapshot
# of the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
# $2 snapshot name. Default, $TESTSNAP
#
function create_snapshot
{
typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
typeset snap=${2:-$TESTSNAP}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
if snapexists $fs_vol@$snap; then
log_fail "$fs_vol@$snap already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
log_must zfs snapshot $fs_vol@$snap
}
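# E.g. usage of create_snapshot (illustrative, using the documented defaults):
# create_snapshot $TESTPOOL/$TESTFS $TESTSNAP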
#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
typeset clone=${2:-$TESTPOOL/$TESTCLONE}
[[ -z $snap ]] && \
log_fail "Snapshot name is undefined."
[[ -z $clone ]] && \
log_fail "Clone name is undefined."
log_must zfs clone $snap $clone
}
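# E.g. usage of create_clone (illustrative, using the documented defaults):
# create_clone $TESTPOOL/$TESTFS@$TESTSNAP $TESTPOOL/$TESTCLONE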
#
# Create a bookmark of the given snapshot. By default, create a bookmark on
# the filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
typeset fs_vol=${1:-$TESTFS}
typeset snap=${2:-$TESTSNAP}
typeset bkmark=${3:-$TESTBKMARK}
[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."
if bkmarkexists $fs_vol#$bkmark; then
log_fail "$fs_vol#$bkmark already exists."
fi
datasetexists $fs_vol || \
log_fail "$fs_vol must exist."
snapexists $fs_vol@$snap || \
log_fail "$fs_vol@$snap must exist."
log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}
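# E.g. usage of create_bookmark (illustrative, assuming $TESTSNAP already
# exists on the filesystem):
# create_bookmark $TESTPOOL/$TESTFS $TESTSNAP $TESTBKMARK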
#
# Create a temporary clone as the result of an interrupted resumable 'zfs receive'
# $1 Destination filesystem name. Must not exist, will be created as the result
# of this function along with its %recv temporary clone
# $2 Source filesystem name. Must not exist, will be created and destroyed
#
function create_recv_clone
{
typeset recvfs="$1"
typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
typeset snap="$sendfs@snap1"
typeset incr="$sendfs@snap2"
typeset mountpoint="$TESTDIR/create_recv_clone"
typeset sendfile="$TESTDIR/create_recv_clone.zsnap"
[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."
datasetexists $recvfs && log_fail "Recv filesystem must not exist."
datasetexists $sendfs && log_fail "Send filesystem must not exist."
log_must zfs create -o mountpoint="$mountpoint" $sendfs
log_must zfs snapshot $snap
log_must eval "zfs send $snap | zfs recv -u $recvfs"
log_must mkfile 1m "$mountpoint/data"
log_must zfs snapshot $incr
log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 > $sendfile"
log_mustnot eval "zfs recv -su $recvfs < $sendfile"
destroy_dataset "$sendfs" "-r"
log_must rm -f "$sendfile"
if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
log_fail "Error creating temporary $recvfs/%recv clone"
fi
}
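# E.g. usage of create_recv_clone (illustrative; the destination name is only
# a placeholder and must not already exist):
# create_recv_clone $TESTPOOL/recvfs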
function default_mirror_setup
{
default_mirror_setup_noexit $1 $2 $3
log_pass
}
#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
# $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
readonly func="default_mirror_setup_noexit"
typeset primary=$1
typeset secondary=$2
[[ -z $primary ]] && \
log_fail "$func: No parameters passed"
[[ -z $secondary ]] && \
log_fail "$func: No secondary partition passed"
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL mirror $@
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}
#
# Create a number of mirrors.
# We create a number ($1) of two-way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted.
# @parameters: $1 the number of mirrors to create
# $... the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
typeset -i nmirrors=$1
shift
while ((nmirrors > 0)); do
log_must test -n "$1" -a -n "$2"
[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
shift 2
((nmirrors = nmirrors - 1))
done
}
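# E.g. usage of setup_mirrors (illustrative; disk1..disk4 are placeholder
# device names):
# setup_mirrors 2 disk1 disk2 disk3 disk4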
#
# Create a number of raidz pools.
# We create a number ($1) of two-disk raidz pools using the pairs of disks
# named on the command line. These pools are *not* mounted.
# @parameters: $1 the number of pools to create
# $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
typeset -i nraidzs=$1
shift
while ((nraidzs > 0)); do
log_must test -n "$1" -a -n "$2"
[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
shift 2
((nraidzs = nraidzs - 1))
done
}
#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
default_cleanup_noexit
log_pass
}
#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
typeset disklist="$*"
disks=(${disklist[*]})
if [[ ${#disks[*]} -lt 2 ]]; then
log_fail "A raid-z requires a minimum of two disks."
fi
[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
log_must zpool create -f $TESTPOOL raidz $disklist
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
log_pass
}
#
# Common function used to clean up storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests don't
# impact the execution of the next set.
function default_cleanup
{
default_cleanup_noexit
log_pass
}
#
# Utility function used to list all available pool names.
#
# NOTE: $KEEP is a variable containing pool names, separated by a newline
# character, that must be excluded from the returned list.
#
function get_all_pools
{
zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS"
}
function default_cleanup_noexit
{
typeset pool=""
#
# Destroying the pool will also destroy any
# filesystems it contains.
#
if is_global_zone; then
zfs unmount -a > /dev/null 2>&1
ALL_POOLS=$(get_all_pools)
# Here, we loop through the pools we're allowed to
# destroy, only destroying them if it's safe to do
# so.
while [ ! -z ${ALL_POOLS} ]
do
for pool in ${ALL_POOLS}
do
if safe_to_destroy_pool $pool ;
then
destroy_pool $pool
fi
ALL_POOLS=$(get_all_pools)
done
done
zfs mount -a
else
typeset fs=""
for fs in $(zfs list -H -o name \
| grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do
destroy_dataset "$fs" "-Rf"
done
# Clean up here to avoid leaving garbage directories behind.
for fs in $(zfs list -H -o name); do
[[ $fs == /$ZONE_POOL ]] && continue
[[ -d $fs ]] && log_must rm -rf $fs/*
done
#
# Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to
# the default value
#
for fs in $(zfs list -H -o name); do
if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then
log_must zfs set reservation=none $fs
log_must zfs set recordsize=128K $fs
log_must zfs set mountpoint=/$fs $fs
typeset enc=""
enc=$(get_prop encryption $fs)
if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \
[[ "$enc" == "off" ]]; then
log_must zfs set checksum=on $fs
fi
log_must zfs set compression=off $fs
log_must zfs set atime=on $fs
log_must zfs set devices=off $fs
log_must zfs set exec=on $fs
log_must zfs set setuid=on $fs
log_must zfs set readonly=off $fs
log_must zfs set snapdir=hidden $fs
log_must zfs set aclmode=groupmask $fs
log_must zfs set aclinherit=secure $fs
fi
done
fi
[[ -d $TESTDIR ]] && \
log_must rm -rf $TESTDIR
disk1=${DISKS%% *}
if is_mpath_device $disk1; then
delete_partitions
fi
}
#
# Common function used to clean up storage pools, file systems
# and containers.
#
function default_container_cleanup
{
if ! is_global_zone; then
reexport_pool
fi
ismounted $TESTPOOL/$TESTCTR/$TESTFS1
[[ $? -eq 0 ]] && \
log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1
destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"
[[ -e $TESTDIR1 ]] && \
log_must rm -rf $TESTDIR1 > /dev/null 2>&1
default_cleanup
}
#
# Common function used to clean up a snapshot of a file system or volume.
# Defaults to deleting the file system's snapshot.
#
# $1 snapshot name
#
function destroy_snapshot
{
typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if ! snapexists $snap; then
log_fail "'$snap' does not existed."
fi
#
# Because the value returned by 'get_prop' does not match the real
# mountpoint when the snapshot is unmounted, first check that this
# snapshot is mounted on the current system.
#
typeset mtpt=""
if ismounted $snap; then
mtpt=$(get_prop mountpoint $snap)
(($? != 0)) && \
log_fail "get_prop mountpoint $snap failed."
fi
destroy_dataset "$snap"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to clean up a clone.
#
# $1 clone name
#
function destroy_clone
{
typeset clone=${1:-$TESTPOOL/$TESTCLONE}
if ! datasetexists $clone; then
log_fail "'$clone' does not existed."
fi
# Same reasoning as in destroy_snapshot
typeset mtpt=""
if ismounted $clone; then
mtpt=$(get_prop mountpoint $clone)
(($? != 0)) && \
log_fail "get_prop mountpoint $clone failed."
fi
destroy_dataset "$clone"
[[ $mtpt != "" && -d $mtpt ]] && \
log_must rm -rf $mtpt
}
#
# Common function used to clean up a bookmark of a file system or volume.
# Defaults to deleting the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}
if ! bkmarkexists $bkmark; then
log_fail "'$bkmarkp' does not existed."
fi
destroy_dataset "$bkmark"
}
# Return 0 if a snapshot exists; $? otherwise
#
# $1 - snapshot name
function snapexists
{
zfs list -H -t snapshot "$1" > /dev/null 2>&1
return $?
}
#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
zfs list -H -t bookmark "$1" > /dev/null 2>&1
return $?
}
#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
# $1 dataset whose property is being set
# $2 property to set
# $3 value to set property to
# @return:
# 0 if the property could be set.
# non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
typeset fn=dataset_setprop
if (($# < 3)); then
log_note "$fn: Insufficient parameters (need 3, had $#)"
return 1
fi
typeset output=
output=$(zfs set $2=$3 $1 2>&1)
typeset rv=$?
if ((rv != 0)); then
log_note "Setting property on $1 failed."
log_note "property $2=$3"
log_note "Return Code: $rv"
log_note "Output: $output"
return $rv
fi
return 0
}
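# E.g. usage of dataset_setprop (illustrative, assuming the suite's default
# $TESTPOOL/$TESTFS):
# dataset_setprop $TESTPOOL/$TESTFS checksum sha256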
#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
# 0 if the dataset has been altered.
# 1 if no pool name was passed in.
# 2 if the dataset could not be found.
# 3 if the dataset could not have its properties set.
#
function dataset_set_defaultproperties
{
typeset dataset="$1"
[[ -z $dataset ]] && return 1
typeset confset=
typeset -i found=0
for confset in $(zfs list); do
if [[ $dataset = $confset ]]; then
found=1
break
fi
done
[[ $found -eq 0 ]] && return 2
if [[ -n $COMPRESSION_PROP ]]; then
dataset_setprop $dataset compression $COMPRESSION_PROP || \
return 3
log_note "Compression set to '$COMPRESSION_PROP' on $dataset"
fi
if [[ -n $CHECKSUM_PROP ]]; then
dataset_setprop $dataset checksum $CHECKSUM_PROP || \
return 3
log_note "Checksum set to '$CHECKSUM_PROP' on $dataset"
fi
return 0
}
#
# Check a numeric assertion
# @parameter: $@ the assertion to check
# @output: big loud notice if assertion failed
# @use: log_fail
#
function assert
{
(($@)) || log_fail "$@"
}
#
# Function to format the partition size of a disk.
# Given a disk cxtxdx, reduce all partitions to 0 size.
#
function zero_partitions #<whole_disk_name>
{
typeset diskname=$1
typeset i
if is_linux; then
DSK=$DEV_DSKDIR/$diskname
DSK=$(echo $DSK | sed -e "s|//|/|g")
log_must parted $DSK -s -- mklabel gpt
blockdev --rereadpt $DSK 2>/dev/null
block_device_wait
else
for i in 0 1 3 4 5 6 7
do
log_must set_partition $i "" 0mb $diskname
done
fi
return 0
}
#
# Given a slice, size and disk, this function
# formats the slice to the specified size.
# Size should be specified with units as per
# the `format` command requirements, e.g. 100mb 3gb
#
# NOTE: This entire interface is problematic for the Linux parted utility
# which requires the end of the partition to be specified. It would be
# best to retire this interface and replace it with something more flexible.
# At the moment a best effort is made.
#
function set_partition #<slice_num> <slice_start> <size_plus_units> <whole_disk_name>
{
typeset -i slicenum=$1
typeset start=$2
typeset size=$3
typeset disk=$4
if is_linux; then
if [[ -z $size || -z $disk ]]; then
log_fail "The size or disk name is unspecified."
fi
typeset size_mb=${size%%[mMgG]}
size_mb=${size_mb%%[mMgG][bB]}
if [[ ${size:1:1} == 'g' ]]; then
((size_mb = size_mb * 1024))
fi
# Create GPT partition table when setting slice 0 or
# when the device doesn't already contain a GPT label.
parted $DEV_DSKDIR/$disk -s -- print 1 >/dev/null
typeset ret_val=$?
if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then
parted $DEV_DSKDIR/$disk -s -- mklabel gpt
if [[ $? -ne 0 ]]; then
log_note "Failed to create GPT partition table on $disk"
return 1
fi
fi
# When no start is given, align on the first cylinder.
if [[ -z "$start" ]]; then
start=1
fi
# Determine the cylinder size for the device and, using
# that, calculate the end offset in cylinders.
typeset -i cly_size_kb=0
cly_size_kb=$(parted -m $DEV_DSKDIR/$disk -s -- \
unit cyl print | head -3 | tail -1 | \
awk -F '[:k.]' '{print $4}')
((end = (size_mb * 1024 / cly_size_kb) + start))
parted $DEV_DSKDIR/$disk -s -- \
mkpart part$slicenum ${start}cyl ${end}cyl
if [[ $? -ne 0 ]]; then
log_note "Failed to create partition $slicenum on $disk"
return 1
fi
blockdev --rereadpt $DEV_DSKDIR/$disk 2>/dev/null
block_device_wait
else
if [[ -z $slicenum || -z $size || -z $disk ]]; then
log_fail "The slice, size or disk name is unspecified."
fi
typeset format_file=/var/tmp/format_in.$$
echo "partition" >$format_file
echo "$slicenum" >> $format_file
echo "" >> $format_file
echo "" >> $format_file
echo "$start" >> $format_file
echo "$size" >> $format_file
echo "label" >> $format_file
echo "" >> $format_file
echo "q" >> $format_file
echo "q" >> $format_file
format -e -s -d $disk -f $format_file
fi
typeset ret_val=$?
rm -f $format_file
if [[ $ret_val -ne 0 ]]; then
log_note "Unable to format $disk slice $slicenum to $size"
return 1
fi
return 0
}
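# E.g. usage of set_partition (illustrative; $disk is a placeholder whole-disk
# name): set_partition 0 "" 100mb $disk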
#
# Delete all partitions on all disks - this is specifically for the use of multipath
# devices, which currently can only be used in the test suite as raw/un-partitioned
# devices (i.e. a zpool cannot be created on a whole mpath device that has partitions).
#
function delete_partitions
{
typeset -i j=1
if [[ -z $DISK_ARRAY_NUM ]]; then
DISK_ARRAY_NUM=$(echo ${DISKS} | nawk '{print NF}')
fi
if [[ -z $DISKSARRAY ]]; then
DISKSARRAY=$DISKS
fi
if is_linux; then
if (( $DISK_ARRAY_NUM == 1 )); then
while ((j < MAX_PARTITIONS)); do
parted $DEV_DSKDIR/$DISK -s rm $j \
> /dev/null 2>&1
if (( $? == 1 )); then
lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
if (( $? == 1 )); then
log_note "Partitions for $DISK should be deleted"
else
log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
fi
return 0
else
lsblk | egrep ${DISK}${SLICE_PREFIX}${j} > /dev/null
if (( $? == 0 )); then
log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted"
fi
fi
((j = j+1))
done
else
for disk in `echo $DISKSARRAY`; do
while ((j < MAX_PARTITIONS)); do
parted $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1
if (( $? == 1 )); then
lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
if (( $? == 1 )); then
log_note "Partitions for $disk should be deleted"
else
log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
fi
j=7
else
lsblk | egrep ${disk}${SLICE_PREFIX}${j} > /dev/null
if (( $? == 0 )); then
log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted"
fi
fi
((j = j+1))
done
j=1
done
fi
fi
return 0
}
#
# Get the end cyl of the given slice
#
function get_endslice #<disk> <slice>
{
typeset disk=$1
typeset slice=$2
if [[ -z $disk || -z $slice ]] ; then
log_fail "The disk name or slice number is unspecified."
fi
if is_linux; then
endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \
grep "part${slice}" | \
awk '{print $3}' | \
sed 's,cyl,,')
((endcyl = (endcyl + 1)))
else
disk=${disk#/dev/dsk/}
disk=${disk#/dev/rdsk/}
disk=${disk%s*}
typeset -i ratio=0
ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \
grep "sectors\/cylinder" | \
awk '{print $2}')
if ((ratio == 0)); then
return
fi
typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 |
nawk -v token="$slice" '{if ($1==token) print $6}')
((endcyl = (endcyl + 1) / ratio))
fi
echo $endcyl
}
#
# Given a size,disk and total slice number, this function formats the
# disk slices from 0 to the total slice number with the same specified
# size.
#
function partition_disk #<slice_size> <whole_disk_name> <total_slices>
{
typeset -i i=0
typeset slice_size=$1
typeset disk_name=$2
typeset total_slices=$3
typeset cyl
zero_partitions $disk_name
while ((i < $total_slices)); do
if ! is_linux; then
if ((i == 2)); then
((i = i + 1))
continue
fi
fi
log_must set_partition $i "$cyl" $slice_size $disk_name
cyl=$(get_endslice $disk_name $i)
((i = i+1))
done
}
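# E.g. usage of partition_disk (illustrative; $disk is a placeholder
# whole-disk name): partition_disk 100mb $disk 6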
#
# This function continues to write filenum files into each of dirnum
# directories until either file_write returns an error or the
# maximum number of files per directory has been written.
#
# Usage:
# fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data]
#
# Return value: 0 on success
# non 0 on error
#
# Where :
# destdir: is the directory where everything is to be created under
# dirnum: the maximum number of subdirectories to use, -1 no limit
# filenum: the maximum number of files per subdirectory
# bytes: number of bytes to write
# num_writes: number of times to write out bytes
# data: the data that will be written
#
# E.g.
# fill_fs /testdir 20 25 1024 256 0
#
# Note: bytes * num_writes equals the size of the testfile
#
function fill_fs # destdir dirnum filenum bytes num_writes data
{
typeset destdir=${1:-$TESTDIR}
typeset -i dirnum=${2:-50}
typeset -i filenum=${3:-50}
typeset -i bytes=${4:-8192}
typeset -i num_writes=${5:-10240}
typeset -i data=${6:-0}
typeset -i odirnum=1
typeset -i idirnum=0
typeset -i fn=0
typeset -i retval=0
log_must mkdir -p $destdir/$idirnum
while (($odirnum > 0)); do
if ((dirnum >= 0 && idirnum >= dirnum)); then
odirnum=0
break
fi
file_write -o create -f $destdir/$idirnum/$TESTFILE.$fn \
-b $bytes -c $num_writes -d $data
retval=$?
if (($retval != 0)); then
odirnum=0
break
fi
if (($fn >= $filenum)); then
fn=0
((idirnum = idirnum + 1))
log_must mkdir -p $destdir/$idirnum
else
((fn = fn + 1))
fi
done
return $retval
}
#
# Simple function to get the specified property. If unable to
# get the property then return 1.
#
# Note property is in 'parsable' format (-p)
#
function get_prop # property dataset
{
typeset prop_val
typeset prop=$1
typeset dataset=$2
prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null)
if [[ $? -ne 0 ]]; then
log_note "Unable to get $prop property for dataset " \
"$dataset"
return 1
fi
echo "$prop_val"
return 0
}
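# E.g. usage of get_prop (illustrative, assuming the suite's default
# $TESTPOOL/$TESTFS): mntpt=$(get_prop mountpoint $TESTPOOL/$TESTFS)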
#
# Simple function to get the specified property of a pool. If unable to
# get the property then return 1.
#
# Note property is in 'parsable' format (-p)
#
function get_pool_prop # property pool
{
typeset prop_val
typeset prop=$1
typeset pool=$2
if poolexists $pool ; then
prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \
awk '{print $3}')
if [[ $? -ne 0 ]]; then
log_note "Unable to get $prop property for pool " \
"$pool"
return 1
fi
else
log_note "Pool $pool not exists."
return 1
fi
echo "$prop_val"
return 0
}
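# E.g. usage of get_pool_prop (illustrative, assuming the suite's default
# $TESTPOOL): health=$(get_pool_prop health $TESTPOOL)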
# Return 0 if a pool exists; $? otherwise
#
# $1 - pool name
function poolexists
{
typeset pool=$1
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
zpool get name "$pool" > /dev/null 2>&1
return $?
}
# Return 0 if all the specified datasets exist; $? otherwise
#
# $1-n dataset name
function datasetexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
while (($# > 0)); do
zfs get name $1 > /dev/null 2>&1 || \
return $?
shift
done
return 0
}
# return 0 if none of the specified datasets exists, otherwise return 1.
#
# $1-n dataset name
function datasetnonexists
{
if (($# == 0)); then
log_note "No dataset name given."
return 1
fi
while (($# > 0)); do
zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \
&& return 1
shift
done
return 0
}
#
# Given a mountpoint, or a dataset name, determine if it is shared via NFS.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared
{
typeset fs=$1
typeset mtpt
if [[ $fs != "/"* ]] ; then
if datasetnonexists "$fs" ; then
return 1
else
mtpt=$(get_prop mountpoint "$fs")
case $mtpt in
none|legacy|-) return 1
;;
*) fs=$mtpt
;;
esac
fi
fi
if is_linux; then
for mtpt in `share | awk '{print $1}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
return 1
fi
for mtpt in `share | awk '{print $2}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
typeset stat=$(svcs -H -o STA nfs/server:default)
if [[ $stat != "ON" ]]; then
log_note "Current nfs/server status: $stat"
fi
return 1
}
#
# Given a dataset name determine if it is shared via SMB.
#
# Returns 0 if shared, 1 otherwise.
#
function is_shared_smb
{
typeset fs=$1
typeset mtpt
if datasetnonexists "$fs" ; then
return 1
else
fs=$(echo $fs | sed 's@/@_@g')
fi
if is_linux; then
for mtpt in `net usershare list | awk '{print $1}'` ; do
if [[ $mtpt == $fs ]] ; then
return 0
fi
done
return 1
else
log_unsupported "Currently unsupported by the test framework"
return 1
fi
}
#
# Given a mountpoint, determine if it is not shared via NFS.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared
{
typeset fs=$1
is_shared $fs
if (($? == 0)); then
return 1
fi
return 0
}
#
# Given a dataset determine if it is not shared via SMB.
#
# Returns 0 if not shared, 1 otherwise.
#
function not_shared_smb
{
typeset fs=$1
is_shared_smb $fs
if (($? == 0)); then
return 1
fi
return 0
}
#
# Helper function to unshare a mountpoint.
#
function unshare_fs #fs
{
typeset fs=$1
is_shared $fs || is_shared_smb $fs
if (($? == 0)); then
log_must zfs unshare $fs
fi
return 0
}
#
# Helper function to share a NFS mountpoint.
#
function share_nfs #fs
{
typeset fs=$1
if is_linux; then
is_shared $fs
if (($? != 0)); then
log_must share "*:$fs"
fi
else
is_shared $fs
if (($? != 0)); then
log_must share -F nfs $fs
fi
fi
return 0
}
#
# Helper function to unshare a NFS mountpoint.
#
function unshare_nfs #fs
{
typeset fs=$1
if is_linux; then
is_shared $fs
if (($? == 0)); then
log_must unshare -u "*:$fs"
fi
else
is_shared $fs
if (($? == 0)); then
log_must unshare -F nfs $fs
fi
fi
return 0
}
#
# Helper function to show NFS shares.
#
function showshares_nfs
{
if is_linux; then
share -v
else
share -F nfs
fi
return 0
}
#
# Helper function to show SMB shares.
#
function showshares_smb
{
if is_linux; then
net usershare list
else
share -F smb
fi
return 0
}
#
# Check NFS server status and trigger it online.
#
function setup_nfs_server
{
# Cannot share directory in non-global zone.
#
if ! is_global_zone; then
log_note "Cannot trigger NFS server by sharing in LZ."
return
fi
if is_linux; then
#
# Re-synchronize /var/lib/nfs/etab with /etc/exports and
# /etc/exports.d/* to provide a clean test environment.
#
log_must share -r
log_note "NFS server must be started prior to running ZTS."
return
fi
typeset nfs_fmri="svc:/network/nfs/server:default"
if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then
#
# Only a real share operation can bring the NFS server
# online permanently.
#
typeset dummy=/tmp/dummy
if [[ -d $dummy ]]; then
log_must rm -rf $dummy
fi
log_must mkdir $dummy
log_must share $dummy
#
# Wait for the FMRI's status to reach its final state. While in
# transition an asterisk (*) is appended to the instance's status,
# and unsharing then would revert the status to 'DIS' again.
#
# Wait for at least 1 second.
#
log_must sleep 1
timeout=10
while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]]
do
log_must sleep 1
((timeout -= 1))
done
log_must unshare $dummy
log_must rm -rf $dummy
fi
log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'"
}
#
# To verify whether calling process is in global zone
#
# Return 0 if in global zone, 1 in non-global zone
#
function is_global_zone
{
if is_linux; then
return 0
else
typeset cur_zone=$(zonename 2>/dev/null)
if [[ $cur_zone != "global" ]]; then
return 1
fi
return 0
fi
}
#
# Verify whether test is permitted to run from
# global zone, local zone, or both
#
# $1 zone limit, could be "global", "local", or "both"(no limit)
#
# Return 0 if permitted, otherwise exit with log_unsupported
#
function verify_runnable # zone limit
{
typeset limit=$1
[[ -z $limit ]] && return 0
if is_global_zone ; then
case $limit in
global|both)
;;
local) log_unsupported "Test is unable to run from "\
"global zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
else
case $limit in
local|both)
;;
global) log_unsupported "Test is unable to run from "\
"local zone."
;;
*) log_note "Warning: unknown limit $limit - " \
"use both."
;;
esac
reexport_pool
fi
return 0
}
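# E.g. usage of verify_runnable (illustrative): verify_runnable "global"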
# Return 0 if created successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# $2-n - [keyword] devs_list
function create_pool #pool devs_list
{
typeset pool=${1%%/*}
shift
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 1
fi
if poolexists $pool ; then
destroy_pool $pool
fi
if is_global_zone ; then
[[ -d /$pool ]] && rm -rf /$pool
log_must zpool create -f $pool $@
fi
return 0
}
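# E.g. usage of create_pool (illustrative, assuming $DISKS holds the suite's
# disk list): create_pool $TESTPOOL $DISKS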
# Return 0 if destroyed successfully or the pool exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - pool name
# Destroy pool with the given parameters.
function destroy_pool #pool
{
typeset pool=${1%%/*}
typeset mtpt
if [[ -z $pool ]]; then
log_note "No pool name given."
return 1
fi
if is_global_zone ; then
if poolexists "$pool" ; then
mtpt=$(get_prop mountpoint "$pool")
# At times, syseventd/udev activity can cause attempts
# to destroy a pool to fail with EBUSY. We retry a few
# times allowing failures before requiring the destroy
# to succeed.
log_must_busy zpool destroy -f $pool
[[ -d $mtpt ]] && \
log_must rm -rf $mtpt
else
log_note "Pool does not exist. ($pool)"
return 1
fi
fi
return 0
}
# Return 0 if destroyed successfully or the dataset exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.
function destroy_dataset #dataset #args
{
typeset dataset=$1
typeset mtpt
typeset args=${2:-""}
if [[ -z $dataset ]]; then
log_note "No dataset name given."
return 1
fi
if is_global_zone ; then
if datasetexists "$dataset" ; then
mtpt=$(get_prop mountpoint "$dataset")
log_must_busy zfs destroy $args $dataset
[[ -d $mtpt ]] && \
log_must rm -rf $mtpt
else
log_note "Dataset does not exist. ($dataset)"
return 1
fi
fi
return 0
}
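#
# Illustrative usage sketch (not part of the original library): recursively
# destroy a dataset together with its snapshots and clones:
#
#   destroy_dataset "$TESTPOOL/$TESTFS" "-rRf"
#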
#
# First, create a pool with 5 datasets. Then create a single zone and
# export the 5 datasets to it. In addition, add a ZFS filesystem
# and a zvol device to the zone.
#
# $1 zone name
# $2 zone root directory prefix
# $3 zone ip
#
function zfs_zones_setup #zone_name zone_root zone_ip
{
typeset zone_name=${1:-$(hostname)-z}
typeset zone_root=${2:-"/zone_root"}
typeset zone_ip=${3:-"10.1.1.10"}
typeset prefix_ctr=$ZONE_CTR
typeset pool_name=$ZONE_POOL
typeset -i cntctr=5
typeset -i i=0
# Create a pool and 5 containers within it
#
[[ -d /$pool_name ]] && rm -rf /$pool_name
log_must zpool create -f $pool_name $DISKS
while ((i < cntctr)); do
log_must zfs create $pool_name/$prefix_ctr$i
((i += 1))
done
# create a zvol
log_must zfs create -V 1g $pool_name/zone_zvol
block_device_wait
#
# If the current system supports slog, add a slog device to the pool
#
if verify_slog_support ; then
typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2"
log_must mkfile $MINVDEVSIZE $sdevs
log_must zpool add $pool_name log mirror $sdevs
fi
# this isn't supported just yet.
# Create a filesystem. In order to add this to
# the zone, it must have its mountpoint set to 'legacy'
# log_must zfs create $pool_name/zfs_filesystem
# log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem
[[ -d $zone_root ]] && \
log_must rm -rf $zone_root/$zone_name
[[ ! -d $zone_root ]] && \
log_must mkdir -p -m 0700 $zone_root/$zone_name
# Create the zone configuration file and configure the zone
#
typeset zone_conf=/tmp/zone_conf.$$
echo "create" > $zone_conf
echo "set zonepath=$zone_root/$zone_name" >> $zone_conf
echo "set autoboot=true" >> $zone_conf
i=0
while ((i < cntctr)); do
echo "add dataset" >> $zone_conf
echo "set name=$pool_name/$prefix_ctr$i" >> \
$zone_conf
echo "end" >> $zone_conf
((i += 1))
done
# add our zvol to the zone
echo "add device" >> $zone_conf
echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf
echo "end" >> $zone_conf
# add a corresponding zvol rdsk to the zone
echo "add device" >> $zone_conf
echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf
echo "end" >> $zone_conf
# once it's supported, we'll add our filesystem to the zone
# echo "add fs" >> $zone_conf
# echo "set type=zfs" >> $zone_conf
# echo "set special=$pool_name/zfs_filesystem" >> $zone_conf
# echo "set dir=/export/zfs_filesystem" >> $zone_conf
# echo "end" >> $zone_conf
echo "verify" >> $zone_conf
echo "commit" >> $zone_conf
log_must zonecfg -z $zone_name -f $zone_conf
log_must rm -f $zone_conf
# Install the zone
zoneadm -z $zone_name install
if (($? == 0)); then
log_note "SUCCESS: zoneadm -z $zone_name install"
else
log_fail "FAIL: zoneadm -z $zone_name install"
fi
# Install sysidcfg file
#
typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg
echo "system_locale=C" > $sysidcfg
echo "terminal=dtterm" >> $sysidcfg
echo "network_interface=primary {" >> $sysidcfg
echo "hostname=$zone_name" >> $sysidcfg
echo "}" >> $sysidcfg
echo "name_service=NONE" >> $sysidcfg
echo "root_password=mo791xfZ/SFiw" >> $sysidcfg
echo "security_policy=NONE" >> $sysidcfg
echo "timezone=US/Eastern" >> $sysidcfg
# Boot this zone
log_must zoneadm -z $zone_name boot
}
#
# Reexport TESTPOOL & TESTPOOL(1-4)
#
function reexport_pool
{
typeset -i cntctr=5
typeset -i i=0
while ((i < cntctr)); do
if ((i == 0)); then
TESTPOOL=$ZONE_POOL/$ZONE_CTR$i
if ! ismounted $TESTPOOL; then
log_must zfs mount $TESTPOOL
fi
else
eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i
if eval ! ismounted \$TESTPOOL$i; then
log_must eval zfs mount \$TESTPOOL$i
fi
fi
((i += 1))
done
}
#
# Verify a given disk or pool state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_state # pool disk state{online,offline,degraded}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
[[ -z $pool ]] || [[ -z $state ]] \
&& log_fail "Arguments invalid or missing"
if [[ -z $disk ]]; then
#check pool state only
zpool get -H -o value health $pool \
| grep -i "$state" > /dev/null 2>&1
else
zpool status -v $pool | grep "$disk" \
| grep -i "$state" > /dev/null 2>&1
fi
return $?
}
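#
# Illustrative usage sketch (not part of the original library); the states
# below are example values and $DISK is a placeholder for a vdev path:
#
#   log_must check_state $TESTPOOL "" "online"         # whole-pool health
#   log_must check_state $TESTPOOL $DISK "offline"     # single disk state
#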
#
# Get the mountpoint of a snapshot
# A snapshot uses <mp_filesystem>/.zfs/snapshot/<snap>
# as its mountpoint
#
function snapshot_mountpoint
{
typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
if [[ $dataset != *@* ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
typeset fs=${dataset%@*}
typeset snap=${dataset#*@}
if [[ -z $fs || -z $snap ]]; then
log_fail "Error name of snapshot '$dataset'."
fi
echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap
}
#
# Given a device and an 'ashift' value, verify that it is correctly set on every label
#
function verify_ashift # device ashift
{
typeset device="$1"
typeset ashift="$2"
zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / {
if (ashift != $2)
exit 1;
else
count++;
} END {
if (count != 4)
exit 1;
else
exit 0;
}'
return $?
}
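#
# Illustrative usage sketch (not part of the original library); the device
# and ashift below are example values ($disk is a placeholder vdev path):
#
#   log_must verify_ashift $disk 12
#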
#
# Given a pool and file system, this function will verify the file system
# using the zdb internal tool. Note that the pool is exported and imported
# to ensure it has consistent state.
#
function verify_filesys # pool filesystem dir
{
typeset pool="$1"
typeset filesys="$2"
typeset zdbout="/tmp/zdbout.$$"
shift
shift
typeset dirs=$@
typeset search_path=""
log_note "Calling zdb to verify filesystem '$filesys'"
zfs unmount -a > /dev/null 2>&1
log_must zpool export $pool
if [[ -n $dirs ]] ; then
for dir in $dirs ; do
search_path="$search_path -d $dir"
done
fi
log_must zpool import $search_path $pool
zdb -cudi $filesys > $zdbout 2>&1
if [[ $? != 0 ]]; then
log_note "Output: zdb -cudi $filesys"
cat $zdbout
log_fail "zdb detected errors with: '$filesys'"
fi
log_must zfs mount -a
log_must rm -rf $zdbout
}
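#
# Illustrative usage sketch (not part of the original library): verify a
# file-backed pool, passing the directory holding the vdev files so the
# re-import can find them ($TEST_BASE_DIR is an assumed location):
#
#   verify_filesys $TESTPOOL $TESTPOOL/$TESTFS $TEST_BASE_DIR
#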
#
# Given a pool, list all disks in the pool
#
function get_disklist # pool
{
typeset disklist=""
disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \
grep -v "\-\-\-\-\-" | \
egrep -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$")
echo $disklist
}
#
# Given a pool, list all disks in the pool with their full
# path (like "/dev/sda" instead of "sda").
#
function get_disklist_fullpath # pool
{
args="-P $1"
get_disklist $args
}
# /**
# This function kills a given list of processes after a time period. We use
# this in the stress tests instead of STF_TIMEOUT so that we can have processes
# run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT
# would be listed as FAIL, which we don't want: we're happy with stress tests
# running for a certain amount of time, then finishing.
#
# @param $1 the time in seconds after which we should terminate these processes
# @param $2..$n the processes we wish to terminate.
# */
function stress_timeout
{
typeset -i TIMEOUT=$1
shift
typeset cpids="$@"
log_note "Waiting for child processes($cpids). " \
"It could last dozens of minutes, please be patient ..."
log_must sleep $TIMEOUT
log_note "Killing child processes after ${TIMEOUT} stress timeout."
typeset pid
for pid in $cpids; do
ps -p $pid > /dev/null 2>&1
if (($? == 0)); then
log_must kill -USR1 $pid
fi
done
}
#
# Verify a given hotspare disk is inuse or avail
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_hotspare_state # pool disk state{inuse,avail}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk "spares")
if [[ $state != ${cur_state} ]]; then
return 1
fi
return 0
}
#
# Wait until a hotspare transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_hotspare_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_hotspare_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
#
# Verify a given slog disk is in the expected state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_slog_state # pool disk state{online,offline,unavail}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk "logs")
if [[ $state != ${cur_state} ]]; then
return 1
fi
return 0
}
#
# Verify a given vdev disk is in the expected state
#
# Return 0 if pool/disk matches expected state, 1 otherwise
#
function check_vdev_state # pool disk state{online,offline,unavail}
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
cur_state=$(get_device_state $pool $disk)
if [[ $state != ${cur_state} ]]; then
return 1
fi
return 0
}
#
# Wait until a vdev transitions to a given state or times out.
#
# Return 0 when pool/disk matches expected state, 1 on timeout.
#
function wait_vdev_state # pool disk state timeout
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset state=$3
typeset timeout=${4:-60}
typeset -i i=0
while [[ $i -lt $timeout ]]; do
if check_vdev_state $pool $disk $state; then
return 0
fi
i=$((i+1))
sleep 1
done
return 1
}
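#
# Illustrative usage sketch (not part of the original library): wait up to
# 60 seconds for a reattached disk to come back online ($DISK is a
# placeholder for the vdev path):
#
#   log_must wait_vdev_state $TESTPOOL $DISK "ONLINE" 60
#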
#
# Check the output of 'zpool status -v <pool>' to see if the content of
# <token> contains the specified <keyword>.
#
# Return 0 if it does, 1 otherwise
#
function check_pool_status # pool token keyword <verbose>
{
typeset pool=$1
typeset token=$2
typeset keyword=$3
typeset verbose=${4:-false}
scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" '
($1==token) {print $0}')
if [[ $verbose == true ]]; then
log_note $scan
fi
echo $scan | grep -i "$keyword" > /dev/null 2>&1
return $?
}
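#
# Illustrative usage sketch (not part of the original library): besides the
# wrappers below, check_pool_status can be called directly, e.g. to look
# for a keyword in the 'state:' line:
#
#   check_pool_status $TESTPOOL "state" "DEGRADED" true
#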
#
# The following functions are instances of check_pool_status():
# is_pool_resilvering - to check if the pool resilver is in progress
# is_pool_resilvered - to check if the pool resilver has completed
# is_pool_scrubbing - to check if the pool scrub is in progress
# is_pool_scrubbed - to check if the pool scrub has completed
# is_pool_scrub_stopped - to check if the pool scrub has been stopped
# is_pool_scrub_paused - to check if the pool scrub has been paused
+# is_pool_removing - to check if the pool is removing a vdev
+# is_pool_removed - to check if the pool removal has completed
#
function is_pool_resilvering #pool <verbose>
{
check_pool_status "$1" "scan" "resilver in progress since " $2
return $?
}
function is_pool_resilvered #pool <verbose>
{
check_pool_status "$1" "scan" "resilvered " $2
return $?
}
function is_pool_scrubbing #pool <verbose>
{
check_pool_status "$1" "scan" "scrub in progress since " $2
return $?
}
function is_pool_scrubbed #pool <verbose>
{
check_pool_status "$1" "scan" "scrub repaired" $2
return $?
}
function is_pool_scrub_stopped #pool <verbose>
{
check_pool_status "$1" "scan" "scrub canceled" $2
return $?
}
function is_pool_scrub_paused #pool <verbose>
{
check_pool_status "$1" "scan" "scrub paused since " $2
return $?
}
+function is_pool_removing #pool
+{
+ check_pool_status "$1" "remove" "in progress since "
+ return $?
+}
+
+function is_pool_removed #pool
+{
+ check_pool_status "$1" "remove" "completed on"
+ return $?
+}
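+#
+# Illustrative usage sketch (not part of the original library): poll the
+# removal status while a device is being evacuated ($REMOVEDISK is a
+# placeholder for the device being removed):
+#
+#   log_must zpool remove $TESTPOOL $REMOVEDISK
+#   while is_pool_removing $TESTPOOL; do
+#           sleep 1
+#   done
+#   log_must is_pool_removed $TESTPOOL
+#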
+
#
# Use create_pool()/destroy_pool() to clean up the information on
# the given disks to avoid slice overlapping.
#
function cleanup_devices #vdevs
{
typeset pool="foopool$$"
if poolexists $pool ; then
destroy_pool $pool
fi
create_pool $pool $@
destroy_pool $pool
return 0
}
#/**
# A function to locate free disks on a system, or among the disks given
# as parameters. It works by excluding disks that are in use as swap or
# dump devices, as well as disks listed in /etc/vfstab
#
# $@ given disks to find which are free, default is all disks in
# the test system
#
# @return a string containing the list of available disks
#*/
function find_disks
{
# Trust provided list, no attempt is made to locate unused devices.
if is_linux; then
echo "$@"
return
fi
sfi=/tmp/swaplist.$$
dmpi=/tmp/dumpdev.$$
max_finddisksnum=${MAX_FINDDISKSNUM:-6}
swap -l > $sfi
dumpadm > $dmpi 2>/dev/null
# write an awk script that can process the output of format
# to produce a list of disks we know about. Note that we have
# to escape "$2" so that the shell doesn't interpret it while
# we're creating the awk script.
# -------------------
cat > /tmp/find_disks.awk <<EOF
#!/bin/nawk -f
BEGIN { FS="."; }
/^Specify disk/{
searchdisks=0;
}
{
if (searchdisks && \$2 !~ "^$"){
split(\$2,arr," ");
print arr[1];
}
}
/^AVAILABLE DISK SELECTIONS:/{
searchdisks=1;
}
EOF
#---------------------
chmod 755 /tmp/find_disks.awk
disks=${@:-$(echo "" | format -e 2>/dev/null | /tmp/find_disks.awk)}
rm /tmp/find_disks.awk
unused=""
for disk in $disks; do
# Check for mounted
grep "${disk}[sp]" /etc/mnttab >/dev/null
(($? == 0)) && continue
# Check for swap
grep "${disk}[sp]" $sfi >/dev/null
(($? == 0)) && continue
# check for dump device
grep "${disk}[sp]" $dmpi >/dev/null
(($? == 0)) && continue
# check to see if this disk hasn't been explicitly excluded
# by a user-set environment variable
echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null
(($? == 0)) && continue
unused_candidates="$unused_candidates $disk"
done
rm $sfi
rm $dmpi
# now just check to see if those disks do actually exist
# by looking for a device pointing to the first slice in
# each case. Limit the number to max_finddisksnum
count=0
for disk in $unused_candidates; do
if [ -b $DEV_DSKDIR/${disk}s0 ]; then
if [ $count -lt $max_finddisksnum ]; then
unused="$unused $disk"
# do not impose limit if $@ is provided
[[ -z $@ ]] && ((count = count + 1))
fi
fi
done
# finally, return our disk list
echo $unused
}
#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #<group_name> <user_name> <basedir>
{
typeset gname=$1
typeset uname=$2
typeset basedir=${3:-"/var/tmp"}
if ((${#gname} == 0 || ${#uname} == 0)); then
log_fail "group name or user name are not defined."
fi
log_must useradd -g $gname -d $basedir/$uname -m $uname
echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.profile
echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.bash_profile
echo "export PATH=\"$STF_PATH\"" >>$basedir/$uname/.login
# Add new users to the same group as the command line utils.
# This allows them to be run out of the original user's home
# directory as long as it is permissioned to be group readable.
if is_linux; then
cmd_group=$(stat --format="%G" $(which zfs))
log_must usermod -a -G $cmd_group $uname
fi
return 0
}
#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #<logname> <basedir>
{
typeset user=$1
typeset basedir=${2:-"/var/tmp"}
if ((${#user} == 0)); then
log_fail "login name is necessary."
fi
if id $user > /dev/null 2>&1; then
log_must_retry "currently used" 5 userdel $user
fi
[[ -d $basedir/$user ]] && rm -fr $basedir/$user
return 0
}
#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #<group_name>
{
typeset group=$1
if ((${#group} == 0)); then
log_fail "group name is necessary."
fi
# Assign 100 as the base gid; a larger value is selected on
# Linux because for many distributions 1000 and under are reserved.
if is_linux; then
while true; do
groupadd $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
0) return 0 ;;
*) return 1 ;;
esac
done
else
typeset -i gid=100
while true; do
groupadd -g $gid $group > /dev/null 2>&1
typeset -i ret=$?
case $ret in
0) return 0 ;;
# The gid is not unique
4) ((gid += 1)) ;;
*) return 1 ;;
esac
done
fi
}
#
# Delete the specified group.
#
# $1 group name
#
function del_group #<group_name>
{
typeset grp=$1
if ((${#grp} == 0)); then
log_fail "group name is necessary."
fi
if is_linux; then
getent group $grp > /dev/null 2>&1
typeset -i ret=$?
case $ret in
# Group does not exist.
2) return 0 ;;
# Name already exists as a group name
0) log_must groupdel $grp ;;
*) return 1 ;;
esac
else
groupmod -n $grp $grp > /dev/null 2>&1
typeset -i ret=$?
case $ret in
# Group does not exist.
6) return 0 ;;
# Name already exists as a group name
9) log_must groupdel $grp ;;
*) return 1 ;;
esac
fi
return 0
}
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name
typeset pool=""
typeset DONT_DESTROY=""
# We check that by deleting the $1 pool, we're not
# going to pull the rug out from other pools. Do this
# by looking at all other pools, ensuring that they
# aren't built from files or zvols contained in this pool.
for pool in $(zpool list -H -o name)
do
ALTMOUNTPOOL=""
# this is a list of the top-level directories in each of the
# files that make up the path to the files the pool is based on
FILEPOOL=$(zpool status -v $pool | grep /$1/ | \
awk '{print $1}')
# this is a list of the zvols that make up the pool
ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \
| awk '{print $1}')
# also want to determine if it's a file-based pool using an
# alternate mountpoint...
POOL_FILE_DIRS=$(zpool status -v $pool | \
grep / | awk '{print $1}' | \
awk -F/ '{print $2}' | grep -v "dev")
for pooldir in $POOL_FILE_DIRS
do
OUTPUT=$(zfs list -H -r -o mountpoint $1 | \
grep "${pooldir}$" | awk '{print $1}')
ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
done
if [ ! -z "$ZVOLPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ZVOLPOOL on $1"
fi
if [ ! -z "$FILEPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $FILEPOOL on $1"
fi
if [ ! -z "$ALTMOUNTPOOL" ]
then
DONT_DESTROY="true"
log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
fi
done
if [ -z "${DONT_DESTROY}" ]
then
return 0
else
log_note "Warning: it is not safe to destroy $1!"
return 1
fi
}
#
# Get the available ZFS compression options
# $1 option type zfs_set|zfs_compress
#
function get_compress_opts
{
typeset COMPRESS_OPTS
typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
gzip-6 gzip-7 gzip-8 gzip-9"
if [[ $1 == "zfs_compress" ]] ; then
COMPRESS_OPTS="on lzjb"
elif [[ $1 == "zfs_set" ]] ; then
COMPRESS_OPTS="on off lzjb"
fi
typeset valid_opts="$COMPRESS_OPTS"
zfs get 2>&1 | grep gzip >/dev/null 2>&1
if [[ $? -eq 0 ]]; then
valid_opts="$valid_opts $GZIP_OPTS"
fi
echo "$valid_opts"
}
#
# Verify that the zfs operation with the -p option works as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
typeset ops=$1
typeset datatype=$2
typeset dataset=$3
typeset newdataset=$4
if [[ $datatype != "fs" && $datatype != "vol" ]]; then
log_fail "$datatype is not supported."
fi
# check parameters accordingly
case $ops in
create)
newdataset=$dataset
dataset=""
if [[ $datatype == "vol" ]]; then
ops="create -V $VOLSIZE"
fi
;;
clone)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
log_must snapexists $dataset
;;
rename)
if [[ -z $newdataset ]]; then
log_fail "newdataset should not be empty" \
"when ops is $ops."
fi
log_must datasetexists $dataset
log_mustnot snapexists $dataset
;;
*)
log_fail "$ops is not supported."
;;
esac
# make sure the upper level filesystem does not exist
destroy_dataset "${newdataset%/*}" "-rRf"
# without -p option, operation will fail
log_mustnot zfs $ops $dataset $newdataset
log_mustnot datasetexists $newdataset ${newdataset%/*}
# with -p option, operation should succeed
log_must zfs $ops -p $dataset $newdataset
block_device_wait
if ! datasetexists $newdataset ; then
log_fail "-p option does not work for $ops"
fi
# when $ops is create or clone, redoing the operation should still return zero
if [[ $ops != "rename" ]]; then
log_must zfs $ops -p $dataset $newdataset
fi
return 0
}
#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
typeset pool=$1
typeset config=$2
typeset alt_root
if ! poolexists "$pool" ; then
return 1
fi
alt_root=$(zpool list -H $pool | awk '{print $NF}')
if [[ $alt_root == "-" ]]; then
value=$(zdb -C $pool | grep "$config:" | awk -F: \
'{print $2}')
else
value=$(zdb -e $pool | grep "$config:" | awk -F: \
'{print $2}')
fi
if [[ -n $value ]] ; then
value=${value#\'}
value=${value%\'}
fi
echo $value
return 0
}
#
# Private function. Randomly select one of the items from the arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
typeset cnt=$1
shift
typeset str="$@"
typeset -i ind
((ind = RANDOM % cnt + 1))
typeset ret=$(echo "$str" | cut -f $ind -d ' ')
echo $ret
}
#
# Randomly select one item from the arguments, including the NONE (empty) choice
#
function random_get_with_non
{
typeset -i cnt=$#
((cnt += 1))
_random_get "$cnt" "$@"
}
#
# Randomly select one item from the arguments, without the NONE choice
#
function random_get
{
_random_get "$#" "$@"
}
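#
# Illustrative usage sketch (not part of the original library): pick a
# random compression value, with and without the empty (NONE) choice:
#
#   comp=$(random_get $(get_compress_opts zfs_set))
#   maybe_comp=$(random_get_with_non $(get_compress_opts zfs_set))
#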
#
# Detect if the current system supports slog
#
function verify_slog_support
{
typeset dir=$TEST_BASE_DIR/disk.$$
typeset pool=foo.$$
typeset vdev=$dir/a
typeset sdev=$dir/b
mkdir -p $dir
mkfile $MINVDEVSIZE $vdev $sdev
typeset -i ret=0
if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
ret=1
fi
rm -r $dir
return $ret
}
#
# The function will generate a dataset name with the specified length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
typeset -i len=$1
typeset basestr="$2"
typeset -i baselen=${#basestr}
typeset -i iter=0
typeset l_name=""
if ((len % baselen == 0)); then
((iter = len / baselen))
else
((iter = len / baselen + 1))
fi
while ((iter > 0)); do
l_name="${l_name}$basestr"
((iter -= 1))
done
echo $l_name
}
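#
# Illustrative usage sketch (not part of the original library): build a
# name of roughly 200 characters from a short base string:
#
#   longname=$(gen_dataset_name 200 "abcdefg")
#   log_must zfs create $TESTPOOL/$longname
#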
#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
typeset cksum
sync
cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
| awk -F= '{print $7}')
echo $cksum
}
#
# Get cksum of file
# $1 file path
#
function checksum
{
typeset cksum
cksum=$(cksum $1 | awk '{print $1}')
echo $cksum
}
#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
typeset pool=$1
typeset disk=${2#$DEV_DSKDIR/}
typeset field=${3:-$pool}
state=$(zpool status -v "$pool" 2>/dev/null | \
nawk -v device=$disk -v pool=$pool -v field=$field \
'BEGIN {startconfig=0; startfield=0; }
/config:/ {startconfig=1}
(startconfig==1) && ($1==field) {startfield=1; next;}
(startfield==1) && ($1==device) {print $2; exit;}
(startfield==1) &&
($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
echo $state
}
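#
# Illustrative usage sketch (not part of the original library): query the
# state of a log device ($LOG_DEV is a placeholder for its path):
#
#   state=$(get_device_state $TESTPOOL $LOG_DEV "logs")
#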
#
# print the given directory filesystem type
#
# $1 directory name
#
function get_fstype
{
typeset dir=$1
if [[ -z $dir ]]; then
log_fail "Usage: get_fstype <directory>"
fi
#
# $ df -n /
# / : ufs
#
df -n $dir | awk '{print $3}'
}
#
# Given a disk, label it to VTOC regardless of what label was on the disk
# $1 disk
#
function labelvtoc
{
typeset disk=$1
if [[ -z $disk ]]; then
log_fail "The disk name is unspecified."
fi
typeset label_file=/var/tmp/labelvtoc.$$
typeset arch=$(uname -p)
if is_linux; then
log_note "Currently unsupported by the test framework"
return 1
fi
if [[ $arch == "i386" ]]; then
echo "label" > $label_file
echo "0" >> $label_file
echo "" >> $label_file
echo "q" >> $label_file
echo "q" >> $label_file
fdisk -B $disk >/dev/null 2>&1
# wait a while for fdisk to finish
sleep 60
elif [[ $arch == "sparc" ]]; then
echo "label" > $label_file
echo "0" >> $label_file
echo "" >> $label_file
echo "" >> $label_file
echo "" >> $label_file
echo "q" >> $label_file
else
log_fail "unknown arch type"
fi
format -e -s -d $disk -f $label_file
typeset -i ret_val=$?
rm -f $label_file
#
# wait for format to finish
#
sleep 60
if ((ret_val != 0)); then
log_fail "unable to label $disk as VTOC."
fi
return 0
}
#
# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise false
#
function is_zfsroot
{
df -n / | grep zfs > /dev/null 2>&1
return $?
}
#
# get the root filesystem name if it's a zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
typeset rootfs=""
if ! is_linux; then
rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
/etc/mnttab)
fi
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootfs"
fi
zfs list $rootfs > /dev/null 2>&1
if (($? == 0)); then
echo $rootfs
else
log_fail "This is not a zfsroot system."
fi
}
#
# get the rootfs's pool name
# return:
# rootpool name
#
function get_rootpool
{
typeset rootfs=""
typeset rootpool=""
if ! is_linux; then
rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
/etc/mnttab)
fi
if [[ -z "$rootfs" ]]; then
log_fail "Can not get rootpool"
fi
zfs list $rootfs > /dev/null 2>&1
if (($? == 0)); then
rootpool=`echo $rootfs | awk -F\/ '{print $1}'`
echo $rootpool
else
log_fail "This is not a zfsroot system."
fi
}
#
# Get the package name
#
function get_package_name
{
typeset dirpath=${1:-$STC_NAME}
echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
}
#
# Get the number of words in a whitespace-separated string
#
function get_word_count
{
echo $1 | wc -w
}
#
# Verify that the required number of disks is given
#
function verify_disk_count
{
typeset -i min=${2:-1}
typeset -i count=$(get_word_count "$1")
if ((count < min)); then
log_untested "A minimum of $min disks is required to run." \
" You specified $count disk(s)"
fi
}
function ds_is_volume
{
typeset type=$(get_prop type $1)
[[ $type = "volume" ]] && return 0
return 1
}
function ds_is_filesystem
{
typeset type=$(get_prop type $1)
[[ $type = "filesystem" ]] && return 0
return 1
}
function ds_is_snapshot
{
typeset type=$(get_prop type $1)
[[ $type = "snapshot" ]] && return 0
return 1
}
#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
svcs -H -o state labeld 2>/dev/null | grep "enabled"
if (($? != 0)); then
return 1
else
return 0
fi
}
# Utility function to determine if a system has multiple cpus.
function is_mp
{
if is_linux; then
(($(nproc) > 1))
else
(($(psrinfo | wc -l) > 1))
fi
return $?
}
function get_cpu_freq
{
if is_linux; then
lscpu | awk '/CPU MHz/ { print $3 }'
else
psrinfo -v 0 | awk '/processor operates at/ {print $6}'
fi
}
# Run the given command as the user provided.
function user_run
{
typeset user=$1
shift
log_note "user:$user $@"
eval su - \$user -c \"$@\" > /tmp/out 2>/tmp/err
return $?
}
#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
typeset pool=$1
typeset vdev
if [[ -z $pool ]]; then
log_note "Missing pool name."
return 2
fi
shift
typeset tmpfile=$(mktemp)
zpool list -Hv "$pool" >$tmpfile
for vdev in $@; do
grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1
[[ $? -ne 0 ]] && return 1
done
rm -f $tmpfile
return 0;
}
function get_max
{
typeset -l i max=$1
shift
for i in "$@"; do
max=$(echo $((max > i ? max : i)))
done
echo $max
}
function get_min
{
typeset -l i min=$1
shift
for i in "$@"; do
min=$(echo $((min < i ? min : i)))
done
echo $min
}
#
# Generate a random number between 1 and the argument.
#
function random
{
typeset max=$1
echo $(( ($RANDOM % $max) + 1 ))
}
# Write data that can be compressed into a directory
function write_compressible
{
typeset dir=$1
typeset megs=$2
typeset nfiles=${3:-1}
typeset bs=${4:-1024k}
typeset fname=${5:-file}
[[ -d $dir ]] || log_fail "No directory: $dir"
# Under Linux fio is not currently used since its behavior can
# differ significantly across versions. This includes missing
# command line options and cases where the --buffer_compress_*
# options fail to behave as expected.
if is_linux; then
typeset file_bytes=$(to_bytes $megs)
typeset bs_bytes=4096
typeset blocks=$(($file_bytes / $bs_bytes))
for (( i = 0; i < $nfiles; i++ )); do
truncate -s $file_bytes $dir/$fname.$i
# Write every third block to get 66% compression.
for (( j = 0; j < $blocks; j += 3 )); do
dd if=/dev/urandom of=$dir/$fname.$i \
seek=$j bs=$bs_bytes count=1 \
conv=notrunc >/dev/null 2>&1
done
done
else
log_must eval "fio \
--name=job \
--fallocate=0 \
--minimal \
--randrepeat=0 \
--buffer_compress_percentage=66 \
--buffer_compress_chunk=4096 \
--directory=$dir \
--numjobs=$nfiles \
--nrfiles=$nfiles \
--rw=write \
--bs=$bs \
--filesize=$megs \
--filename_format='$fname.\$jobnum' >/dev/null"
fi
}
function get_objnum
{
typeset pathname=$1
typeset objnum
[[ -e $pathname ]] || log_fail "No such file or directory: $pathname"
objnum=$(stat -c %i $pathname)
echo $objnum
}
#
# Sync data to the pool
#
# $1 pool name
# $2 boolean to force uberblock (and config including zpool cache file) update
#
function sync_pool #pool <force>
{
typeset pool=${1:-$TESTPOOL}
typeset force=${2:-false}
if [[ $force == true ]]; then
log_must zpool sync -f $pool
else
log_must zpool sync $pool
fi
return 0
}
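#
# Illustrative usage sketch (not part of the original library): force an
# uberblock/config update before inspecting the pool with zdb:
#
#   sync_pool $TESTPOOL true
#   log_must zdb -cd $TESTPOOL
#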
#
# Wait for the zpool 'freeing' property to drop to zero.
#
# $1 pool name
#
function wait_freeing #pool
{
typeset pool=${1:-$TESTPOOL}
while true; do
[[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break
log_must sleep 1
done
}
#
# Wait for every device replace operation to complete
#
# $1 pool name
#
function wait_replacing #pool
{
typeset pool=${1:-$TESTPOOL}
while true; do
[[ "" == "$(zpool status $pool |
awk '/replacing-[0-9]+/ {print $1}')" ]] && break
log_must sleep 1
done
}
#
# Wait for a pool to be scrubbed
#
# $1 pool name
# $2 number of seconds to wait (optional)
#
# Returns true when pool has been scrubbed, or false if there's a timeout or if
# no scrub was done.
#
function wait_scrubbed
{
typeset pool=${1:-$TESTPOOL}
typeset iter=${2:-10}
for i in {1..$iter} ; do
if is_pool_scrubbed $pool ; then
return 0
fi
sleep 1
done
return 1
}
# Backup the zed.rc in our test directory so that we can edit it for our test.
#
# Returns: Backup file name. You will need to pass this to zed_rc_restore().
function zed_rc_backup
{
zedrc_backup="$(mktemp)"
cp $ZEDLET_DIR/zed.rc $zedrc_backup
echo $zedrc_backup
}
function zed_rc_restore
{
mv $1 $ZEDLET_DIR/zed.rc
}
#
# Setup custom environment for the ZED.
#
# $@ Optional list of zedlets to run under zed.
function zed_setup
{
if ! is_linux; then
return
fi
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
if [[ ! -e $VDEVID_CONF ]]; then
log_must touch $VDEVID_CONF
fi
if [[ -e $VDEVID_CONF_ETC ]]; then
log_fail "Must not have $VDEVID_CONF_ETC file present on system"
fi
EXTRA_ZEDLETS=$@
# Create a symlink for /etc/zfs/vdev_id.conf file.
log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC
# Setup minimal ZED configuration. Individual test cases should
# add additional ZEDLETs as needed for their specific test.
log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR
log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR
# Scripts must only be user writable.
if [[ -n "$EXTRA_ZEDLETS" ]] ; then
saved_umask=$(umask)
log_must umask 0022
for i in $EXTRA_ZEDLETS ; do
log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR
done
log_must umask $saved_umask
fi
# Customize the zed.rc file to enable the full debug log.
log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc
echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc
}
#
# Cleanup custom ZED environment.
#
# $@ Optional list of zedlets to remove from our test zed.d directory.
function zed_cleanup
{
if ! is_linux; then
return
fi
EXTRA_ZEDLETS=$@
log_must rm -f ${ZEDLET_DIR}/zed.rc
log_must rm -f ${ZEDLET_DIR}/zed-functions.sh
log_must rm -f ${ZEDLET_DIR}/all-syslog.sh
log_must rm -f ${ZEDLET_DIR}/all-debug.sh
log_must rm -f ${ZEDLET_DIR}/state
if [[ -n "$EXTRA_ZEDLETS" ]] ; then
for i in $EXTRA_ZEDLETS ; do
log_must rm -f ${ZEDLET_DIR}/$i
done
fi
log_must rm -f $ZED_LOG
log_must rm -f $ZED_DEBUG_LOG
log_must rm -f $VDEVID_CONF_ETC
log_must rm -f $VDEVID_CONF
rmdir $ZEDLET_DIR
}
#
# Check if ZED is currently running, if not start ZED.
#
function zed_start
{
if ! is_linux; then
return
fi
# ZEDLET_DIR=/var/tmp/zed
if [[ ! -d $ZEDLET_DIR ]]; then
log_must mkdir $ZEDLET_DIR
fi
# Verify the ZED is not already running.
pgrep -x zed > /dev/null
if (($? == 0)); then
log_fail "ZED already running"
fi
log_note "Starting ZED"
# run ZED in the background and redirect foreground logging
# output to $ZED_LOG.
log_must truncate -s 0 $ZED_DEBUG_LOG
log_must eval "zed -vF -d $ZEDLET_DIR -p $ZEDLET_DIR/zed.pid -P $PATH" \
"-s $ZEDLET_DIR/state 2>$ZED_LOG &"
return 0
}
#
# Kill ZED process
#
function zed_stop
{
if ! is_linux; then
return
fi
log_note "Stopping ZED"
if [[ -f ${ZEDLET_DIR}/zed.pid ]]; then
zedpid=$(cat ${ZEDLET_DIR}/zed.pid)
kill $zedpid
while ps -p $zedpid > /dev/null; do
sleep 1
done
rm -f ${ZEDLET_DIR}/zed.pid
fi
return 0
}
#
# Drain all zevents
#
function zed_events_drain
{
while [ $(zpool events -H | wc -l) -ne 0 ]; do
sleep 1
zpool events -c >/dev/null
done
}
# Set a variable in zed.rc to something, un-commenting it in the process.
#
# $1 variable
# $2 value
function zed_rc_set
{
var="$1"
val="$2"
# Remove the line
cmd="'/$var/d'"
eval sed -i $cmd $ZEDLET_DIR/zed.rc
# Add it at the end
echo "$var=$val" >> $ZEDLET_DIR/zed.rc
}
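#
# Illustrative usage sketch (not part of the original library); the variable
# and value below are example settings only:
#
#   zedrc_backup=$(zed_rc_backup)
#   zed_rc_set ZED_NOTIFY_INTERVAL_SECS 3600
#   ... exercise the ZED ...
#   zed_rc_restore $zedrc_backup
#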
#
# Check if the provided device is actively being used as a swap device.
#
function is_swap_inuse
{
typeset device=$1
if [[ -z $device ]] ; then
log_note "No device specified."
return 1
fi
if is_linux; then
swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1
else
swap -l | grep -w $device > /dev/null 2>&1
fi
return $?
}
#
# Setup a swap device using the provided device.
#
function swap_setup
{
typeset swapdev=$1
if is_linux; then
log_must eval "mkswap $swapdev > /dev/null 2>&1"
log_must swapon $swapdev
else
log_must swap -a $swapdev
fi
return 0
}
#
# Cleanup a swap device on the provided device.
#
function swap_cleanup
{
typeset swapdev=$1
if is_swap_inuse $swapdev; then
if is_linux; then
log_must swapoff $swapdev
else
log_must swap -d $swapdev
fi
fi
return 0
}
#
# Set a global system tunable (64-bit value)
#
# $1 tunable name
# $2 tunable value
#
function set_tunable64
{
set_tunable_impl "$1" "$2" Z
}
#
# Set a global system tunable (32-bit value)
#
# $1 tunable name
# $2 tunable value
#
function set_tunable32
{
set_tunable_impl "$1" "$2" W
}
function set_tunable_impl
{
typeset tunable="$1"
typeset value="$2"
typeset mdb_cmd="$3"
typeset module="${4:-zfs}"
[[ -z "$tunable" ]] && return 1
[[ -z "$value" ]] && return 1
[[ -z "$mdb_cmd" ]] && return 1
case "$(uname)" in
Linux)
typeset zfs_tunables="/sys/module/$module/parameters"
[[ -w "$zfs_tunables/$tunable" ]] || return 1
echo -n "$value" > "$zfs_tunables/$tunable"
return "$?"
;;
SunOS)
[[ "$module" -eq "zfs" ]] || return 1
echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw
return "$?"
;;
esac
}
#
# Get a global system tunable
#
# $1 tunable name
#
function get_tunable
{
get_tunable_impl "$1"
}
function get_tunable_impl
{
typeset tunable="$1"
typeset module="${2:-zfs}"
[[ -z "$tunable" ]] && return 1
case "$(uname)" in
Linux)
typeset zfs_tunables="/sys/module/$module/parameters"
[[ -f "$zfs_tunables/$tunable" ]] || return 1
cat $zfs_tunables/$tunable
return "$?"
;;
SunOS)
[[ "$module" -eq "zfs" ]] || return 1
;;
esac
return 1
}
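#
# Illustrative usage sketch (not part of the original library): save, lower
# and restore a tunable around a test, as async_destroy_001_pos does below
# (the tunable name is an example):
#
#   saved=$(get_tunable zfs_async_block_max_blocks)
#   log_must set_tunable64 zfs_async_block_max_blocks 100
#   ... run the workload ...
#   log_must set_tunable64 zfs_async_block_max_blocks $saved
#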
+
+#
+# Prints the current time in seconds since UNIX Epoch.
+#
+function current_epoch
+{
+ printf '%(%s)T'
+}
+
+#
+# Get decimal value of global uint32_t variable using mdb.
+#
+function mdb_get_uint32
+{
+ typeset variable=$1
+ typeset value
+
+ value=$(mdb -k -e "$variable/X | ::eval .=U")
+ if [[ $? -ne 0 ]]; then
+ log_fail "Failed to get value of '$variable' from mdb."
+ return 1
+ fi
+
+ echo $value
+ return 0
+}
+
+#
+# Set global uint32_t variable to a decimal value using mdb.
+#
+function mdb_set_uint32
+{
+ typeset variable=$1
+ typeset value=$2
+
+ mdb -kw -e "$variable/W 0t$value" > /dev/null
+ if [[ $? -ne 0 ]]; then
+ echo "Failed to set '$variable' to '$value' in mdb."
+ return 1
+ fi
+
+ return 0
+}
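+#
+# Illustrative usage sketch (not part of the original library): read, modify
+# and restore a kernel variable via mdb ("zfs_flags" is an example name):
+#
+#   saved=$(mdb_get_uint32 zfs_flags)
+#   log_must mdb_set_uint32 zfs_flags 1
+#   ... run the test ...
+#   log_must mdb_set_uint32 zfs_flags $saved
+#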
diff --git a/tests/zfs-tests/tests/functional/Makefile.am b/tests/zfs-tests/tests/functional/Makefile.am
index 4510d5112a99..a5bbb36e05ce 100644
--- a/tests/zfs-tests/tests/functional/Makefile.am
+++ b/tests/zfs-tests/tests/functional/Makefile.am
@@ -1,71 +1,72 @@
SUBDIRS = \
acl \
arc \
atime \
bootfs \
cache \
cachefile \
casenorm \
channel_program \
chattr \
checksum \
clean_mirror \
cli_root \
cli_user \
compression \
ctime \
deadman \
delegate \
devices \
events \
exec \
fault \
features \
grow_pool \
grow_replicas \
history \
hkdf \
inheritance \
inuse \
large_files \
largest_pool \
libzfs \
link_count \
migration \
mmap \
mmp \
mount \
mv_files \
nestedfs \
no_space \
nopwrite \
online_offline \
pool_names \
poolversion \
privilege \
projectquota \
quota \
raidz \
redundancy \
refquota \
refreserv \
+ removal \
rename_dirs \
replacement \
reservation \
rootpool \
rsend \
scrub_mirror \
slog \
snapshot \
snapused \
sparse \
threadsappend \
tmpfile \
truncate \
upgrade \
user_namespace \
userquota \
vdev_zaps \
write_dirs \
xattr \
zvol
diff --git a/tests/zfs-tests/tests/functional/bootfs/bootfs_001_pos.ksh b/tests/zfs-tests/tests/functional/bootfs/bootfs_001_pos.ksh
index 3f60d9eedade..3e9357063bed 100755
--- a/tests/zfs-tests/tests/functional/bootfs/bootfs_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/bootfs/bootfs_001_pos.ksh
@@ -1,82 +1,86 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# Copyright 2015 Nexenta Systems, Inc.
#
#
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
#
+#
+# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+#
+
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
#
# Valid datasets and snapshots are accepted as bootfs property values
#
# STRATEGY:
# 1. Create a set of datasets and snapshots in a test pool
# 2. Try setting them as boot filesystems
#
verify_runnable "global"
function cleanup {
if poolexists $TESTPOOL ; then
log_must zpool destroy $TESTPOOL
fi
if [[ -f $VDEV ]]; then
log_must rm -f $VDEV
fi
}
zpool set 2>&1 | grep bootfs > /dev/null
if [ $? -ne 0 ]
then
log_unsupported "bootfs pool property not supported on this release."
fi
log_assert "Valid datasets are accepted as bootfs property values"
log_onexit cleanup
typeset VDEV=$TESTDIR/bootfs_001_pos_a.$$.dat
log_must mkfile $MINVDEVSIZE $VDEV
create_pool "$TESTPOOL" "$VDEV"
log_must zfs create $TESTPOOL/$TESTFS
log_must zfs snapshot $TESTPOOL/$TESTFS@snap
log_must zfs clone $TESTPOOL/$TESTFS@snap $TESTPOOL/clone
log_must zpool set bootfs=$TESTPOOL/$TESTFS $TESTPOOL
log_must zpool set bootfs=$TESTPOOL/$TESTFS@snap $TESTPOOL
log_must zpool set bootfs=$TESTPOOL/clone $TESTPOOL
log_must zfs promote $TESTPOOL/clone
log_must zpool set bootfs=$TESTPOOL/clone $TESTPOOL
log_pass "Valid datasets are accepted as bootfs property values"
diff --git a/tests/zfs-tests/tests/functional/bootfs/bootfs_002_neg.ksh b/tests/zfs-tests/tests/functional/bootfs/bootfs_002_neg.ksh
index 8ee48dba59fc..6a72bfcdc40c 100755
--- a/tests/zfs-tests/tests/functional/bootfs/bootfs_002_neg.ksh
+++ b/tests/zfs-tests/tests/functional/bootfs/bootfs_002_neg.ksh
@@ -1,82 +1,86 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# Copyright 2015 Nexenta Systems, Inc.
#
#
# Copyright (c) 2012, 2016 by Delphix. All rights reserved.
#
+#
+# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+#
+
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
#
# Invalid datasets are rejected as boot property values
#
# STRATEGY:
#
# 1. Create a zvol
# 2. Verify that we can't set the bootfs to that dataset
#
verify_runnable "global"
function cleanup {
if datasetexists $TESTPOOL/vol
then
log_must zfs destroy $TESTPOOL/vol
fi
if poolexists $TESTPOOL
then
log_must zpool destroy $TESTPOOL
fi
if [[ -f $VDEV ]]; then
log_must rm -f $VDEV
fi
}
zpool set 2>&1 | grep bootfs > /dev/null
if [ $? -ne 0 ]
then
log_unsupported "bootfs pool property not supported on this release."
fi
log_assert "Invalid datasets are rejected as boot property values"
log_onexit cleanup
typeset VDEV=$TESTDIR/bootfs_002_neg_a.$$.dat
log_must mkfile 400m $VDEV
create_pool "$TESTPOOL" "$VDEV"
log_must zfs create -V 10m $TESTPOOL/vol
block_device_wait
log_mustnot zpool set bootfs=$TESTPOOL/vol $TESTPOOL
log_pass "Invalid datasets are rejected as boot property values"
diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
index d5791372d0e4..2ea82f0f6979 100644
--- a/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
+++ b/tests/zfs-tests/tests/functional/cli_root/zpool_get/zpool_get.cfg
@@ -1,84 +1,86 @@
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
#
-# Copyright (c) 2013 by Delphix. All rights reserved.
+# Copyright (c) 2013, 2014 by Delphix. All rights reserved.
# Copyright 2016 Nexenta Systems, Inc. All rights reserved.
#
# Set the expected properties of zpool
typeset -a properties=(
"size"
"capacity"
"altroot"
"health"
"guid"
"version"
"bootfs"
"delegation"
"autoreplace"
"cachefile"
"failmode"
"listsnapshots"
"autoexpand"
"dedupditto"
"dedupratio"
"free"
"allocated"
"readonly"
"comment"
"expandsize"
"freeing"
"fragmentation"
"leaked"
"multihost"
"feature@async_destroy"
"feature@empty_bpobj"
"feature@lz4_compress"
"feature@multi_vdev_crash_dump"
"feature@spacemap_histogram"
"feature@enabled_txg"
"feature@hole_birth"
"feature@extensible_dataset"
"feature@embedded_data"
"feature@bookmarks"
"feature@filesystem_limits"
"feature@large_blocks"
"feature@sha512"
"feature@skein"
"feature@edonr"
+ "feature@device_removal"
+ "feature@obsolete_counts"
)
# Additional properties added for Linux.
if is_linux; then
properties+=(
"ashift"
"feature@large_dnode"
"feature@userobj_accounting"
"feature@encryption"
"feature@project_quota"
)
fi
diff --git a/tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh b/tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh
index a6ef964ac5fc..20b61da92dfb 100755
--- a/tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/features/async_destroy/async_destroy_001_pos.ksh
@@ -1,87 +1,102 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#
#
# Copyright (c) 2013, 2016 by Delphix. All rights reserved.
#
. $STF_SUITE/include/libtest.shlib
#
# DESCRIPTION:
# Exercise the traversal suspend/resume code in async_destroy by
# destroying a file system that has more blocks than we can free
# in a single txg.
#
# STRATEGY:
# 1. Create a file system
# 2. Set recordsize to 1k to create a large number of blocks
# 3. Set compression to off to force zero-ed blocks to be written
# 4. dd a lot of data from /dev/zero to the file system
# 5. Destroy the file system
# 6. Wait for the freeing property to go to 0
# 7. Use zdb to check for leaked blocks
#
TEST_FS=$TESTPOOL/async_destroy
verify_runnable "both"
function cleanup
{
datasetexists $TEST_FS && log_must zfs destroy $TEST_FS
+ log_must set_tunable64 zfs_async_block_max_blocks 100000
}
log_onexit cleanup
log_assert "async_destroy can suspend and resume traversal"
-log_must zfs create -o recordsize=512 -o compression=off $TEST_FS
+log_must zfs create -o recordsize=1k -o compression=off $TEST_FS
-# Create enough blocks that it will take multiple TXGs to free them all.
+# Fill with 128,000 blocks.
log_must dd bs=1024k count=128 if=/dev/zero of=/$TEST_FS/file
+
+#
+# Decrease the max blocks to free each txg, so that freeing takes
+# long enough that we can observe it.
+#
+log_must set_tunable64 zfs_async_block_max_blocks 100
+
log_must sync
log_must zfs destroy $TEST_FS
#
# We monitor the freeing property, to verify we can see blocks being
-# freed while the suspend/resume code is exerciesd.
+# freed while the suspend/resume code is exercised.
#
t0=$SECONDS
count=0
while [[ $((SECONDS - t0)) -lt 10 ]]; do
[[ "0" != "$(zpool list -Ho freeing $TESTPOOL)" ]] && ((count++))
[[ $count -gt 1 ]] && break
sleep 1
done
[[ $count -eq 0 ]] && log_fail "Freeing property remained empty"
+#
+# After a bit, go back to allowing an unlimited amount of freeing
+# per txg.
+#
+sleep 10
+log_must set_tunable64 zfs_async_block_max_blocks 100000
+
# Wait for everything to be freed.
while [[ "0" != "$(zpool list -Ho freeing $TESTPOOL)" ]]; do
[[ $((SECONDS - t0)) -gt 180 ]] && \
log_fail "Timed out waiting for freeing to drop to zero"
done
# Check for leaked blocks.
log_must zdb -b $TESTPOOL
log_pass "async_destroy can suspend and resume traversal"
diff --git a/tests/zfs-tests/tests/functional/removal/Makefile.am b/tests/zfs-tests/tests/functional/removal/Makefile.am
new file mode 100644
index 000000000000..eac82a2f13d2
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/Makefile.am
@@ -0,0 +1,32 @@
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+
+#
+# Copyright (c) 2014, 2015 by Delphix. All rights reserved.
+#
+
+pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/removal
+
+dist_pkgdata_SCRIPTS = \
+ cleanup.ksh removal_all_vdev.ksh removal_check_space.ksh \
+ removal_condense_export.ksh removal_multiple_indirection.ksh \
+ removal_remap_deadlists.ksh removal_remap.ksh \
+ removal_reservation.ksh removal_resume_export.ksh \
+ removal_sanity.ksh removal_with_add.ksh removal_with_create_fs.ksh \
+ removal_with_dedup.ksh removal_with_export.ksh \
+ removal_with_ganging.ksh removal_with_remap.ksh \
+ removal_with_remove.ksh removal_with_scrub.ksh \
+ removal_with_send.ksh removal_with_send_recv.ksh \
+ removal_with_snapshot.ksh removal_with_write.ksh \
+ removal_with_zdb.ksh remove_mirror.ksh remove_mirror_sanity.ksh \
+ remove_raidz.ksh removal.kshlib
+
+pkgexecdir = $(datadir)/@PACKAGE@/zfs-tests/tests/functional/removal
diff --git a/tests/zfs-tests/tests/functional/removal/cleanup.ksh b/tests/zfs-tests/tests/functional/removal/cleanup.ksh
new file mode 100755
index 000000000000..352a9fe10a20
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/cleanup.ksh
@@ -0,0 +1,23 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+
+default_cleanup
diff --git a/tests/zfs-tests/tests/functional/removal/removal.kshlib b/tests/zfs-tests/tests/functional/removal/removal.kshlib
new file mode 100644
index 000000000000..54a2fb3bd1c3
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal.kshlib
@@ -0,0 +1,158 @@
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+export REMOVEDISK=${DISKS%% *}
+export NOTREMOVEDISK=${DISKS##* }
+
+#
+# Waits for the pool to finish a removal. If an optional callback is given,
+# execute it every 0.5s.
+#
+# Example usage:
+#
+# wait_for_removal $TESTPOOL dd if=/dev/urandom of=/$TESTPOOL/file count=1
+#
+function wait_for_removal # pool [callback args]
+{
+ typeset pool=$1
+ typeset callback=$2
+
+ [[ -n $callback ]] && shift 2
+
+ while is_pool_removing $pool; do
+ [[ -z $callback ]] || log_must $callback "$@"
+ sleep 0.5
+ done
+
+ #
+ # The pool state changes before the TXG finishes syncing; wait for
+ # the removal to be completed on disk.
+ #
+ sync_pool
+
+ log_must is_pool_removed $pool
+ return 0
+}
+
+function indirect_vdev_mapping_size # pool
+{
+ typeset pool=$1
+ zdb -P $pool | grep 'indirect vdev' | \
+ sed -E 's/.*\(([0-9]+) in memory\).*/\1/g'
+}
+
+function random_write # file write_size
+{
+ typeset file=$1
+ typeset block_size=$2
+ typeset file_size=$(stat -c%s $file 2>/dev/null)
+ typeset nblocks=$((file_size / block_size))
+
+ [[ -w $file ]] || return 1
+
+ dd if=/dev/urandom of=$file conv=notrunc \
+ bs=$block_size count=1 seek=$((RANDOM % nblocks)) >/dev/null 2>&1
+}
+
+_test_removal_with_operation_count=0
+function _test_removal_with_operation_cb # real_callback
+{
+ typeset real_callback=$1
+
+ $real_callback $_test_removal_with_operation_count || \
+ log_fail $real_callback "failed after" \
+ $_test_removal_with_operation_count "iterations"
+
+ (( _test_removal_with_operation_count++ ))
+
+ log_note "Callback called $((_test_removal_with_operation_count)) times"
+
+ return 0
+}
+
+function start_random_writer # file
+{
+ typeset file=$1
+ (
+ log_note "Starting writer for $file"
+ # This will fail when we destroy the pool.
+ while random_write $file $((2**12)); do
+ :
+ done
+ log_note "Stopping writer for $file"
+ ) &
+}
+
+#
+# The callback should be a function that takes as input the number of
+# iterations and the given arguments.
+#
+function test_removal_with_operation # callback [count]
+{
+ typeset operation=$1
+ typeset count=$2
+
+ [[ -n $count ]] || count=0
+
+ #
+ # To ensure that the removal takes a while, we fragment the pool
+ # by writing random blocks and continue to do so during the removal.
+ #
+ log_must mkfile 1g $TESTDIR/$TESTFILE0
+ for i in $(seq $((2**10))); do
+ random_write $TESTDIR/$TESTFILE0 $((2**12)) || \
+ log_fail "Could not write to $TESTDIR/$TESTFILE0."
+ done
+ start_random_writer $TESTDIR/$TESTFILE0 1g
+ killpid=$!
+
+ log_must zpool remove $TESTPOOL $REMOVEDISK
+ log_must wait_for_removal $TESTPOOL \
+ _test_removal_with_operation_cb $operation
+ log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK
+ log_must zdb -cd $TESTPOOL
+
+ kill $killpid
+ wait
+
+ #
+ # We would love to assert that the callback happened *during* the
+ # removal, but we don't have the ability to be confident of that
+ # (via limiting bandwidth, etc.) yet. Instead, we try again.
+ #
+ if (( $_test_removal_with_operation_count <= 1 )); then
+ (( count <= 5 )) || log_fail "Attempted test too many times."
+
+ log_note "Callback only called" \
+ $_test_removal_with_operation_count \
+ "times, trying again."
+ default_setup_noexit "$DISKS"
+ test_removal_with_operation $operation $((count + 1))
+ fi
+}
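+#
+# Illustrative callback sketch (not part of the original library): a callback
+# is invoked with the iteration count, e.g. one that snapshots per iteration:
+#
+#   function snap_callback # iteration
+#   {
+#           log_must zfs snapshot $TESTPOOL/$TESTFS@removal.$1
+#   }
+#   test_removal_with_operation snap_callback
+#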
+
+#
+# Kill the background job used by the test_removal_with_operation function.
+#
+function test_removal_with_operation_kill
+{
+ kill $killpid
+ wait $killpid
+ return 0
+}
diff --git a/tests/zfs-tests/tests/functional/removal/removal_all_vdev.ksh b/tests/zfs-tests/tests/functional/removal/removal_all_vdev.ksh
new file mode 100755
index 000000000000..d3a594689a45
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_all_vdev.ksh
@@ -0,0 +1,39 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+for disk in $DISKS; do
+ if [[ "$disk" != "$REMOVEDISK" ]]; then
+ log_must zpool remove $TESTPOOL $disk
+ log_must wait_for_removal $TESTPOOL
+ log_mustnot vdevs_in_pool $TESTPOOL $disk
+ fi
+done
+
+log_must [ "x$(get_disklist $TESTPOOL)" = "x$REMOVEDISK" ]
+
+log_mustnot zpool remove $TESTPOOL $disk
+
+log_pass "Was not able to remove the last device in a pool."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_check_space.ksh b/tests/zfs-tests/tests/functional/removal/removal_check_space.ksh
new file mode 100755
index 000000000000..dec692ada47f
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_check_space.ksh
@@ -0,0 +1,44 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+TMPDIR=${TMPDIR:-$TEST_BASE_DIR}
+log_must mkfile $MINVDEVSIZE $TMPDIR/dsk1
+log_must mkfile $MINVDEVSIZE $TMPDIR/dsk2
+DISKS="$TMPDIR/dsk1 $TMPDIR/dsk2"
+REMOVEDISK=$TMPDIR/dsk1
+
+log_must default_setup_noexit "$DISKS"
+
+function cleanup
+{
+ default_cleanup_noexit
+ log_must rm -f $DISKS
+}
+log_onexit cleanup
+
+# Write a little more than half the pool.
+log_must dd if=/dev/urandom of=/$TESTDIR/$TESTFILE0 bs=$((2**20)) \
+ count=$((MINVDEVSIZE / (1024 * 1024)))
+log_mustnot zpool remove $TESTPOOL $TMPDIR/dsk1
+
+log_pass "Removal will not succeed if insufficient space."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh b/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh
new file mode 100755
index 000000000000..ad33caec84af
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_condense_export.ksh
@@ -0,0 +1,90 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+if is_linux; then
+ log_unsupported "ZDB fails during concurrent pool activity."
+fi
+
+function reset
+{
+ log_must set_tunable64 zfs_condense_indirect_commit_entry_delay_ms 0
+ log_must set_tunable64 zfs_condense_min_mapping_bytes 131072
+ default_cleanup_noexit
+}
+
+default_setup_noexit "$DISKS" "true"
+log_onexit reset
+log_must set_tunable64 zfs_condense_indirect_commit_entry_delay_ms 1000
+log_must set_tunable64 zfs_condense_min_mapping_bytes 1
+
+log_must zfs set recordsize=512 $TESTPOOL/$TESTFS
+
+#
+# Create a large file so that we know some of the blocks will be on the
+# removed device, and hence eligible for remapping.
+#
+log_must dd if=/dev/urandom of=$TESTDIR/file bs=1024k count=10
+
+#
+# Create a file in the other filesystem, which will not be remapped.
+#
+log_must dd if=/dev/urandom of=$TESTDIR1/file bs=1024k count=10
+
+#
+# Randomly rewrite some of the blocks in the file so that there will be holes and
+# we will not be able to remap the entire file in a few huge chunks.
+#
+for i in {1..4096}; do
+ #
+ # We have to sync periodically so that all the writes don't end up in
+ # the same txg. If they were all in the same txg, only the last write
+ # would go through and we would not have as many allocations to
+ # fragment the file.
+ #
+ ((i % 100 > 0 )) || sync_pool $TESTPOOL || log_fail "Could not sync."
+ random_write $TESTDIR/file 512 || \
+ log_fail "Could not random write."
+done
+
+REMOVEDISKPATH=/dev
+case $REMOVEDISK in
+ /*)
+ REMOVEDISKPATH=$(dirname $REMOVEDISK)
+ ;;
+esac
+
+log_must zpool remove $TESTPOOL $REMOVEDISK
+log_must wait_for_removal $TESTPOOL
+log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK
+
+log_must zfs remap $TESTPOOL/$TESTFS
+sync_pool $TESTPOOL
+sleep 5
+sync_pool $TESTPOOL
+log_must zpool export $TESTPOOL
+zdb -e -p $REMOVEDISKPATH $TESTPOOL | grep 'Condensing indirect vdev' || \
+ log_fail "Did not export during a condense."
+log_must zdb -e -p $REMOVEDISKPATH -cudi $TESTPOOL
+log_must zpool import $TESTPOOL
+
+log_pass "Pool can be exported in the middle of a condense."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_multiple_indirection.ksh b/tests/zfs-tests/tests/functional/removal/removal_multiple_indirection.ksh
new file mode 100755
index 000000000000..1f71ec0e45b2
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_multiple_indirection.ksh
@@ -0,0 +1,93 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+#
+# DESCRIPTION:
+#
+# During device removal a file's contents should transfer
+# completely from one disk to another. That should remain
+# the case even if multiple levels of indirection are
+# introduced as we remove more and more devices.
+#
+# STRATEGY:
+#
+# 1. We create a file of size 128k and we save its contents
+# in a local variable.
+# 2. We set the limit of the maximum copied segment size of
+# removals to 32k, so during removal our 128k file will
+# be split into 4 blocks.
+# 3. We start removing disks and adding them back in a loop.
+# This way the file is moved around and split blocks are
+# introduced.
+# 4. The loop itself tests that we don't have any problem
+# when removing many devices. Within the loop we test
+# that the file's contents remain the same across transfers.
+#
+
+TMPDIR=${TMPDIR:-$TEST_BASE_DIR}
+log_must mkfile $(($MINVDEVSIZE * 2)) $TMPDIR/dsk1
+log_must mkfile $(($MINVDEVSIZE * 2)) $TMPDIR/dsk2
+DISKS="$TMPDIR/dsk1 $TMPDIR/dsk2"
+REMOVEDISK=$TMPDIR/dsk1
+
+log_must default_setup_noexit "$DISKS"
+
+function cleanup
+{
+ default_cleanup_noexit
+ log_must rm -f $DISKS
+
+ # reset zfs_remove_max_segment to 1M
+ set_tunable32 zfs_remove_max_segment 1048576
+}
+
+log_onexit cleanup
+
+# set zfs_remove_max_segment to 32k
+log_must set_tunable32 zfs_remove_max_segment 32768
+
+log_must dd if=/dev/urandom of=$TESTDIR/$TESTFILE0 bs=128k count=1
+FILE_CONTENTS=`cat $TESTDIR/$TESTFILE0`
+log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+
+for i in {1..10}; do
+ log_must zpool remove $TESTPOOL $TMPDIR/dsk1
+ log_must wait_for_removal $TESTPOOL
+ log_mustnot vdevs_in_pool $TESTPOOL $TMPDIR/dsk1
+ log_must zpool add $TESTPOOL $TMPDIR/dsk1
+
+ log_must zinject -a
+ log_must dd if=$TESTDIR/$TESTFILE0 of=/dev/null
+ log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+
+ log_must zpool remove $TESTPOOL $TMPDIR/dsk2
+ log_must wait_for_removal $TESTPOOL
+ log_mustnot vdevs_in_pool $TESTPOOL $TMPDIR/dsk2
+ log_must zpool add $TESTPOOL $TMPDIR/dsk2
+
+ log_must zinject -a
+ log_must dd if=$TESTDIR/$TESTFILE0 of=/dev/null
+ log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+done
+
+log_pass "File contents transferred completely from one disk to another."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_remap.ksh b/tests/zfs-tests/tests/functional/removal/removal_remap.ksh
new file mode 100755
index 000000000000..04d0c50e48f7
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_remap.ksh
@@ -0,0 +1,123 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+
+
+function cleanup
+{
+ set_tunable64 zfs_condense_min_mapping_bytes 131072
+ default_cleanup_noexit
+}
+
+log_onexit cleanup
+
+log_must set_tunable64 zfs_condense_min_mapping_bytes 1
+
+log_must zfs set recordsize=512 $TESTPOOL/$TESTFS
+
+#
+# Create a large file so that we know some of the blocks will be on the
+# removed device, and hence eligible for remapping.
+#
+log_must dd if=/dev/urandom of=$TESTDIR/file bs=$((2**12)) count=$((2**9))
+
+#
+# Randomly rewrite some of the blocks in the file so that there will be holes and
+# we will not be able to remap the entire file in a few huge chunks.
+#
+for i in $(seq $((2**12))); do
+ #
+ # We have to sync periodically so that all the writes don't end up in
+ # the same txg. If they were all in the same txg, only the last write
+ # would go through and we would not have as many allocations to
+ # fragment the file.
+ #
+ ((i % 100 > 0 )) || sync_pool || log_fail "Could not sync."
+ random_write $TESTDIR/file $((2**9)) || \
+ log_fail "Could not random write."
+done
+
+#
+# Remap should quietly succeed as a noop before a removal.
+#
+log_must zfs remap $TESTPOOL/$TESTFS
+remaptxg_before=$(zfs get -H -o value remaptxg $TESTPOOL/$TESTFS)
+(( $? == 0 )) || log_fail "Could not get remaptxg."
+[[ $remaptxg_before == "-" ]] || \
+ log_fail "remaptxg ($remaptxg_before) had value before a removal"
+
+log_must zpool remove $TESTPOOL $REMOVEDISK
+log_must wait_for_removal $TESTPOOL
+log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK
+
+#
+# remaptxg should not be set if we haven't done a remap.
+#
+remaptxg_before=$(zfs get -H -o value remaptxg $TESTPOOL/$TESTFS)
+(( $? == 0 )) || log_fail "Could not get remaptxg."
+[[ $remaptxg_before == "-" ]] || \
+ log_fail "remaptxg ($remaptxg_before) had value before a removal"
+
+mapping_size_before=$(indirect_vdev_mapping_size $TESTPOOL)
+log_must zfs remap $TESTPOOL/$TESTFS
+
+# Try to wait for a condense to finish.
+for i in {1..5}; do
+ sleep 5
+ sync_pool
+done
+mapping_size_after=$(indirect_vdev_mapping_size $TESTPOOL)
+
+#
+# After the remap, there should not be very many blocks referenced. The
+# threshold is as high as 512 because our ratio of metadata to user data
+# is relatively high, with only 64M of user data on the file system.
+#
+(( mapping_size_after < mapping_size_before )) || \
+ log_fail "Mapping size did not decrease after remap: " \
+ "$mapping_size_before before to $mapping_size_after after."
+(( mapping_size_after < 512 )) || \
+ log_fail "Mapping size not small enough after remap: " \
+ "$mapping_size_before before to $mapping_size_after after."
+
+#
+# After a remap, the remaptxg should be set to a non-zero value.
+#
+remaptxg_after=$(zfs get -H -o value remaptxg $TESTPOOL/$TESTFS)
+(( $? == 0 )) || log_fail "Could not get remaptxg."
+log_note "remap txg after remap is $remaptxg_after"
+(( remaptxg_after > 0 )) || log_fail "remaptxg not increased"
+
+#
+# Remap should quietly succeed as a noop if there have been no removals since
+# the last remap.
+#
+log_must zfs remap $TESTPOOL/$TESTFS
+remaptxg_again=$(zfs get -H -o value remaptxg $TESTPOOL/$TESTFS)
+(( $? == 0 )) || log_fail "Could not get remaptxg."
+log_note "remap txg after second remap is $remaptxg_again"
+(( remaptxg_again == remaptxg_after )) || \
+ log_fail "remap not noop if there has been no removal"
+
+log_pass "Remapping a fs caused mapping size to decrease."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_remap_deadlists.ksh b/tests/zfs-tests/tests/functional/removal/removal_remap_deadlists.ksh
new file mode 100755
index 000000000000..5b5be66b3343
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_remap_deadlists.ksh
@@ -0,0 +1,77 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+log_must dd if=/dev/zero of=$TESTDIR/file bs=1024k count=300
+
+log_must zfs snapshot $TESTPOOL/$TESTFS@snap-pre1
+log_must dd if=/dev/zero of=$TESTDIR/file bs=1024k count=100 \
+ conv=notrunc seek=100
+
+log_must zfs snapshot $TESTPOOL/$TESTFS@snap-pre2
+log_must dd if=/dev/zero of=$TESTDIR/file bs=1024k count=100 \
+ conv=notrunc seek=200
+
+log_must zpool remove $TESTPOOL $REMOVEDISK
+if is_linux; then
+ log_must wait_for_removal $TESTPOOL
+else
+ log_must wait_for_removal $TESTPOOL zdb -cd $TESTPOOL
+fi
+log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK
+log_must zdb -cd $TESTPOOL
+
+log_must zfs remap $TESTPOOL/$TESTFS
+log_must zdb -cd $TESTPOOL
+
+log_must zfs snapshot $TESTPOOL/$TESTFS@snap-post3
+log_must zdb -cd $TESTPOOL
+
+log_must zfs snapshot $TESTPOOL/$TESTFS@snap-post4
+log_must zdb -cd $TESTPOOL
+
+#
+# Test case where a block is moved from the remap deadlist: blocks born
+# before snap-pre2 will be obsoleted.
+#
+log_must zfs destroy $TESTPOOL/$TESTFS@snap-pre2
+log_must zdb -cd $TESTPOOL
+
+#
+# Test case where we merge remap deadlists: blocks before snap-pre1 will
+# need to go on snap-post4's deadlist.
+#
+log_must zfs destroy $TESTPOOL/$TESTFS@snap-post3
+log_must zdb -cd $TESTPOOL
+
+log_must zfs destroy $TESTPOOL/$TESTFS@snap-post4
+
+#
+# Test rollback.
+#
+log_must zfs rollback $TESTPOOL/$TESTFS@snap-pre1
+log_must zfs destroy $TESTPOOL/$TESTFS@snap-pre1
+
+log_pass "Remove and remap works with snapshots and deadlists."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_reservation.ksh b/tests/zfs-tests/tests/functional/removal/removal_reservation.ksh
new file mode 100755
index 000000000000..b57f1777ce0c
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_reservation.ksh
@@ -0,0 +1,68 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+TMPDIR=${TMPDIR:-$TEST_BASE_DIR}
+log_must mkfile 1g $TMPDIR/dsk1
+log_must mkfile 1g $TMPDIR/dsk2
+DISKS="$TMPDIR/dsk1 $TMPDIR/dsk2"
+REMOVEDISK=$TMPDIR/dsk1
+
+default_setup_noexit "$DISKS"
+
+function cleanup
+{
+ default_cleanup_noexit
+ log_must rm -f $DISKS
+}
+
+log_onexit cleanup
+
+log_must zfs set compression=off $TESTPOOL/$TESTFS
+
+# Write a little under half the pool.
+log_must file_write -o create -f $TESTDIR/$TESTFILE1 -b $((2**20)) -c $((2**9))
+
+#
+# Start a writing thread to ensure the removal will take a while.
+# This will automatically die when we destroy the pool.
+#
+start_random_writer $TESTDIR/$TESTFILE1
+
+callback_count=0
+function callback
+{
+ (( callback_count++ ))
+ (( callback_count == 1 )) || return 0
+
+ # Attempt to write more than the new pool will be able to handle.
+ file_write -o create -f $TESTDIR/$TESTFILE2 -b $((2**20)) -c $((2**9))
+ zret=$?
+ ENOSPC=28
+ log_note "file_write returned $zret"
+ (( $zret == $ENOSPC )) || log_fail "Did not get ENOSPC during removal."
+}
+
+log_must zpool remove $TESTPOOL $REMOVEDISK
+log_must wait_for_removal $TESTPOOL callback
+
+log_pass "Removal properly sets reservation."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_resume_export.ksh b/tests/zfs-tests/tests/functional/removal/removal_resume_export.ksh
new file mode 100755
index 000000000000..5cecfdb5dd57
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_resume_export.ksh
@@ -0,0 +1,134 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2017 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+#
+# DESCRIPTION:
+#
+# When a pool with an ongoing removal is exported, ZFS suspends
+# the removal thread beforehand. This test ensures that ZFS
+# restarts the removal thread if the export fails for some
+# reason.
+#
+# STRATEGY:
+#
+# 1. Create a pool with one vdev and do some writes on it.
+# 2. Add a new vdev to the pool and start the removal of
+# the first vdev.
+# 3. Inject a fault in the pool and attempt to export (it
+# should fail).
+# 4. After the export fails, ensure that the removal thread
+# was restarted (i.e. the svr_thread field in the spa
+# should be non-zero).
+#
+
+
+function cleanup
+{
+ log_must zinject -c all
+ default_cleanup_noexit
+}
+
+function ensure_thread_running # spa_address
+{
+ if is_linux; then
+ typeset TRIES=0
+ typeset THREAD_PID
+ while [[ $TRIES -lt 10 ]]; do
+ THREAD_PID=$(pgrep spa_vdev_remove)
+ [[ "$THREAD_PID" ]] && break
+ sleep 0.1
+ (( TRIES = TRIES + 1 ))
+ done
+ [[ "$THREAD_PID" ]] || \
+ log_fail "removal thread is not running TRIES=$TRIES THREAD_PID=$THREAD_PID"
+ else
+ #
+ # Try to get the address of the removal thread.
+ #
+ typeset THREAD_ADDR=$(mdb -ke "$1::print \
+ spa_t spa_vdev_removal->svr_thread" | awk "{print \$3}")
+
+ #
+ # If the address is NULL, it means that the thread is
+ # not running.
+ #
+ [[ "$THREAD_ADDR" = 0 ]] && \
+ log_fail "removal thread is not running"
+ fi
+
+ return 0
+}
+
+log_onexit cleanup
+
+#
+# Create pool with one disk.
+#
+log_must default_setup_noexit "$REMOVEDISK"
+
+#
+# Save the address of the SPA in memory so we can check with mdb
+# whether the removal thread is running.
+#
+is_linux || typeset SPA_ADDR=$(mdb -ke "::spa" | awk "/$TESTPOOL/ {print \$1}")
+
+#
+# Turn off compression so the data consumes as much capacity as
+# possible in the short time that this test runs.
+#
+log_must zfs set compression=off $TESTPOOL/$TESTFS
+
+#
+# Write some data that will be evacuated from the device when
+# we start the removal.
+#
+log_must dd if=/dev/urandom of=$TESTDIR/$TESTFILE0 bs=64M count=32
+
+#
+# Add second device where all the data will be evacuated.
+#
+log_must zpool add -f $TESTPOOL $NOTREMOVEDISK
+
+#
+# Start removal.
+#
+log_must zpool remove $TESTPOOL $REMOVEDISK
+
+#
+# Inject an error so export fails after having just suspended
+# the removal thread. [spa_inject_ref gets incremented]
+#
+log_must zinject -d $REMOVEDISK -D 10:1 $TESTPOOL
+
+log_must ensure_thread_running $SPA_ADDR
+
+#
+# Because of the above error, the export should fail.
+#
+log_mustnot zpool export $TESTPOOL
+
+log_must ensure_thread_running $SPA_ADDR
+
+wait_for_removal $TESTPOOL
+
+log_pass "Device removal thread resumes after failed export"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_sanity.ksh b/tests/zfs-tests/tests/functional/removal/removal_sanity.ksh
new file mode 100755
index 000000000000..f7f69c7d262f
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_sanity.ksh
@@ -0,0 +1,39 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+FILE_CONTENTS="Leeloo Dallas mul-ti-pass."
+
+echo $FILE_CONTENTS >$TESTDIR/$TESTFILE0
+log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+
+log_must zpool remove $TESTPOOL $REMOVEDISK
+log_must wait_for_removal $TESTPOOL
+log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK
+
+log_must dd if=/$TESTDIR/$TESTFILE0 of=/dev/null
+log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+
+log_pass "Removed device not in use after removal."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_add.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_add.ksh
new file mode 100755
index 000000000000..e719a5ecc8fa
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_add.ksh
@@ -0,0 +1,52 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+TMPDIR=${TMPDIR:-$TEST_BASE_DIR}
+log_must mkfile 1g $TMPDIR/dsk1
+log_must mkfile 1g $TMPDIR/dsk2
+
+function cleanup
+{
+ default_cleanup_noexit
+ log_must rm -f $TMPDIR/dsk1 $TMPDIR/dsk2
+}
+
+default_setup_noexit "$DISKS"
+log_onexit cleanup
+
+function callback
+{
+ typeset count=$1
+ if ((count == 0)); then
+ log_mustnot zpool attach -f $TESTPOOL $TMPDIR/dsk1 $TMPDIR/dsk2
+ log_mustnot zpool add -f $TESTPOOL \
+ raidz $TMPDIR/dsk1 $TMPDIR/dsk2
+ log_must zpool add -f $TESTPOOL $TMPDIR/dsk1
+ fi
+
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Removal can only add normal disks."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_create_fs.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_create_fs.ksh
new file mode 100755
index 000000000000..403428290e83
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_create_fs.ksh
@@ -0,0 +1,39 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+function callback
+{
+ typeset count=$1
+ if ((count == 0)); then
+ log_must zfs create $TESTPOOL/$TESTFS1
+ log_must zfs destroy $TESTPOOL/$TESTFS1
+ fi
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Can write to device during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_dedup.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_dedup.ksh
new file mode 100755
index 000000000000..4b6c94a80193
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_dedup.ksh
@@ -0,0 +1,49 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+log_must zfs set dedup=on $TESTPOOL
+
+FILE_CONTENTS="Leeloo Dallas mul-ti-pass."
+
+echo $FILE_CONTENTS >$TESTDIR/$TESTFILE0
+echo $FILE_CONTENTS >$TESTDIR/$TESTFILE1
+echo $FILE_CONTENTS >$TESTDIR/$TESTFILE2
+log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+log_must [ "x$(cat $TESTDIR/$TESTFILE1)" = "x$FILE_CONTENTS" ]
+log_must [ "x$(cat $TESTDIR/$TESTFILE2)" = "x$FILE_CONTENTS" ]
+
+log_must zpool remove $TESTPOOL $REMOVEDISK
+log_must wait_for_removal $TESTPOOL
+log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK
+
+log_must dd if=/$TESTDIR/$TESTFILE0 of=/dev/null
+log_must dd if=/$TESTDIR/$TESTFILE1 of=/dev/null
+log_must dd if=/$TESTDIR/$TESTFILE2 of=/dev/null
+log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+log_must [ "x$(cat $TESTDIR/$TESTFILE1)" = "x$FILE_CONTENTS" ]
+log_must [ "x$(cat $TESTDIR/$TESTFILE2)" = "x$FILE_CONTENTS" ]
+
+log_pass "Removed device not in use after removal."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_export.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_export.ksh
new file mode 100755
index 000000000000..38d6d53d4e45
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_export.ksh
@@ -0,0 +1,50 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+function callback # count
+{
+ typeset count=$1
+ if ((count == 0)); then
+ is_linux && test_removal_with_operation_kill
+ log_must zpool export $TESTPOOL
+
+ #
+ # We are concurrently starting dd processes that will
+ # create files in $TESTDIR. These could cause the import
+ # to fail because the filesystem can't be mounted on a
+ # non-empty directory. Therefore, remove the directory
+ # so that the dd process will fail.
+ #
+ log_must rm -rf $TESTDIR
+
+ log_must zpool import $TESTPOOL
+ fi
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Can export and import pool during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_ganging.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_ganging.ksh
new file mode 100755
index 000000000000..cfbca89bda34
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_ganging.ksh
@@ -0,0 +1,47 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+function cleanup
+{
+ log_must set_tunable64 metaslab_gang_bang $((2**17 + 1))
+ default_cleanup_noexit
+}
+
+default_setup_noexit "$DISKS"
+log_must set_tunable64 metaslab_gang_bang $((2**12))
+log_onexit cleanup
+
+FILE_CONTENTS="Leeloo Dallas mul-ti-pass."
+
+echo $FILE_CONTENTS >$TESTDIR/$TESTFILE0
+log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+log_must file_write -o create -f $TESTDIR/$TESTFILE1 -b $((2**20)) -c $((2**7))
+
+log_must zpool remove $TESTPOOL $REMOVEDISK
+log_must wait_for_removal $TESTPOOL
+log_mustnot vdevs_in_pool $TESTPOOL $REMOVEDISK
+
+log_must dd if=/$TESTDIR/$TESTFILE0 of=/dev/null
+log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+
+log_pass "Removed device not in use after removal."
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_remap.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_remap.ksh
new file mode 100755
index 000000000000..63050a6479bc
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_remap.ksh
@@ -0,0 +1,38 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+function callback
+{
+ typeset count=$1
+ if ((count == 0)); then
+ zfs remap $TESTPOOL/$TESTFS
+ fi
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Can remap a filesystem during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_remove.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_remove.ksh
new file mode 100755
index 000000000000..fef7c293b943
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_remove.ksh
@@ -0,0 +1,38 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+function callback # count
+{
+ typeset count=$1
+ if ((count == 0)); then
+ log_mustnot zpool remove $TESTPOOL $NOTREMOVEDISK
+ fi
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Cannot remove a disk during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_scrub.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_scrub.ksh
new file mode 100755
index 000000000000..33eb41bf2acd
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_scrub.ksh
@@ -0,0 +1,38 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+function callback
+{
+ typeset count=$1
+ if ((count == 0)); then
+ log_must zpool scrub $TESTPOOL
+ fi
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Can use scrub during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_send.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_send.ksh
new file mode 100755
index 000000000000..c5a92505c4e7
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_send.ksh
@@ -0,0 +1,40 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+function callback
+{
+ typeset count=$1
+ if ((count == 0)); then
+ create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
+ log_must ksh -c \
+ "zfs send $TESTPOOL/$TESTFS@$TESTSNAP >/dev/null"
+ fi
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Can use send during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_send_recv.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_send_recv.ksh
new file mode 100755
index 000000000000..c7d1c8a89c40
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_send_recv.ksh
@@ -0,0 +1,41 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+function callback
+{
+ typeset count=$1
+ if ((count == 0)); then
+ create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
+ log_must ksh -o pipefail -c \
+ "zfs send $TESTPOOL/$TESTFS@$TESTSNAP | \
+ zfs recv $TESTPOOL/$TESTFS1"
+ fi
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Can send and recv during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_snapshot.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_snapshot.ksh
new file mode 100755
index 000000000000..7fe36a94f15e
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_snapshot.ksh
@@ -0,0 +1,39 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+function callback
+{
+ typeset count=$1
+ if ((count == 0)); then
+ create_snapshot $TESTPOOL/$TESTFS $TESTSNAP
+ destroy_snapshot $TESTPOOL/$TESTFS@$TESTSNAP
+ fi
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Can create and destroy snapshot during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_write.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_write.ksh
new file mode 100755
index 000000000000..5d37b903e08c
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_write.ksh
@@ -0,0 +1,34 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+default_setup_noexit "$DISKS"
+log_onexit default_cleanup_noexit
+
+function callback
+{
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_pass "Can write to device during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/removal_with_zdb.ksh b/tests/zfs-tests/tests/functional/removal/removal_with_zdb.ksh
new file mode 100755
index 000000000000..1f609273c44f
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/removal_with_zdb.ksh
@@ -0,0 +1,78 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+zdbout=${TMPDIR:-$TEST_BASE_DIR}/zdbout.$$
+
+if is_linux; then
+ log_unsupported "ZDB fails during concurrent pool activity."
+fi
+
+function cleanup
+{
+ default_cleanup_noexit
+ log_must rm -f $zdbout
+}
+
+default_setup_noexit "$DISKS"
+log_onexit cleanup
+FIRSTDISK=${DISKS%% *}
+
+DISKPATH=/dev
+case $FIRSTDISK in
+ /*)
+ DISKPATH=$(dirname $FIRSTDISK)
+ ;;
+esac
+
+function callback
+{
+ typeset count=$1
+ typeset zdbstat
+
+ log_must zpool set cachefile=none $TESTPOOL
+ zdb -e -p $DISKPATH -cudi $TESTPOOL >$zdbout 2>&1
+ zdbstat=$?
+ log_must zpool set cachefile= $TESTPOOL
+ if [[ $zdbstat != 0 ]]; then
+ log_note "Output: zdb -e -p $DISKPATH -cudi $TESTPOOL"
+ cat $zdbout
+ log_note "zdb detected errors with exist status $zdbstat."
+ fi
+ log_note "zdb -e -p $DISKPATH -cudi $TESTPOOL >zdbout 2>&1"
+ return 0
+}
+
+test_removal_with_operation callback
+
+log_must zpool set cachefile=none $TESTPOOL
+zdb -e -p $DISKPATH -cudi $TESTPOOL >$zdbout 2>&1
+zdbstat=$?
+log_must zpool set cachefile= $TESTPOOL
+if [[ $zdbstat != 0 ]]; then
+ log_note "Output following removal: zdb -cudi $TESTPOOL"
+ cat $zdbout
+ log_fail "zdb detected errors with exit status " \
+ "$zdbstat following removal."
+fi
+
+log_pass "Can use zdb during removal"
diff --git a/tests/zfs-tests/tests/functional/removal/remove_mirror.ksh b/tests/zfs-tests/tests/functional/removal/remove_mirror.ksh
new file mode 100755
index 000000000000..06335b70360a
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/remove_mirror.ksh
@@ -0,0 +1,57 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2018 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+TMPDIR=${TMPDIR:-$TEST_BASE_DIR}
+
+DISK1="$TMPDIR/dsk1"
+DISK2="$TMPDIR/dsk2"
+DISK3="$TMPDIR/dsk3"
+DISKS="$DISK1 $DISK2 $DISK3"
+
+log_must mkfile $(($MINVDEVSIZE * 2)) $DISK1
+log_must mkfile $(($MINVDEVSIZE * 2)) $DISK2
+log_must mkfile $(($MINVDEVSIZE * 2)) $DISK3
+
+function cleanup
+{
+ default_cleanup_noexit
+ log_must rm -f $DISKS
+}
+
+log_must default_setup_noexit "$DISK1 mirror $DISK2 $DISK3"
+log_onexit cleanup
+
+# Attempt to remove the non-mirrored disk.
+log_must zpool remove $TESTPOOL $DISK1
+
+# Attempt to remove one of the disks in the mirror.
+log_mustnot zpool remove $TESTPOOL $DISK2
+log_must wait_for_removal $TESTPOOL
+
+# Add back the first disk.
+log_must zpool add $TESTPOOL $DISK1
+
+# Now attempt to remove the mirror.
+log_must zpool remove $TESTPOOL mirror-1
+
+log_pass "Removal will succeed for top level vdev(s)."
diff --git a/tests/zfs-tests/tests/functional/removal/remove_mirror_sanity.ksh b/tests/zfs-tests/tests/functional/removal/remove_mirror_sanity.ksh
new file mode 100755
index 000000000000..fa3fb5de7852
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/remove_mirror_sanity.ksh
@@ -0,0 +1,58 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2017 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+DISK1=$(echo $DISKS | awk '{print $1}')
+DISK2=$(echo $DISKS | awk '{print $2}')
+DISK3=$(echo $DISKS | awk '{print $3}')
+DISKS="$DISK1 $DISK2 $DISK3"
+
+log_must default_setup_noexit "$DISK1 mirror $DISK2 $DISK3"
+log_onexit default_cleanup_noexit
+
+WORDS_FILE1="/usr/dict/words"
+WORDS_FILE2="/usr/share/dict/words"
+FILE_CONTENTS="Leeloo Dallas mul-ti-pass."
+
+if [[ -f $WORDS_FILE1 ]]; then
+ log_must cp $WORDS_FILE1 $TESTDIR
+elif [[ -f $WORDS_FILE2 ]]; then
+ log_must cp $WORDS_FILE2 $TESTDIR
+else
+ echo $FILE_CONTENTS >$TESTDIR/$TESTFILE0
+ log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+fi
+
+log_must zpool remove $TESTPOOL mirror-1
+log_must wait_for_removal $TESTPOOL
+log_mustnot vdevs_in_pool $TESTPOOL mirror-1
+
+if [[ -f $WORDS_FILE1 ]]; then
+ log_must diff $WORDS_FILE1 $TESTDIR/words
+elif [[ -f $WORDS_FILE2 ]]; then
+ log_must diff $WORDS_FILE2 $TESTDIR/words
+else
+ log_must dd if=/$TESTDIR/$TESTFILE0 of=/dev/null
+ log_must [ "x$(cat $TESTDIR/$TESTFILE0)" = "x$FILE_CONTENTS" ]
+fi
+
+log_pass "Removed top-level mirror device not in use after removal."
diff --git a/tests/zfs-tests/tests/functional/removal/remove_raidz.ksh b/tests/zfs-tests/tests/functional/removal/remove_raidz.ksh
new file mode 100755
index 000000000000..98d4536a148d
--- /dev/null
+++ b/tests/zfs-tests/tests/functional/removal/remove_raidz.ksh
@@ -0,0 +1,50 @@
+#! /bin/ksh -p
+#
+# CDDL HEADER START
+#
+# This file and its contents are supplied under the terms of the
+# Common Development and Distribution License ("CDDL"), version 1.0.
+# You may only use this file in accordance with the terms of version
+# 1.0 of the CDDL.
+#
+# A full copy of the text of the CDDL should have accompanied this
+# source. A copy of the CDDL is also available via the Internet at
+# http://www.illumos.org/license/CDDL.
+#
+# CDDL HEADER END
+#
+
+#
+# Copyright (c) 2014, 2016 by Delphix. All rights reserved.
+#
+
+. $STF_SUITE/include/libtest.shlib
+. $STF_SUITE/tests/functional/removal/removal.kshlib
+
+TMPDIR=${TMPDIR:-$TEST_BASE_DIR}
+log_must mkfile $MINVDEVSIZE $TMPDIR/dsk1
+log_must mkfile $MINVDEVSIZE $TMPDIR/dsk2
+log_must mkfile $MINVDEVSIZE $TMPDIR/dsk3
+DISKS1="$TMPDIR/dsk1"
+DISKS2="$TMPDIR/dsk2 $TMPDIR/dsk3"
+DISKS="$DISKS1 $DISKS2"
+
+function cleanup
+{
+ default_cleanup_noexit
+ log_must rm -f $DISKS
+}
+
+log_must default_setup_noexit "$DISKS1 raidz $DISKS2"
+log_onexit cleanup
+
+# Attempt to remove the non-raidz disk.
+log_mustnot zpool remove $TESTPOOL $TMPDIR/dsk1
+
+# Attempt to remove one of the raidz disks.
+log_mustnot zpool remove $TESTPOOL $TMPDIR/dsk2
+
+# Attempt to remove the raidz.
+log_mustnot zpool remove $TESTPOOL raidz1-1
+
+log_pass "Removal will not succeed if there is a top level mirror."
