diff --git a/include/sys/dnode.h b/include/sys/dnode.h index 9aacac3dfe30..ebede2d06e35 100644 --- a/include/sys/dnode.h +++ b/include/sys/dnode.h @@ -1,397 +1,402 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2016 by Delphix. All rights reserved. * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. */ #ifndef _SYS_DNODE_H #define _SYS_DNODE_H #include #include #include #include #include #include #include #include #ifdef __cplusplus extern "C" { #endif /* * dnode_hold() flags. */ #define DNODE_MUST_BE_ALLOCATED 1 #define DNODE_MUST_BE_FREE 2 /* * dnode_next_offset() flags. */ #define DNODE_FIND_HOLE 1 #define DNODE_FIND_BACKWARDS 2 #define DNODE_FIND_HAVELOCK 4 /* * Fixed constants. */ #define DNODE_SHIFT 9 /* 512 bytes */ #define DN_MIN_INDBLKSHIFT 12 /* 4k */ /* * If we ever increase this value beyond 20, we need to revisit all logic that * does x << level * ebps to handle overflow. With a 1M indirect block size, * 4 levels of indirect blocks would not be able to guarantee addressing an * entire object, so 5 levels will be used, but 5 * (20 - 7) = 65. */ -#define DN_MAX_INDBLKSHIFT 14 /* 16k */ +#define DN_MAX_INDBLKSHIFT 17 /* 128k */ #define DNODE_BLOCK_SHIFT 14 /* 16k */ #define DNODE_CORE_SIZE 64 /* 64 bytes for dnode sans blkptrs */ #define DN_MAX_OBJECT_SHIFT 48 /* 256 trillion (zfs_fid_t limit) */ #define DN_MAX_OFFSET_SHIFT 64 /* 2^64 bytes in a dnode */ /* * dnode id flags * * Note: a file will never ever have its * ids moved from bonus->spill * and only in a crypto environment would it be on spill */ #define DN_ID_CHKED_BONUS 0x1 #define DN_ID_CHKED_SPILL 0x2 #define DN_ID_OLD_EXIST 0x4 #define DN_ID_NEW_EXIST 0x8 /* * Derived constants. */ #define DNODE_MIN_SIZE (1 << DNODE_SHIFT) #define DNODE_MAX_SIZE (1 << DNODE_BLOCK_SHIFT) #define DNODE_BLOCK_SIZE (1 << DNODE_BLOCK_SHIFT) #define DNODE_MIN_SLOTS (DNODE_MIN_SIZE >> DNODE_SHIFT) #define DNODE_MAX_SLOTS (DNODE_MAX_SIZE >> DNODE_SHIFT) #define DN_BONUS_SIZE(dnsize) ((dnsize) - DNODE_CORE_SIZE - \ (1 << SPA_BLKPTRSHIFT)) #define DN_SLOTS_TO_BONUSLEN(slots) DN_BONUS_SIZE((slots) << DNODE_SHIFT) #define DN_OLD_MAX_BONUSLEN (DN_BONUS_SIZE(DNODE_MIN_SIZE)) #define DN_MAX_NBLKPTR ((DNODE_MIN_SIZE - DNODE_CORE_SIZE) >> SPA_BLKPTRSHIFT) #define DN_MAX_OBJECT (1ULL << DN_MAX_OBJECT_SHIFT) #define DN_ZERO_BONUSLEN (DN_BONUS_SIZE(DNODE_MAX_SIZE) + 1) #define DN_KILL_SPILLBLK (1) #define DNODES_PER_BLOCK_SHIFT (DNODE_BLOCK_SHIFT - DNODE_SHIFT) #define DNODES_PER_BLOCK (1ULL << DNODES_PER_BLOCK_SHIFT) + +/* + * This is inaccurate if the indblkshift of the particular object is not the + * max. But it's only used by userland to calculate the zvol reservation. 
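+ *
+ * An illustrative worked example (ours, not part of the change): with
+ * DN_MAX_INDBLKSHIFT = 17 and SPA_BLKPTRSHIFT = 7, one indirect block
+ * holds 1 << (17 - 7) = 1024 block pointers, so DNODES_PER_LEVEL below
+ * is 1024. An object still using the old maximum indblkshift of 14
+ * fans out only 1 << (14 - 7) = 128 per level, which is why this
+ * constant can overstate the fanout for such objects.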
+ */ #define DNODES_PER_LEVEL_SHIFT (DN_MAX_INDBLKSHIFT - SPA_BLKPTRSHIFT) #define DNODES_PER_LEVEL (1ULL << DNODES_PER_LEVEL_SHIFT) #define DN_MAX_LEVELS (DIV_ROUND_UP(DN_MAX_OFFSET_SHIFT - SPA_MINBLOCKSHIFT, \ DN_MIN_INDBLKSHIFT - SPA_BLKPTRSHIFT) + 1) #define DN_BONUS(dnp) ((void*)((dnp)->dn_bonus + \ (((dnp)->dn_nblkptr - 1) * sizeof (blkptr_t)))) #define DN_USED_BYTES(dnp) (((dnp)->dn_flags & DNODE_FLAG_USED_BYTES) ? \ (dnp)->dn_used : (dnp)->dn_used << SPA_MINBLOCKSHIFT) #define EPB(blkshift, typeshift) (1 << (blkshift - typeshift)) struct dmu_buf_impl; struct objset; struct zio; enum dnode_dirtycontext { DN_UNDIRTIED, DN_DIRTY_OPEN, DN_DIRTY_SYNC }; /* Is dn_used in bytes? if not, it's in multiples of SPA_MINBLOCKSIZE */ #define DNODE_FLAG_USED_BYTES (1 << 0) #define DNODE_FLAG_USERUSED_ACCOUNTED (1 << 1) /* Does dnode have a SA spill blkptr in bonus? */ #define DNODE_FLAG_SPILL_BLKPTR (1 << 2) /* User/Group dnode accounting */ #define DNODE_FLAG_USEROBJUSED_ACCOUNTED (1 << 3) typedef struct dnode_phys { uint8_t dn_type; /* dmu_object_type_t */ uint8_t dn_indblkshift; /* ln2(indirect block size) */ uint8_t dn_nlevels; /* 1=dn_blkptr->data blocks */ uint8_t dn_nblkptr; /* length of dn_blkptr */ uint8_t dn_bonustype; /* type of data in bonus buffer */ uint8_t dn_checksum; /* ZIO_CHECKSUM type */ uint8_t dn_compress; /* ZIO_COMPRESS type */ uint8_t dn_flags; /* DNODE_FLAG_* */ uint16_t dn_datablkszsec; /* data block size in 512b sectors */ uint16_t dn_bonuslen; /* length of dn_bonus */ uint8_t dn_extra_slots; /* # of subsequent slots consumed */ uint8_t dn_pad2[3]; /* accounting is protected by dn_dirty_mtx */ uint64_t dn_maxblkid; /* largest allocated block ID */ uint64_t dn_used; /* bytes (or sectors) of disk space */ uint64_t dn_pad3[4]; /* * The tail region is 448 bytes for a 512 byte dnode, and * correspondingly larger for larger dnode sizes. The spill * block pointer, when present, is always at the end of the tail * region. There are three ways this space may be used, using * a 512 byte dnode for this diagram: * * 0 64 128 192 256 320 384 448 (offset) * +---------------+---------------+---------------+-------+ * | dn_blkptr[0] | dn_blkptr[1] | dn_blkptr[2] | / | * +---------------+---------------+---------------+-------+ * | dn_blkptr[0] | dn_bonus[0..319] | * +---------------+-----------------------+---------------+ * | dn_blkptr[0] | dn_bonus[0..191] | dn_spill | * +---------------+-----------------------+---------------+ */ union { blkptr_t dn_blkptr[1+DN_OLD_MAX_BONUSLEN/sizeof (blkptr_t)]; struct { blkptr_t __dn_ignore1; uint8_t dn_bonus[DN_OLD_MAX_BONUSLEN]; }; struct { blkptr_t __dn_ignore2; uint8_t __dn_ignore3[DN_OLD_MAX_BONUSLEN - sizeof (blkptr_t)]; blkptr_t dn_spill; }; }; } dnode_phys_t; #define DN_SPILL_BLKPTR(dnp) (blkptr_t *)((char *)(dnp) + \ (((dnp)->dn_extra_slots + 1) << DNODE_SHIFT) - (1 << SPA_BLKPTRSHIFT)) struct dnode { /* * Protects the structure of the dnode, including the number of levels * of indirection (dn_nlevels), dn_maxblkid, and dn_next_* */ krwlock_t dn_struct_rwlock; /* Our link on dn_objset->os_dnodes list; protected by os_lock. */ list_node_t dn_link; /* immutable: */ struct objset *dn_objset; uint64_t dn_object; struct dmu_buf_impl *dn_dbuf; struct dnode_handle *dn_handle; dnode_phys_t *dn_phys; /* pointer into dn->dn_dbuf->db.db_data */ /* * Copies of stuff in dn_phys. They're valid in the open * context (eg. even before the dnode is first synced). * Where necessary, these are protected by dn_struct_rwlock. 
*/ dmu_object_type_t dn_type; /* object type */ uint16_t dn_bonuslen; /* bonus length */ uint8_t dn_bonustype; /* bonus type */ uint8_t dn_nblkptr; /* number of blkptrs (immutable) */ uint8_t dn_checksum; /* ZIO_CHECKSUM type */ uint8_t dn_compress; /* ZIO_COMPRESS type */ uint8_t dn_nlevels; uint8_t dn_indblkshift; uint8_t dn_datablkshift; /* zero if blksz not power of 2! */ uint8_t dn_moved; /* Has this dnode been moved? */ uint16_t dn_datablkszsec; /* in 512b sectors */ uint32_t dn_datablksz; /* in bytes */ uint64_t dn_maxblkid; uint8_t dn_next_type[TXG_SIZE]; uint8_t dn_num_slots; /* metadnode slots consumed on disk */ uint8_t dn_next_nblkptr[TXG_SIZE]; uint8_t dn_next_nlevels[TXG_SIZE]; uint8_t dn_next_indblkshift[TXG_SIZE]; uint8_t dn_next_bonustype[TXG_SIZE]; uint8_t dn_rm_spillblk[TXG_SIZE]; /* for removing spill blk */ uint16_t dn_next_bonuslen[TXG_SIZE]; uint32_t dn_next_blksz[TXG_SIZE]; /* next block size in bytes */ /* protected by dn_dbufs_mtx; declared here to fill 32-bit hole */ uint32_t dn_dbufs_count; /* count of dn_dbufs */ /* protected by os_lock: */ list_node_t dn_dirty_link[TXG_SIZE]; /* next on dataset's dirty */ /* protected by dn_mtx: */ kmutex_t dn_mtx; list_t dn_dirty_records[TXG_SIZE]; struct range_tree *dn_free_ranges[TXG_SIZE]; uint64_t dn_allocated_txg; uint64_t dn_free_txg; uint64_t dn_assigned_txg; kcondvar_t dn_notxholds; enum dnode_dirtycontext dn_dirtyctx; uint8_t *dn_dirtyctx_firstset; /* dbg: contents meaningless */ /* protected by own devices */ refcount_t dn_tx_holds; refcount_t dn_holds; kmutex_t dn_dbufs_mtx; /* * Descendent dbufs, ordered by dbuf_compare. Note that dn_dbufs * can contain multiple dbufs of the same (level, blkid) when a * dbuf is marked DB_EVICTING without being removed from * dn_dbufs. To maintain the avl invariant that there cannot be * duplicate entries, we order the dbufs by an arbitrary value - * their address in memory. This means that dn_dbufs cannot be used to * directly look up a dbuf. Instead, callers must use avl_walk, have * a reference to the dbuf, or look up a non-existent node with * db_state = DB_SEARCH (see dbuf_free_range for an example). */ avl_tree_t dn_dbufs; /* protected by dn_struct_rwlock */ struct dmu_buf_impl *dn_bonus; /* bonus buffer dbuf */ boolean_t dn_have_spill; /* have spill or are spilling */ /* parent IO for current sync write */ zio_t *dn_zio; /* used in syncing context */ uint64_t dn_oldused; /* old phys used bytes */ uint64_t dn_oldflags; /* old phys dn_flags */ uint64_t dn_olduid, dn_oldgid; uint64_t dn_newuid, dn_newgid; int dn_id_flags; /* holds prefetch structure */ struct zfetch dn_zfetch; }; /* * Adds a level of indirection between the dbuf and the dnode to avoid * iterating descendent dbufs in dnode_move(). Handles are not allocated * individually, but as an array of child dnodes in dnode_hold_impl(). */ typedef struct dnode_handle { /* Protects dnh_dnode from modification by dnode_move(). 
*/ zrlock_t dnh_zrlock; dnode_t *dnh_dnode; } dnode_handle_t; typedef struct dnode_children { dmu_buf_user_t dnc_dbu; /* User evict data */ size_t dnc_count; /* number of children */ dnode_handle_t dnc_children[]; /* sized dynamically */ } dnode_children_t; typedef struct free_range { avl_node_t fr_node; uint64_t fr_blkid; uint64_t fr_nblks; } free_range_t; void dnode_special_open(struct objset *dd, dnode_phys_t *dnp, uint64_t object, dnode_handle_t *dnh); void dnode_special_close(dnode_handle_t *dnh); void dnode_setbonuslen(dnode_t *dn, int newsize, dmu_tx_t *tx); void dnode_setbonus_type(dnode_t *dn, dmu_object_type_t, dmu_tx_t *tx); void dnode_rm_spill(dnode_t *dn, dmu_tx_t *tx); int dnode_hold(struct objset *dd, uint64_t object, void *ref, dnode_t **dnp); int dnode_hold_impl(struct objset *dd, uint64_t object, int flag, int dn_slots, void *ref, dnode_t **dnp); boolean_t dnode_add_ref(dnode_t *dn, void *ref); void dnode_rele(dnode_t *dn, void *ref); void dnode_rele_and_unlock(dnode_t *dn, void *tag); void dnode_setdirty(dnode_t *dn, dmu_tx_t *tx); void dnode_sync(dnode_t *dn, dmu_tx_t *tx); void dnode_allocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, int ibs, dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx); void dnode_reallocate(dnode_t *dn, dmu_object_type_t ot, int blocksize, dmu_object_type_t bonustype, int bonuslen, int dn_slots, dmu_tx_t *tx); void dnode_free(dnode_t *dn, dmu_tx_t *tx); void dnode_byteswap(dnode_phys_t *dnp); void dnode_buf_byteswap(void *buf, size_t size); void dnode_verify(dnode_t *dn); int dnode_set_blksz(dnode_t *dn, uint64_t size, int ibs, dmu_tx_t *tx); void dnode_free_range(dnode_t *dn, uint64_t off, uint64_t len, dmu_tx_t *tx); void dnode_diduse_space(dnode_t *dn, int64_t space); void dnode_willuse_space(dnode_t *dn, int64_t space, dmu_tx_t *tx); void dnode_new_blkid(dnode_t *dn, uint64_t blkid, dmu_tx_t *tx, boolean_t); uint64_t dnode_block_freed(dnode_t *dn, uint64_t blkid); void dnode_init(void); void dnode_fini(void); int dnode_next_offset(dnode_t *dn, int flags, uint64_t *off, int minlvl, uint64_t blkfill, uint64_t txg); void dnode_evict_dbufs(dnode_t *dn); void dnode_evict_bonus(dnode_t *dn); #define DNODE_IS_CACHEABLE(_dn) \ ((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL || \ (DMU_OT_IS_METADATA((_dn)->dn_type) && \ (_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA)) #define DNODE_META_IS_CACHEABLE(_dn) \ ((_dn)->dn_objset->os_primary_cache == ZFS_CACHE_ALL || \ (_dn)->dn_objset->os_primary_cache == ZFS_CACHE_METADATA) #ifdef ZFS_DEBUG /* * There should be a ## between the string literal and fmt, to make it * clear that we're joining two strings together, but that piece of shit * gcc doesn't support that preprocessor token. */ #define dprintf_dnode(dn, fmt, ...) do { \ if (zfs_flags & ZFS_DEBUG_DPRINTF) { \ char __db_buf[32]; \ uint64_t __db_obj = (dn)->dn_object; \ if (__db_obj == DMU_META_DNODE_OBJECT) \ (void) strcpy(__db_buf, "mdn"); \ else \ (void) snprintf(__db_buf, sizeof (__db_buf), "%lld", \ (u_longlong_t)__db_obj);\ dprintf_ds((dn)->dn_objset->os_dsl_dataset, "obj=%s " fmt, \ __db_buf, __VA_ARGS__); \ } \ _NOTE(CONSTCOND) } while (0) #define DNODE_VERIFY(dn) dnode_verify(dn) #define FREE_VERIFY(db, start, end, tx) free_verify(db, start, end, tx) #else #define dprintf_dnode(db, fmt, ...) 
#define DNODE_VERIFY(dn) #define FREE_VERIFY(db, start, end, tx) #endif #ifdef __cplusplus } #endif #endif /* _SYS_DNODE_H */ diff --git a/module/zfs/dmu_objset.c b/module/zfs/dmu_objset.c index 53b8a759a0dd..c83ca1b1adaa 100644 --- a/module/zfs/dmu_objset.c +++ b/module/zfs/dmu_objset.c @@ -1,2380 +1,2386 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2012, 2016 by Delphix. All rights reserved. * Copyright (c) 2013 by Saso Kiselkov. All rights reserved. * Copyright (c) 2013, Joyent, Inc. All rights reserved. * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. * Copyright (c) 2015, STRATO AG, Inc. All rights reserved. * Copyright (c) 2016 Actifio, Inc. All rights reserved. * Copyright 2017 Nexenta Systems, Inc. */ /* Portions Copyright 2010 Robert Milkowski */ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include /* * Needed to close a window in dnode_move() that allows the objset to be freed * before it can be safely accessed. */ krwlock_t os_lock; /* * Tunable to overwrite the maximum number of threads for the parallelization * of dmu_objset_find_dp, needed to speed up the import of pools with many * datasets. * Default is 4 times the number of leaf vdevs. */ int dmu_find_threads = 0; /* * Backfill lower metadnode objects after this many have been freed. * Backfilling negatively impacts object creation rates, so only do it * if there are enough holes to fill. */ int dmu_rescan_dnode_threshold = 1 << DN_MAX_INDBLKSHIFT; static void dmu_objset_find_dp_cb(void *arg); static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb); static void dmu_objset_upgrade_stop(objset_t *os); void dmu_objset_init(void) { rw_init(&os_lock, NULL, RW_DEFAULT, NULL); } void dmu_objset_fini(void) { rw_destroy(&os_lock); } spa_t * dmu_objset_spa(objset_t *os) { return (os->os_spa); } zilog_t * dmu_objset_zil(objset_t *os) { return (os->os_zil); } dsl_pool_t * dmu_objset_pool(objset_t *os) { dsl_dataset_t *ds; if ((ds = os->os_dsl_dataset) != NULL && ds->ds_dir) return (ds->ds_dir->dd_pool); else return (spa_get_dsl(os->os_spa)); } dsl_dataset_t * dmu_objset_ds(objset_t *os) { return (os->os_dsl_dataset); } dmu_objset_type_t dmu_objset_type(objset_t *os) { return (os->os_phys->os_type); } void dmu_objset_name(objset_t *os, char *buf) { dsl_dataset_name(os->os_dsl_dataset, buf); } uint64_t dmu_objset_id(objset_t *os) { dsl_dataset_t *ds = os->os_dsl_dataset; return (ds ? 
ds->ds_object : 0); } uint64_t dmu_objset_dnodesize(objset_t *os) { return (os->os_dnodesize); } zfs_sync_type_t dmu_objset_syncprop(objset_t *os) { return (os->os_sync); } zfs_logbias_op_t dmu_objset_logbias(objset_t *os) { return (os->os_logbias); } static void checksum_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance should have been done by now. */ ASSERT(newval != ZIO_CHECKSUM_INHERIT); os->os_checksum = zio_checksum_select(newval, ZIO_CHECKSUM_ON_VALUE); } static void compression_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval != ZIO_COMPRESS_INHERIT); os->os_compress = zio_compress_select(os->os_spa, newval, ZIO_COMPRESS_ON); } static void copies_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval > 0); ASSERT(newval <= spa_max_replication(os->os_spa)); os->os_copies = newval; } static void dedup_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; spa_t *spa = os->os_spa; enum zio_checksum checksum; /* * Inheritance should have been done by now. */ ASSERT(newval != ZIO_CHECKSUM_INHERIT); checksum = zio_checksum_dedup_select(spa, newval, ZIO_CHECKSUM_OFF); os->os_dedup_checksum = checksum & ZIO_CHECKSUM_MASK; os->os_dedup_verify = !!(checksum & ZIO_CHECKSUM_VERIFY); } static void primary_cache_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE || newval == ZFS_CACHE_METADATA); os->os_primary_cache = newval; } static void secondary_cache_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval == ZFS_CACHE_ALL || newval == ZFS_CACHE_NONE || newval == ZFS_CACHE_METADATA); os->os_secondary_cache = newval; } static void sync_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval == ZFS_SYNC_STANDARD || newval == ZFS_SYNC_ALWAYS || newval == ZFS_SYNC_DISABLED); os->os_sync = newval; if (os->os_zil) zil_set_sync(os->os_zil, newval); } static void redundant_metadata_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; /* * Inheritance and range checking should have been done by now. */ ASSERT(newval == ZFS_REDUNDANT_METADATA_ALL || newval == ZFS_REDUNDANT_METADATA_MOST); os->os_redundant_metadata = newval; } static void dnodesize_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; switch (newval) { case ZFS_DNSIZE_LEGACY: os->os_dnodesize = DNODE_MIN_SIZE; break; case ZFS_DNSIZE_AUTO: /* * Choose a dnode size that will work well for most * workloads if the user specified "auto". Future code * improvements could dynamically select a dnode size * based on observed workload patterns. 
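 *
 * A sketch of the space math behind the current choice (illustrative,
 * derived from the constants in dnode.h): "auto" yields a 1K dnode
 * (DNODE_MIN_SIZE * 2), and DN_BONUS_SIZE(1024) = 1024 -
 * DNODE_CORE_SIZE (64) - one blkptr_t (128) = 832 bytes of bonus
 * space, intended to let typical SA layouts avoid a spill block.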
*/ os->os_dnodesize = DNODE_MIN_SIZE * 2; break; case ZFS_DNSIZE_1K: case ZFS_DNSIZE_2K: case ZFS_DNSIZE_4K: case ZFS_DNSIZE_8K: case ZFS_DNSIZE_16K: os->os_dnodesize = newval; break; } } static void logbias_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; ASSERT(newval == ZFS_LOGBIAS_LATENCY || newval == ZFS_LOGBIAS_THROUGHPUT); os->os_logbias = newval; if (os->os_zil) zil_set_logbias(os->os_zil, newval); } static void recordsize_changed_cb(void *arg, uint64_t newval) { objset_t *os = arg; os->os_recordsize = newval; } void dmu_objset_byteswap(void *buf, size_t size) { objset_phys_t *osp = buf; ASSERT(size == OBJSET_OLD_PHYS_SIZE || size == sizeof (objset_phys_t)); dnode_byteswap(&osp->os_meta_dnode); byteswap_uint64_array(&osp->os_zil_header, sizeof (zil_header_t)); osp->os_type = BSWAP_64(osp->os_type); osp->os_flags = BSWAP_64(osp->os_flags); if (size == sizeof (objset_phys_t)) { dnode_byteswap(&osp->os_userused_dnode); dnode_byteswap(&osp->os_groupused_dnode); } } int dmu_objset_open_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, objset_t **osp) { objset_t *os; int i, err; ASSERT(ds == NULL || MUTEX_HELD(&ds->ds_opening_lock)); os = kmem_zalloc(sizeof (objset_t), KM_SLEEP); os->os_dsl_dataset = ds; os->os_spa = spa; os->os_rootbp = bp; if (!BP_IS_HOLE(os->os_rootbp)) { arc_flags_t aflags = ARC_FLAG_WAIT; zbookmark_phys_t zb; SET_BOOKMARK(&zb, ds ? ds->ds_object : DMU_META_OBJSET, ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); if (DMU_OS_IS_L2CACHEABLE(os)) aflags |= ARC_FLAG_L2CACHE; dprintf_bp(os->os_rootbp, "reading %s", ""); err = arc_read(NULL, spa, os->os_rootbp, arc_getbuf_func, &os->os_phys_buf, ZIO_PRIORITY_SYNC_READ, ZIO_FLAG_CANFAIL, &aflags, &zb); if (err != 0) { kmem_free(os, sizeof (objset_t)); /* convert checksum errors into IO errors */ if (err == ECKSUM) err = SET_ERROR(EIO); return (err); } /* Increase the blocksize if we are permitted. */ if (spa_version(spa) >= SPA_VERSION_USERSPACE && arc_buf_size(os->os_phys_buf) < sizeof (objset_phys_t)) { arc_buf_t *buf = arc_alloc_buf(spa, &os->os_phys_buf, ARC_BUFC_METADATA, sizeof (objset_phys_t)); bzero(buf->b_data, sizeof (objset_phys_t)); bcopy(os->os_phys_buf->b_data, buf->b_data, arc_buf_size(os->os_phys_buf)); arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf); os->os_phys_buf = buf; } os->os_phys = os->os_phys_buf->b_data; os->os_flags = os->os_phys->os_flags; } else { int size = spa_version(spa) >= SPA_VERSION_USERSPACE ? sizeof (objset_phys_t) : OBJSET_OLD_PHYS_SIZE; os->os_phys_buf = arc_alloc_buf(spa, &os->os_phys_buf, ARC_BUFC_METADATA, size); os->os_phys = os->os_phys_buf->b_data; bzero(os->os_phys, size); } /* * Note: the changed_cb will be called once before the register * func returns, thus changing the checksum/compression from the * default (fletcher2/off). Snapshots don't need to know about * checksum/compression/copies. */ if (ds != NULL) { boolean_t needlock = B_FALSE; /* * Note: it's valid to open the objset if the dataset is * long-held, in which case the pool_config lock will not * be held. 
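 *
 * (Descriptive note: the needlock pattern below takes the config
 * lock only when the caller does not already hold it, and drops it
 * on the same path once the property callbacks are registered.)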
*/ if (!dsl_pool_config_held(dmu_objset_pool(os))) { needlock = B_TRUE; dsl_pool_config_enter(dmu_objset_pool(os), FTAG); } err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_PRIMARYCACHE), primary_cache_changed_cb, os); if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_SECONDARYCACHE), secondary_cache_changed_cb, os); } if (!ds->ds_is_snapshot) { if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_CHECKSUM), checksum_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_COMPRESSION), compression_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_COPIES), copies_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_DEDUP), dedup_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_LOGBIAS), logbias_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_SYNC), sync_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name( ZFS_PROP_REDUNDANT_METADATA), redundant_metadata_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_RECORDSIZE), recordsize_changed_cb, os); } if (err == 0) { err = dsl_prop_register(ds, zfs_prop_to_name(ZFS_PROP_DNODESIZE), dnodesize_changed_cb, os); } } if (needlock) dsl_pool_config_exit(dmu_objset_pool(os), FTAG); if (err != 0) { arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf); kmem_free(os, sizeof (objset_t)); return (err); } } else { /* It's the meta-objset. */ os->os_checksum = ZIO_CHECKSUM_FLETCHER_4; os->os_compress = ZIO_COMPRESS_ON; os->os_copies = spa_max_replication(spa); os->os_dedup_checksum = ZIO_CHECKSUM_OFF; os->os_dedup_verify = B_FALSE; os->os_logbias = ZFS_LOGBIAS_LATENCY; os->os_sync = ZFS_SYNC_STANDARD; os->os_primary_cache = ZFS_CACHE_ALL; os->os_secondary_cache = ZFS_CACHE_ALL; os->os_dnodesize = DNODE_MIN_SIZE; } if (ds == NULL || !ds->ds_is_snapshot) os->os_zil_header = os->os_phys->os_zil_header; os->os_zil = zil_alloc(os, &os->os_zil_header); for (i = 0; i < TXG_SIZE; i++) { list_create(&os->os_dirty_dnodes[i], sizeof (dnode_t), offsetof(dnode_t, dn_dirty_link[i])); list_create(&os->os_free_dnodes[i], sizeof (dnode_t), offsetof(dnode_t, dn_dirty_link[i])); } list_create(&os->os_dnodes, sizeof (dnode_t), offsetof(dnode_t, dn_link)); list_create(&os->os_downgraded_dbufs, sizeof (dmu_buf_impl_t), offsetof(dmu_buf_impl_t, db_link)); list_link_init(&os->os_evicting_node); mutex_init(&os->os_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&os->os_obj_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&os->os_user_ptr_lock, NULL, MUTEX_DEFAULT, NULL); dnode_special_open(os, &os->os_phys->os_meta_dnode, DMU_META_DNODE_OBJECT, &os->os_meta_dnode); if (arc_buf_size(os->os_phys_buf) >= sizeof (objset_phys_t)) { dnode_special_open(os, &os->os_phys->os_userused_dnode, DMU_USERUSED_OBJECT, &os->os_userused_dnode); dnode_special_open(os, &os->os_phys->os_groupused_dnode, DMU_GROUPUSED_OBJECT, &os->os_groupused_dnode); } mutex_init(&os->os_upgrade_lock, NULL, MUTEX_DEFAULT, NULL); *osp = os; return (0); } int dmu_objset_from_ds(dsl_dataset_t *ds, objset_t **osp) { int err = 0; /* * We shouldn't be doing anything with dsl_dataset_t's unless the * pool_config lock is held, or the dataset is long-held. 
*/ ASSERT(dsl_pool_config_held(ds->ds_dir->dd_pool) || dsl_dataset_long_held(ds)); mutex_enter(&ds->ds_opening_lock); if (ds->ds_objset == NULL) { objset_t *os; rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); err = dmu_objset_open_impl(dsl_dataset_get_spa(ds), ds, dsl_dataset_get_blkptr(ds), &os); rrw_exit(&ds->ds_bp_rwlock, FTAG); if (err == 0) { mutex_enter(&ds->ds_lock); ASSERT(ds->ds_objset == NULL); ds->ds_objset = os; mutex_exit(&ds->ds_lock); } } *osp = ds->ds_objset; mutex_exit(&ds->ds_opening_lock); return (err); } /* * Holds the pool while the objset is held. Therefore only one objset * can be held at a time. */ int dmu_objset_hold(const char *name, void *tag, objset_t **osp) { dsl_pool_t *dp; dsl_dataset_t *ds; int err; err = dsl_pool_hold(name, tag, &dp); if (err != 0) return (err); err = dsl_dataset_hold(dp, name, tag, &ds); if (err != 0) { dsl_pool_rele(dp, tag); return (err); } err = dmu_objset_from_ds(ds, osp); if (err != 0) { dsl_dataset_rele(ds, tag); dsl_pool_rele(dp, tag); } return (err); } static int dmu_objset_own_impl(dsl_dataset_t *ds, dmu_objset_type_t type, boolean_t readonly, void *tag, objset_t **osp) { int err; err = dmu_objset_from_ds(ds, osp); if (err != 0) { dsl_dataset_disown(ds, tag); } else if (type != DMU_OST_ANY && type != (*osp)->os_phys->os_type) { dsl_dataset_disown(ds, tag); return (SET_ERROR(EINVAL)); } else if (!readonly && dsl_dataset_is_snapshot(ds)) { dsl_dataset_disown(ds, tag); return (SET_ERROR(EROFS)); } return (err); } /* * dsl_pool must not be held when this is called. * Upon successful return, there will be a longhold on the dataset, * and the dsl_pool will not be held. */ int dmu_objset_own(const char *name, dmu_objset_type_t type, boolean_t readonly, void *tag, objset_t **osp) { dsl_pool_t *dp; dsl_dataset_t *ds; int err; err = dsl_pool_hold(name, FTAG, &dp); if (err != 0) return (err); err = dsl_dataset_own(dp, name, tag, &ds); if (err != 0) { dsl_pool_rele(dp, FTAG); return (err); } err = dmu_objset_own_impl(ds, type, readonly, tag, osp); dsl_pool_rele(dp, FTAG); if (err == 0 && dmu_objset_userobjspace_upgradable(*osp)) dmu_objset_userobjspace_upgrade(*osp); return (err); } int dmu_objset_own_obj(dsl_pool_t *dp, uint64_t obj, dmu_objset_type_t type, boolean_t readonly, void *tag, objset_t **osp) { dsl_dataset_t *ds; int err; err = dsl_dataset_own_obj(dp, obj, tag, &ds); if (err != 0) return (err); return (dmu_objset_own_impl(ds, type, readonly, tag, osp)); } void dmu_objset_rele(objset_t *os, void *tag) { dsl_pool_t *dp = dmu_objset_pool(os); dsl_dataset_rele(os->os_dsl_dataset, tag); dsl_pool_rele(dp, tag); } /* * When we are called, os MUST refer to an objset associated with a dataset * that is owned by 'tag'; that is, is held and long held by 'tag' and ds_owner * == tag. We will then release and reacquire ownership of the dataset while * holding the pool config_rwlock so that no intervening namespace or * ownership changes can occur. * * This exists solely to accommodate zfs_ioc_userspace_upgrade()'s desire to * release the hold on its dataset and acquire a new one on the dataset of the * same name so that it can be partially torn down and reconstructed.
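 *
 * A minimal usage sketch (hypothetical caller, illustration only):
 *
 *	objset_t *os;
 *	VERIFY0(dmu_objset_own("pool/fs", DMU_OST_ZFS, B_FALSE, tag, &os));
 *	... partially tear down state that caches the dataset ...
 *	dmu_objset_refresh_ownership(os, tag);
 *	... rebuild; 'tag' still owns the dataset afterwards ...
 *	dmu_objset_disown(os, tag);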
*/ void dmu_objset_refresh_ownership(objset_t *os, void *tag) { dsl_pool_t *dp; dsl_dataset_t *ds, *newds; char name[ZFS_MAX_DATASET_NAME_LEN]; ds = os->os_dsl_dataset; VERIFY3P(ds, !=, NULL); VERIFY3P(ds->ds_owner, ==, tag); VERIFY(dsl_dataset_long_held(ds)); dsl_dataset_name(ds, name); dp = dmu_objset_pool(os); dsl_pool_config_enter(dp, FTAG); dmu_objset_disown(os, tag); VERIFY0(dsl_dataset_own(dp, name, tag, &newds)); VERIFY3P(newds, ==, os->os_dsl_dataset); dsl_pool_config_exit(dp, FTAG); } void dmu_objset_disown(objset_t *os, void *tag) { /* * Stop the upgrading thread. */ dmu_objset_upgrade_stop(os); dsl_dataset_disown(os->os_dsl_dataset, tag); } void dmu_objset_evict_dbufs(objset_t *os) { dnode_t *dn_marker; dnode_t *dn; dn_marker = kmem_alloc(sizeof (dnode_t), KM_SLEEP); mutex_enter(&os->os_lock); dn = list_head(&os->os_dnodes); while (dn != NULL) { /* * Skip dnodes without holds. We have to do this dance * because dnode_add_ref() only works if there is already a * hold. If the dnode has no holds, then it has no dbufs. */ if (dnode_add_ref(dn, FTAG)) { list_insert_after(&os->os_dnodes, dn, dn_marker); mutex_exit(&os->os_lock); dnode_evict_dbufs(dn); dnode_rele(dn, FTAG); mutex_enter(&os->os_lock); dn = list_next(&os->os_dnodes, dn_marker); list_remove(&os->os_dnodes, dn_marker); } else { dn = list_next(&os->os_dnodes, dn); } } mutex_exit(&os->os_lock); kmem_free(dn_marker, sizeof (dnode_t)); if (DMU_USERUSED_DNODE(os) != NULL) { dnode_evict_dbufs(DMU_GROUPUSED_DNODE(os)); dnode_evict_dbufs(DMU_USERUSED_DNODE(os)); } dnode_evict_dbufs(DMU_META_DNODE(os)); } /* * Objset eviction processing is split into two pieces. * The first marks the objset as evicting, evicts any dbufs that * have a refcount of zero, and then queues up the objset for the * second phase of eviction. Once os->os_dnodes has been cleared by * dnode_buf_pageout()->dnode_destroy(), the second phase is executed. * The second phase closes the special dnodes, dequeues the objset from * the list of those undergoing eviction, and finally frees the objset. * * NOTE: Due to asynchronous eviction processing (invocation of * dnode_buf_pageout()), it is possible for the meta dnode for the * objset to have no holds even though os->os_dnodes is not empty. */ void dmu_objset_evict(objset_t *os) { int t; dsl_dataset_t *ds = os->os_dsl_dataset; for (t = 0; t < TXG_SIZE; t++) ASSERT(!dmu_objset_is_dirty(os, t)); if (ds) dsl_prop_unregister_all(ds, os); if (os->os_sa) sa_tear_down(os); dmu_objset_evict_dbufs(os); mutex_enter(&os->os_lock); spa_evicting_os_register(os->os_spa, os); if (list_is_empty(&os->os_dnodes)) { mutex_exit(&os->os_lock); dmu_objset_evict_done(os); } else { mutex_exit(&os->os_lock); } } void dmu_objset_evict_done(objset_t *os) { ASSERT3P(list_head(&os->os_dnodes), ==, NULL); dnode_special_close(&os->os_meta_dnode); if (DMU_USERUSED_DNODE(os)) { dnode_special_close(&os->os_userused_dnode); dnode_special_close(&os->os_groupused_dnode); } zil_free(os->os_zil); arc_buf_destroy(os->os_phys_buf, &os->os_phys_buf); /* * This is a barrier to prevent the objset from going away in * dnode_move() until we can safely ensure that the objset is still in * use. We consider the objset valid before the barrier and invalid * after the barrier.
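 *
 * (Descriptive note: dnode_move() holds os_lock while it examines a
 * dnode's objset, so the empty reader enter/exit pair below cannot
 * complete until any in-flight move drains; it is a pure rendezvous
 * and no state is accessed under the lock.)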
*/ rw_enter(&os_lock, RW_READER); rw_exit(&os_lock); mutex_destroy(&os->os_lock); mutex_destroy(&os->os_obj_lock); mutex_destroy(&os->os_user_ptr_lock); spa_evicting_os_deregister(os->os_spa, os); kmem_free(os, sizeof (objset_t)); } timestruc_t dmu_objset_snap_cmtime(objset_t *os) { return (dsl_dir_snap_cmtime(os->os_dsl_dataset->ds_dir)); } /* called from dsl for meta-objset */ objset_t * dmu_objset_create_impl(spa_t *spa, dsl_dataset_t *ds, blkptr_t *bp, dmu_objset_type_t type, dmu_tx_t *tx) { objset_t *os; dnode_t *mdn; ASSERT(dmu_tx_is_syncing(tx)); if (ds != NULL) VERIFY0(dmu_objset_from_ds(ds, &os)); else VERIFY0(dmu_objset_open_impl(spa, NULL, bp, &os)); mdn = DMU_META_DNODE(os); dnode_allocate(mdn, DMU_OT_DNODE, DNODE_BLOCK_SIZE, DN_MAX_INDBLKSHIFT, DMU_OT_NONE, 0, DNODE_MIN_SLOTS, tx); /* * We don't want to have to increase the meta-dnode's nlevels * later, because then we could do it in quiescing context while * we are also accessing it in open context. * * This precaution is not necessary for the MOS (ds == NULL), * because the MOS is only updated in syncing context. * This is most fortunate: the MOS is the only objset that * needs to be synced multiple times as spa_sync() iterates * to convergence, so minimizing its dn_nlevels matters. */ if (ds != NULL) { int levels = 1; /* * Determine the number of levels necessary for the meta-dnode - * to contain DN_MAX_OBJECT dnodes. + * to contain DN_MAX_OBJECT dnodes. Note that in order to + * ensure that we do not overflow 64 bits, there has to be + * a nlevels that gives us a number of blocks > DN_MAX_OBJECT + * but < 2^64. Therefore, + * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT) (10) must be + * less than (64 - log2(DN_MAX_OBJECT)) (16). */ - while ((uint64_t)mdn->dn_nblkptr << (mdn->dn_datablkshift + + while ((uint64_t)mdn->dn_nblkptr << + (mdn->dn_datablkshift - DNODE_SHIFT + (levels - 1) * (mdn->dn_indblkshift - SPA_BLKPTRSHIFT)) < - DN_MAX_OBJECT * sizeof (dnode_phys_t)) + DN_MAX_OBJECT) levels++; mdn->dn_next_nlevels[tx->tx_txg & TXG_MASK] = mdn->dn_nlevels = levels; } ASSERT(type != DMU_OST_NONE); ASSERT(type != DMU_OST_ANY); ASSERT(type < DMU_OST_NUMTYPES); os->os_phys->os_type = type; if (dmu_objset_userused_enabled(os)) { os->os_phys->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; if (dmu_objset_userobjused_enabled(os)) { ds->ds_feature_activation_needed[ SPA_FEATURE_USEROBJ_ACCOUNTING] = B_TRUE; os->os_phys->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE; } os->os_flags = os->os_phys->os_flags; } dsl_dataset_dirty(ds, tx); return (os); } typedef struct dmu_objset_create_arg { const char *doca_name; cred_t *doca_cred; void (*doca_userfunc)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx); void *doca_userarg; dmu_objset_type_t doca_type; uint64_t doca_flags; } dmu_objset_create_arg_t; /*ARGSUSED*/ static int dmu_objset_create_check(void *arg, dmu_tx_t *tx) { dmu_objset_create_arg_t *doca = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dir_t *pdd; const char *tail; int error; if (strchr(doca->doca_name, '@') != NULL) return (SET_ERROR(EINVAL)); if (strlen(doca->doca_name) >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); error = dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail); if (error != 0) return (error); if (tail == NULL) { dsl_dir_rele(pdd, FTAG); return (SET_ERROR(EEXIST)); } error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, doca->doca_cred); dsl_dir_rele(pdd, FTAG); return (error); } static void dmu_objset_create_sync(void *arg, dmu_tx_t *tx) { dmu_objset_create_arg_t *doca = arg;
dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dir_t *pdd; const char *tail; dsl_dataset_t *ds; uint64_t obj; blkptr_t *bp; objset_t *os; VERIFY0(dsl_dir_hold(dp, doca->doca_name, FTAG, &pdd, &tail)); obj = dsl_dataset_create_sync(pdd, tail, NULL, doca->doca_flags, doca->doca_cred, tx); VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds)); rrw_enter(&ds->ds_bp_rwlock, RW_READER, FTAG); bp = dsl_dataset_get_blkptr(ds); os = dmu_objset_create_impl(pdd->dd_pool->dp_spa, ds, bp, doca->doca_type, tx); rrw_exit(&ds->ds_bp_rwlock, FTAG); if (doca->doca_userfunc != NULL) { doca->doca_userfunc(os, doca->doca_userarg, doca->doca_cred, tx); } spa_history_log_internal_ds(ds, "create", tx, ""); zvol_create_minors(dp->dp_spa, doca->doca_name, B_TRUE); dsl_dataset_rele(ds, FTAG); dsl_dir_rele(pdd, FTAG); } int dmu_objset_create(const char *name, dmu_objset_type_t type, uint64_t flags, void (*func)(objset_t *os, void *arg, cred_t *cr, dmu_tx_t *tx), void *arg) { dmu_objset_create_arg_t doca; doca.doca_name = name; doca.doca_cred = CRED(); doca.doca_flags = flags; doca.doca_userfunc = func; doca.doca_userarg = arg; doca.doca_type = type; return (dsl_sync_task(name, dmu_objset_create_check, dmu_objset_create_sync, &doca, 5, ZFS_SPACE_CHECK_NORMAL)); } typedef struct dmu_objset_clone_arg { const char *doca_clone; const char *doca_origin; cred_t *doca_cred; } dmu_objset_clone_arg_t; /*ARGSUSED*/ static int dmu_objset_clone_check(void *arg, dmu_tx_t *tx) { dmu_objset_clone_arg_t *doca = arg; dsl_dir_t *pdd; const char *tail; int error; dsl_dataset_t *origin; dsl_pool_t *dp = dmu_tx_pool(tx); if (strchr(doca->doca_clone, '@') != NULL) return (SET_ERROR(EINVAL)); if (strlen(doca->doca_clone) >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); error = dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail); if (error != 0) return (error); if (tail == NULL) { dsl_dir_rele(pdd, FTAG); return (SET_ERROR(EEXIST)); } error = dsl_fs_ss_limit_check(pdd, 1, ZFS_PROP_FILESYSTEM_LIMIT, NULL, doca->doca_cred); if (error != 0) { dsl_dir_rele(pdd, FTAG); return (SET_ERROR(EDQUOT)); } dsl_dir_rele(pdd, FTAG); error = dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin); if (error != 0) return (error); /* You can only clone snapshots, not the head datasets. 
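 * E.g. (illustrative): dmu_objset_clone("pool/clone", "pool/fs@snap")
 * passes this check, while a non-snapshot origin such as "pool/fs"
 * fails below with EINVAL.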
*/ if (!origin->ds_is_snapshot) { dsl_dataset_rele(origin, FTAG); return (SET_ERROR(EINVAL)); } dsl_dataset_rele(origin, FTAG); return (0); } static void dmu_objset_clone_sync(void *arg, dmu_tx_t *tx) { dmu_objset_clone_arg_t *doca = arg; dsl_pool_t *dp = dmu_tx_pool(tx); dsl_dir_t *pdd; const char *tail; dsl_dataset_t *origin, *ds; uint64_t obj; char namebuf[ZFS_MAX_DATASET_NAME_LEN]; VERIFY0(dsl_dir_hold(dp, doca->doca_clone, FTAG, &pdd, &tail)); VERIFY0(dsl_dataset_hold(dp, doca->doca_origin, FTAG, &origin)); obj = dsl_dataset_create_sync(pdd, tail, origin, 0, doca->doca_cred, tx); VERIFY0(dsl_dataset_hold_obj(pdd->dd_pool, obj, FTAG, &ds)); dsl_dataset_name(origin, namebuf); spa_history_log_internal_ds(ds, "clone", tx, "origin=%s (%llu)", namebuf, origin->ds_object); zvol_create_minors(dp->dp_spa, doca->doca_clone, B_TRUE); dsl_dataset_rele(ds, FTAG); dsl_dataset_rele(origin, FTAG); dsl_dir_rele(pdd, FTAG); } int dmu_objset_clone(const char *clone, const char *origin) { dmu_objset_clone_arg_t doca; doca.doca_clone = clone; doca.doca_origin = origin; doca.doca_cred = CRED(); return (dsl_sync_task(clone, dmu_objset_clone_check, dmu_objset_clone_sync, &doca, 5, ZFS_SPACE_CHECK_NORMAL)); } int dmu_objset_snapshot_one(const char *fsname, const char *snapname) { int err; char *longsnap = kmem_asprintf("%s@%s", fsname, snapname); nvlist_t *snaps = fnvlist_alloc(); fnvlist_add_boolean(snaps, longsnap); strfree(longsnap); err = dsl_dataset_snapshot(snaps, NULL, NULL); fnvlist_free(snaps); return (err); } static void dmu_objset_upgrade_task_cb(void *data) { objset_t *os = data; mutex_enter(&os->os_upgrade_lock); os->os_upgrade_status = EINTR; if (!os->os_upgrade_exit) { mutex_exit(&os->os_upgrade_lock); os->os_upgrade_status = os->os_upgrade_cb(os); mutex_enter(&os->os_upgrade_lock); } os->os_upgrade_exit = B_TRUE; os->os_upgrade_id = 0; mutex_exit(&os->os_upgrade_lock); } static void dmu_objset_upgrade(objset_t *os, dmu_objset_upgrade_cb_t cb) { if (os->os_upgrade_id != 0) return; mutex_enter(&os->os_upgrade_lock); if (os->os_upgrade_id == 0 && os->os_upgrade_status == 0) { os->os_upgrade_exit = B_FALSE; os->os_upgrade_cb = cb; os->os_upgrade_id = taskq_dispatch( os->os_spa->spa_upgrade_taskq, dmu_objset_upgrade_task_cb, os, TQ_SLEEP); if (os->os_upgrade_id == TASKQID_INVALID) os->os_upgrade_status = ENOMEM; } mutex_exit(&os->os_upgrade_lock); } static void dmu_objset_upgrade_stop(objset_t *os) { mutex_enter(&os->os_upgrade_lock); os->os_upgrade_exit = B_TRUE; if (os->os_upgrade_id != 0) { taskqid_t id = os->os_upgrade_id; os->os_upgrade_id = 0; mutex_exit(&os->os_upgrade_lock); taskq_cancel_id(os->os_spa->spa_upgrade_taskq, id); } else { mutex_exit(&os->os_upgrade_lock); } } static void dmu_objset_sync_dnodes(list_t *list, list_t *newlist, dmu_tx_t *tx) { dnode_t *dn; while ((dn = list_head(list))) { ASSERT(dn->dn_object != DMU_META_DNODE_OBJECT); ASSERT(dn->dn_dbuf->db_data_pending); /* * Initialize dn_zio outside dnode_sync() because the * meta-dnode needs to set it outside dnode_sync().
*/ dn->dn_zio = dn->dn_dbuf->db_data_pending->dr_zio; ASSERT(dn->dn_zio); ASSERT3U(dn->dn_nlevels, <=, DN_MAX_LEVELS); list_remove(list, dn); if (newlist) { (void) dnode_add_ref(dn, newlist); list_insert_tail(newlist, dn); } dnode_sync(dn, tx); } } /* ARGSUSED */ static void dmu_objset_write_ready(zio_t *zio, arc_buf_t *abuf, void *arg) { int i; blkptr_t *bp = zio->io_bp; objset_t *os = arg; dnode_phys_t *dnp = &os->os_phys->os_meta_dnode; ASSERT(!BP_IS_EMBEDDED(bp)); ASSERT3U(BP_GET_TYPE(bp), ==, DMU_OT_OBJSET); ASSERT0(BP_GET_LEVEL(bp)); /* * Update rootbp fill count: it should be the number of objects * allocated in the object set (not counting the "special" * objects that are stored in the objset_phys_t -- the meta * dnode and user/group accounting objects). */ bp->blk_fill = 0; for (i = 0; i < dnp->dn_nblkptr; i++) bp->blk_fill += BP_GET_FILL(&dnp->dn_blkptr[i]); if (os->os_dsl_dataset != NULL) rrw_enter(&os->os_dsl_dataset->ds_bp_rwlock, RW_WRITER, FTAG); *os->os_rootbp = *bp; if (os->os_dsl_dataset != NULL) rrw_exit(&os->os_dsl_dataset->ds_bp_rwlock, FTAG); } /* ARGSUSED */ static void dmu_objset_write_done(zio_t *zio, arc_buf_t *abuf, void *arg) { blkptr_t *bp = zio->io_bp; blkptr_t *bp_orig = &zio->io_bp_orig; objset_t *os = arg; if (zio->io_flags & ZIO_FLAG_IO_REWRITE) { ASSERT(BP_EQUAL(bp, bp_orig)); } else { dsl_dataset_t *ds = os->os_dsl_dataset; dmu_tx_t *tx = os->os_synctx; (void) dsl_dataset_block_kill(ds, bp_orig, tx, B_TRUE); dsl_dataset_block_born(ds, bp, tx); } kmem_free(bp, sizeof (*bp)); } /* called from dsl */ void dmu_objset_sync(objset_t *os, zio_t *pio, dmu_tx_t *tx) { int txgoff; zbookmark_phys_t zb; zio_prop_t zp; zio_t *zio; list_t *list; list_t *newlist = NULL; dbuf_dirty_record_t *dr; blkptr_t *blkptr_copy = kmem_alloc(sizeof (*os->os_rootbp), KM_SLEEP); *blkptr_copy = *os->os_rootbp; dprintf_ds(os->os_dsl_dataset, "txg=%llu\n", tx->tx_txg); ASSERT(dmu_tx_is_syncing(tx)); /* XXX the write_done callback should really give us the tx... */ os->os_synctx = tx; if (os->os_dsl_dataset == NULL) { /* * This is the MOS. If we have upgraded, * spa_max_replication() could change, so reset * os_copies here. */ os->os_copies = spa_max_replication(os->os_spa); } /* * Create the root block IO */ SET_BOOKMARK(&zb, os->os_dsl_dataset ? os->os_dsl_dataset->ds_object : DMU_META_OBJSET, ZB_ROOT_OBJECT, ZB_ROOT_LEVEL, ZB_ROOT_BLKID); arc_release(os->os_phys_buf, &os->os_phys_buf); dmu_write_policy(os, NULL, 0, 0, ZIO_COMPRESS_INHERIT, &zp); zio = arc_write(pio, os->os_spa, tx->tx_txg, blkptr_copy, os->os_phys_buf, DMU_OS_IS_L2CACHEABLE(os), &zp, dmu_objset_write_ready, NULL, NULL, dmu_objset_write_done, os, ZIO_PRIORITY_ASYNC_WRITE, ZIO_FLAG_MUSTSUCCEED, &zb); /* * Sync special dnodes - the parent IO for the sync is the root block */ DMU_META_DNODE(os)->dn_zio = zio; dnode_sync(DMU_META_DNODE(os), tx); os->os_phys->os_flags = os->os_flags; if (DMU_USERUSED_DNODE(os) && DMU_USERUSED_DNODE(os)->dn_type != DMU_OT_NONE) { DMU_USERUSED_DNODE(os)->dn_zio = zio; dnode_sync(DMU_USERUSED_DNODE(os), tx); DMU_GROUPUSED_DNODE(os)->dn_zio = zio; dnode_sync(DMU_GROUPUSED_DNODE(os), tx); } txgoff = tx->tx_txg & TXG_MASK; if (dmu_objset_userused_enabled(os)) { newlist = &os->os_synced_dnodes; /* * We must create the list here because it uses the * dn_dirty_link[] of this txg. 
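 * (Illustrative note: offsetof(dnode_t, dn_dirty_link[txgoff]) equals
 * offsetof(dnode_t, dn_dirty_link[0]) + txgoff * sizeof (list_node_t),
 * so each txg's list threads through a different link embedded in the
 * dnode and a dnode can sit on several per-txg lists at once.)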
*/ list_create(newlist, sizeof (dnode_t), offsetof(dnode_t, dn_dirty_link[txgoff])); } dmu_objset_sync_dnodes(&os->os_free_dnodes[txgoff], newlist, tx); dmu_objset_sync_dnodes(&os->os_dirty_dnodes[txgoff], newlist, tx); list = &DMU_META_DNODE(os)->dn_dirty_records[txgoff]; while ((dr = list_head(list))) { ASSERT0(dr->dr_dbuf->db_level); list_remove(list, dr); if (dr->dr_zio) zio_nowait(dr->dr_zio); } /* Enable dnode backfill if enough objects have been freed. */ if (os->os_freed_dnodes >= dmu_rescan_dnode_threshold) { os->os_rescan_dnodes = B_TRUE; os->os_freed_dnodes = 0; } /* * Free intent log blocks up to this tx. */ zil_sync(os->os_zil, tx); os->os_phys->os_zil_header = os->os_zil_header; zio_nowait(zio); } boolean_t dmu_objset_is_dirty(objset_t *os, uint64_t txg) { return (!list_is_empty(&os->os_dirty_dnodes[txg & TXG_MASK]) || !list_is_empty(&os->os_free_dnodes[txg & TXG_MASK])); } static objset_used_cb_t *used_cbs[DMU_OST_NUMTYPES]; void dmu_objset_register_type(dmu_objset_type_t ost, objset_used_cb_t *cb) { used_cbs[ost] = cb; } boolean_t dmu_objset_userused_enabled(objset_t *os) { return (spa_version(os->os_spa) >= SPA_VERSION_USERSPACE && used_cbs[os->os_phys->os_type] != NULL && DMU_USERUSED_DNODE(os) != NULL); } boolean_t dmu_objset_userobjused_enabled(objset_t *os) { return (dmu_objset_userused_enabled(os) && spa_feature_is_enabled(os->os_spa, SPA_FEATURE_USEROBJ_ACCOUNTING)); } typedef struct userquota_node { /* must be the first field, see userquota_update_cache() */ char uqn_id[20 + DMU_OBJACCT_PREFIX_LEN]; int64_t uqn_delta; avl_node_t uqn_node; } userquota_node_t; typedef struct userquota_cache { avl_tree_t uqc_user_deltas; avl_tree_t uqc_group_deltas; } userquota_cache_t; static int userquota_compare(const void *l, const void *r) { const userquota_node_t *luqn = l; const userquota_node_t *ruqn = r; int rv; /* * NB: can only access uqn_id because userquota_update_cache() doesn't * pass in an entire userquota_node_t. */ rv = strcmp(luqn->uqn_id, ruqn->uqn_id); return (AVL_ISIGN(rv)); } static void do_userquota_cacheflush(objset_t *os, userquota_cache_t *cache, dmu_tx_t *tx) { void *cookie; userquota_node_t *uqn; ASSERT(dmu_tx_is_syncing(tx)); cookie = NULL; while ((uqn = avl_destroy_nodes(&cache->uqc_user_deltas, &cookie)) != NULL) { VERIFY0(zap_increment(os, DMU_USERUSED_OBJECT, uqn->uqn_id, uqn->uqn_delta, tx)); kmem_free(uqn, sizeof (*uqn)); } avl_destroy(&cache->uqc_user_deltas); cookie = NULL; while ((uqn = avl_destroy_nodes(&cache->uqc_group_deltas, &cookie)) != NULL) { VERIFY0(zap_increment(os, DMU_GROUPUSED_OBJECT, uqn->uqn_id, uqn->uqn_delta, tx)); kmem_free(uqn, sizeof (*uqn)); } avl_destroy(&cache->uqc_group_deltas); } static void userquota_update_cache(avl_tree_t *avl, const char *id, int64_t delta) { userquota_node_t *uqn; avl_index_t idx; ASSERT(strlen(id) < sizeof (uqn->uqn_id)); /* * Use id directly for searching because uqn_id is the first field of * userquota_node_t and fields after uqn_id won't be accessed in * avl_find().
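 *
 * A standalone sketch of the first-member idiom relied on here
 * (hypothetical names, illustration only): given
 *
 *	struct node { char id[16]; avl_node_t link; };
 *
 * and a comparator that reads only id, a bare string pointer may be
 * handed to avl_find() in place of a struct node *, because
 * offsetof(struct node, id) == 0 makes the two pointers alias.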
*/ uqn = avl_find(avl, (const void *)id, &idx); if (uqn == NULL) { uqn = kmem_zalloc(sizeof (*uqn), KM_SLEEP); strlcpy(uqn->uqn_id, id, sizeof (uqn->uqn_id)); avl_insert(avl, uqn, idx); } uqn->uqn_delta += delta; } static void do_userquota_update(userquota_cache_t *cache, uint64_t used, uint64_t flags, uint64_t user, uint64_t group, boolean_t subtract) { if ((flags & DNODE_FLAG_USERUSED_ACCOUNTED)) { int64_t delta = DNODE_MIN_SIZE + used; char name[20]; if (subtract) delta = -delta; (void) sprintf(name, "%llx", (longlong_t)user); userquota_update_cache(&cache->uqc_user_deltas, name, delta); (void) sprintf(name, "%llx", (longlong_t)group); userquota_update_cache(&cache->uqc_group_deltas, name, delta); } } static void do_userobjquota_update(userquota_cache_t *cache, uint64_t flags, uint64_t user, uint64_t group, boolean_t subtract) { if (flags & DNODE_FLAG_USEROBJUSED_ACCOUNTED) { char name[20 + DMU_OBJACCT_PREFIX_LEN]; int delta = subtract ? -1 : 1; (void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx", (longlong_t)user); userquota_update_cache(&cache->uqc_user_deltas, name, delta); (void) snprintf(name, sizeof (name), DMU_OBJACCT_PREFIX "%llx", (longlong_t)group); userquota_update_cache(&cache->uqc_group_deltas, name, delta); } } void dmu_objset_do_userquota_updates(objset_t *os, dmu_tx_t *tx) { dnode_t *dn; list_t *list = &os->os_synced_dnodes; userquota_cache_t cache = { { 0 } }; ASSERT(list_head(list) == NULL || dmu_objset_userused_enabled(os)); avl_create(&cache.uqc_user_deltas, userquota_compare, sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node)); avl_create(&cache.uqc_group_deltas, userquota_compare, sizeof (userquota_node_t), offsetof(userquota_node_t, uqn_node)); while ((dn = list_head(list))) { int flags; ASSERT(!DMU_OBJECT_IS_SPECIAL(dn->dn_object)); ASSERT(dn->dn_phys->dn_type == DMU_OT_NONE || dn->dn_phys->dn_flags & DNODE_FLAG_USERUSED_ACCOUNTED); /* Allocate the user/groupused objects if necessary. */ if (DMU_USERUSED_DNODE(os)->dn_type == DMU_OT_NONE) { VERIFY0(zap_create_claim(os, DMU_USERUSED_OBJECT, DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx)); VERIFY0(zap_create_claim(os, DMU_GROUPUSED_OBJECT, DMU_OT_USERGROUP_USED, DMU_OT_NONE, 0, tx)); } flags = dn->dn_id_flags; ASSERT(flags); if (flags & DN_ID_OLD_EXIST) { do_userquota_update(&cache, dn->dn_oldused, dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid, B_TRUE); do_userobjquota_update(&cache, dn->dn_oldflags, dn->dn_olduid, dn->dn_oldgid, B_TRUE); } if (flags & DN_ID_NEW_EXIST) { do_userquota_update(&cache, DN_USED_BYTES(dn->dn_phys), dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid, B_FALSE); do_userobjquota_update(&cache, dn->dn_phys->dn_flags, dn->dn_newuid, dn->dn_newgid, B_FALSE); } mutex_enter(&dn->dn_mtx); dn->dn_oldused = 0; dn->dn_oldflags = 0; if (dn->dn_id_flags & DN_ID_NEW_EXIST) { dn->dn_olduid = dn->dn_newuid; dn->dn_oldgid = dn->dn_newgid; dn->dn_id_flags |= DN_ID_OLD_EXIST; if (dn->dn_bonuslen == 0) dn->dn_id_flags |= DN_ID_CHKED_SPILL; else dn->dn_id_flags |= DN_ID_CHKED_BONUS; } dn->dn_id_flags &= ~(DN_ID_NEW_EXIST); mutex_exit(&dn->dn_mtx); list_remove(list, dn); dnode_rele(dn, list); } do_userquota_cacheflush(os, &cache, tx); } /* * Returns a pointer to data to find uid/gid from * * If a dirty record for transaction group that is syncing can't * be found then NULL is returned. In the NULL case it is assumed * the uid/gid aren't changing. 
*/ static void * dmu_objset_userquota_find_data(dmu_buf_impl_t *db, dmu_tx_t *tx) { dbuf_dirty_record_t *dr, **drp; void *data; if (db->db_dirtycnt == 0) return (db->db.db_data); /* Nothing is changing */ for (drp = &db->db_last_dirty; (dr = *drp) != NULL; drp = &dr->dr_next) if (dr->dr_txg == tx->tx_txg) break; if (dr == NULL) { data = NULL; } else { dnode_t *dn; DB_DNODE_ENTER(dr->dr_dbuf); dn = DB_DNODE(dr->dr_dbuf); if (dn->dn_bonuslen == 0 && dr->dr_dbuf->db_blkid == DMU_SPILL_BLKID) data = dr->dt.dl.dr_data->b_data; else data = dr->dt.dl.dr_data; DB_DNODE_EXIT(dr->dr_dbuf); } return (data); } void dmu_objset_userquota_get_ids(dnode_t *dn, boolean_t before, dmu_tx_t *tx) { objset_t *os = dn->dn_objset; void *data = NULL; dmu_buf_impl_t *db = NULL; uint64_t *user = NULL; uint64_t *group = NULL; int flags = dn->dn_id_flags; int error; boolean_t have_spill = B_FALSE; if (!dmu_objset_userused_enabled(dn->dn_objset)) return; if (before && (flags & (DN_ID_CHKED_BONUS|DN_ID_OLD_EXIST| DN_ID_CHKED_SPILL))) return; if (before && dn->dn_bonuslen != 0) data = DN_BONUS(dn->dn_phys); else if (!before && dn->dn_bonuslen != 0) { if (dn->dn_bonus) { db = dn->dn_bonus; mutex_enter(&db->db_mtx); data = dmu_objset_userquota_find_data(db, tx); } else { data = DN_BONUS(dn->dn_phys); } } else if (dn->dn_bonuslen == 0 && dn->dn_bonustype == DMU_OT_SA) { int rf = 0; if (RW_WRITE_HELD(&dn->dn_struct_rwlock)) rf |= DB_RF_HAVESTRUCT; error = dmu_spill_hold_by_dnode(dn, rf | DB_RF_MUST_SUCCEED, FTAG, (dmu_buf_t **)&db); ASSERT(error == 0); mutex_enter(&db->db_mtx); data = (before) ? db->db.db_data : dmu_objset_userquota_find_data(db, tx); have_spill = B_TRUE; } else { mutex_enter(&dn->dn_mtx); dn->dn_id_flags |= DN_ID_CHKED_BONUS; mutex_exit(&dn->dn_mtx); return; } if (before) { ASSERT(data); user = &dn->dn_olduid; group = &dn->dn_oldgid; } else if (data) { user = &dn->dn_newuid; group = &dn->dn_newgid; } /* * Must always call the callback in case the object * type has changed and that type isn't an object type to track */ error = used_cbs[os->os_phys->os_type](dn->dn_bonustype, data, user, group); /* * Preserve existing uid/gid when the callback can't determine * what the new uid/gid are and the callback returned EEXIST. * The EEXIST error tells us to just use the existing uid/gid. * If we don't know what the old values are then just assign * them to 0, since that is a new file being created. */ if (!before && data == NULL && error == EEXIST) { if (flags & DN_ID_OLD_EXIST) { dn->dn_newuid = dn->dn_olduid; dn->dn_newgid = dn->dn_oldgid; } else { dn->dn_newuid = 0; dn->dn_newgid = 0; } error = 0; } if (db) mutex_exit(&db->db_mtx); mutex_enter(&dn->dn_mtx); if (error == 0 && before) dn->dn_id_flags |= DN_ID_OLD_EXIST; if (error == 0 && !before) dn->dn_id_flags |= DN_ID_NEW_EXIST; if (have_spill) { dn->dn_id_flags |= DN_ID_CHKED_SPILL; } else { dn->dn_id_flags |= DN_ID_CHKED_BONUS; } mutex_exit(&dn->dn_mtx); if (have_spill) dmu_buf_rele((dmu_buf_t *)db, FTAG); } boolean_t dmu_objset_userspace_present(objset_t *os) { return (os->os_phys->os_flags & OBJSET_FLAG_USERACCOUNTING_COMPLETE); } boolean_t dmu_objset_userobjspace_present(objset_t *os) { return (os->os_phys->os_flags & OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE); } static int dmu_objset_space_upgrade(objset_t *os) { uint64_t obj; int err = 0; /* * We simply need to mark every object dirty, so that it will be * synced out and now accounted. 
If this is called * concurrently, or if we already did some work before crashing, * that's fine, since we track each object's accounted state * independently. */ for (obj = 0; err == 0; err = dmu_object_next(os, &obj, FALSE, 0)) { dmu_tx_t *tx; dmu_buf_t *db; int objerr; mutex_enter(&os->os_upgrade_lock); if (os->os_upgrade_exit) err = SET_ERROR(EINTR); mutex_exit(&os->os_upgrade_lock); if (err != 0) return (err); if (issig(JUSTLOOKING) && issig(FORREAL)) return (SET_ERROR(EINTR)); objerr = dmu_bonus_hold(os, obj, FTAG, &db); if (objerr != 0) continue; tx = dmu_tx_create(os); dmu_tx_hold_bonus(tx, obj); objerr = dmu_tx_assign(tx, TXG_WAIT); if (objerr != 0) { dmu_tx_abort(tx); continue; } dmu_buf_will_dirty(db, tx); dmu_buf_rele(db, FTAG); dmu_tx_commit(tx); } return (0); } int dmu_objset_userspace_upgrade(objset_t *os) { int err = 0; if (dmu_objset_userspace_present(os)) return (0); if (dmu_objset_is_snapshot(os)) return (SET_ERROR(EINVAL)); if (!dmu_objset_userused_enabled(os)) return (SET_ERROR(ENOTSUP)); err = dmu_objset_space_upgrade(os); if (err) return (err); os->os_flags |= OBJSET_FLAG_USERACCOUNTING_COMPLETE; txg_wait_synced(dmu_objset_pool(os), 0); return (0); } static int dmu_objset_userobjspace_upgrade_cb(objset_t *os) { int err = 0; if (dmu_objset_userobjspace_present(os)) return (0); if (dmu_objset_is_snapshot(os)) return (SET_ERROR(EINVAL)); if (!dmu_objset_userobjused_enabled(os)) return (SET_ERROR(ENOTSUP)); dmu_objset_ds(os)->ds_feature_activation_needed[ SPA_FEATURE_USEROBJ_ACCOUNTING] = B_TRUE; err = dmu_objset_space_upgrade(os); if (err) return (err); os->os_flags |= OBJSET_FLAG_USEROBJACCOUNTING_COMPLETE; txg_wait_synced(dmu_objset_pool(os), 0); return (0); } void dmu_objset_userobjspace_upgrade(objset_t *os) { dmu_objset_upgrade(os, dmu_objset_userobjspace_upgrade_cb); } boolean_t dmu_objset_userobjspace_upgradable(objset_t *os) { return (dmu_objset_type(os) == DMU_OST_ZFS && !dmu_objset_is_snapshot(os) && dmu_objset_userobjused_enabled(os) && !dmu_objset_userobjspace_present(os)); } void dmu_objset_space(objset_t *os, uint64_t *refdbytesp, uint64_t *availbytesp, uint64_t *usedobjsp, uint64_t *availobjsp) { dsl_dataset_space(os->os_dsl_dataset, refdbytesp, availbytesp, usedobjsp, availobjsp); } uint64_t dmu_objset_fsid_guid(objset_t *os) { return (dsl_dataset_fsid_guid(os->os_dsl_dataset)); } void dmu_objset_fast_stat(objset_t *os, dmu_objset_stats_t *stat) { stat->dds_type = os->os_phys->os_type; if (os->os_dsl_dataset) dsl_dataset_fast_stat(os->os_dsl_dataset, stat); } void dmu_objset_stats(objset_t *os, nvlist_t *nv) { ASSERT(os->os_dsl_dataset || os->os_phys->os_type == DMU_OST_META); if (os->os_dsl_dataset != NULL) dsl_dataset_stats(os->os_dsl_dataset, nv); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_TYPE, os->os_phys->os_type); dsl_prop_nvlist_add_uint64(nv, ZFS_PROP_USERACCOUNTING, dmu_objset_userspace_present(os)); } int dmu_objset_is_snapshot(objset_t *os) { if (os->os_dsl_dataset != NULL) return (os->os_dsl_dataset->ds_is_snapshot); else return (B_FALSE); } int dmu_snapshot_realname(objset_t *os, char *name, char *real, int maxlen, boolean_t *conflict) { dsl_dataset_t *ds = os->os_dsl_dataset; uint64_t ignored; if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0) return (SET_ERROR(ENOENT)); return (zap_lookup_norm(ds->ds_dir->dd_pool->dp_meta_objset, dsl_dataset_phys(ds)->ds_snapnames_zapobj, name, 8, 1, &ignored, MT_NORMALIZE, real, maxlen, conflict)); } int dmu_snapshot_list_next(objset_t *os, int namelen, char *name, uint64_t *idp, uint64_t *offp, boolean_t 
*case_conflict) { dsl_dataset_t *ds = os->os_dsl_dataset; zap_cursor_t cursor; zap_attribute_t attr; ASSERT(dsl_pool_config_held(dmu_objset_pool(os))); if (dsl_dataset_phys(ds)->ds_snapnames_zapobj == 0) return (SET_ERROR(ENOENT)); zap_cursor_init_serialized(&cursor, ds->ds_dir->dd_pool->dp_meta_objset, dsl_dataset_phys(ds)->ds_snapnames_zapobj, *offp); if (zap_cursor_retrieve(&cursor, &attr) != 0) { zap_cursor_fini(&cursor); return (SET_ERROR(ENOENT)); } if (strlen(attr.za_name) + 1 > namelen) { zap_cursor_fini(&cursor); return (SET_ERROR(ENAMETOOLONG)); } (void) strcpy(name, attr.za_name); if (idp) *idp = attr.za_first_integer; if (case_conflict) *case_conflict = attr.za_normalization_conflict; zap_cursor_advance(&cursor); *offp = zap_cursor_serialize(&cursor); zap_cursor_fini(&cursor); return (0); } int dmu_snapshot_lookup(objset_t *os, const char *name, uint64_t *value) { return (dsl_dataset_snap_lookup(os->os_dsl_dataset, name, value)); } int dmu_dir_list_next(objset_t *os, int namelen, char *name, uint64_t *idp, uint64_t *offp) { dsl_dir_t *dd = os->os_dsl_dataset->ds_dir; zap_cursor_t cursor; zap_attribute_t attr; /* there is no next dir on a snapshot! */ if (os->os_dsl_dataset->ds_object != dsl_dir_phys(dd)->dd_head_dataset_obj) return (SET_ERROR(ENOENT)); zap_cursor_init_serialized(&cursor, dd->dd_pool->dp_meta_objset, dsl_dir_phys(dd)->dd_child_dir_zapobj, *offp); if (zap_cursor_retrieve(&cursor, &attr) != 0) { zap_cursor_fini(&cursor); return (SET_ERROR(ENOENT)); } if (strlen(attr.za_name) + 1 > namelen) { zap_cursor_fini(&cursor); return (SET_ERROR(ENAMETOOLONG)); } (void) strcpy(name, attr.za_name); if (idp) *idp = attr.za_first_integer; zap_cursor_advance(&cursor); *offp = zap_cursor_serialize(&cursor); zap_cursor_fini(&cursor); return (0); } typedef struct dmu_objset_find_ctx { taskq_t *dc_tq; dsl_pool_t *dc_dp; uint64_t dc_ddobj; char *dc_ddname; /* last component of ddobj's name */ int (*dc_func)(dsl_pool_t *, dsl_dataset_t *, void *); void *dc_arg; int dc_flags; kmutex_t *dc_error_lock; int *dc_error; } dmu_objset_find_ctx_t; static void dmu_objset_find_dp_impl(dmu_objset_find_ctx_t *dcp) { dsl_pool_t *dp = dcp->dc_dp; dmu_objset_find_ctx_t *child_dcp; dsl_dir_t *dd; dsl_dataset_t *ds; zap_cursor_t zc; zap_attribute_t *attr; uint64_t thisobj; int err = 0; /* don't process if there already was an error */ if (*dcp->dc_error != 0) goto out; /* * Note: passing the name (dc_ddname) here is optional, but it * improves performance because we don't need to call * zap_value_search() to determine the name. */ err = dsl_dir_hold_obj(dp, dcp->dc_ddobj, dcp->dc_ddname, FTAG, &dd); if (err != 0) goto out; /* Don't visit hidden ($MOS & $ORIGIN) objsets. */ if (dd->dd_myname[0] == '$') { dsl_dir_rele(dd, FTAG); goto out; } thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj; attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); /* * Iterate over all children. 
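 * Each child directory gets its own copy of the find context; when a taskq
 * is available each child is dispatched as a separate task, otherwise we
 * recurse into it synchronously.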
*/ if (dcp->dc_flags & DS_FIND_CHILDREN) { for (zap_cursor_init(&zc, dp->dp_meta_objset, dsl_dir_phys(dd)->dd_child_dir_zapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT3U(attr->za_integer_length, ==, sizeof (uint64_t)); ASSERT3U(attr->za_num_integers, ==, 1); child_dcp = kmem_alloc(sizeof (*child_dcp), KM_SLEEP); *child_dcp = *dcp; child_dcp->dc_ddobj = attr->za_first_integer; child_dcp->dc_ddname = spa_strdup(attr->za_name); if (dcp->dc_tq != NULL) (void) taskq_dispatch(dcp->dc_tq, dmu_objset_find_dp_cb, child_dcp, TQ_SLEEP); else dmu_objset_find_dp_impl(child_dcp); } zap_cursor_fini(&zc); } /* * Iterate over all snapshots. */ if (dcp->dc_flags & DS_FIND_SNAPSHOTS) { dsl_dataset_t *ds; err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); if (err == 0) { uint64_t snapobj; snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj; dsl_dataset_rele(ds, FTAG); for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT3U(attr->za_integer_length, ==, sizeof (uint64_t)); ASSERT3U(attr->za_num_integers, ==, 1); err = dsl_dataset_hold_obj(dp, attr->za_first_integer, FTAG, &ds); if (err != 0) break; err = dcp->dc_func(dp, ds, dcp->dc_arg); dsl_dataset_rele(ds, FTAG); if (err != 0) break; } zap_cursor_fini(&zc); } } kmem_free(attr, sizeof (zap_attribute_t)); if (err != 0) { dsl_dir_rele(dd, FTAG); goto out; } /* * Apply to self. */ err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); /* * Note: we hold the dir while calling dsl_dataset_hold_obj() so * that the dir will remain cached, and we won't have to re-instantiate * it (which could be expensive due to finding its name via * zap_value_search()). */ dsl_dir_rele(dd, FTAG); if (err != 0) goto out; err = dcp->dc_func(dp, ds, dcp->dc_arg); dsl_dataset_rele(ds, FTAG); out: if (err != 0) { mutex_enter(dcp->dc_error_lock); /* only keep first error */ if (*dcp->dc_error == 0) *dcp->dc_error = err; mutex_exit(dcp->dc_error_lock); } if (dcp->dc_ddname != NULL) spa_strfree(dcp->dc_ddname); kmem_free(dcp, sizeof (*dcp)); } static void dmu_objset_find_dp_cb(void *arg) { dmu_objset_find_ctx_t *dcp = arg; dsl_pool_t *dp = dcp->dc_dp; /* * We need to get a pool_config_lock here, as there are several * assert(pool_config_held) down the stack. Getting a lock via * dsl_pool_config_enter is risky, as it might be stalled by a * pending writer. This would deadlock, as the write lock can * only be granted when our parent thread gives up the lock. * The _prio interface gives us priority over a pending writer. */ dsl_pool_config_enter_prio(dp, FTAG); dmu_objset_find_dp_impl(dcp); dsl_pool_config_exit(dp, FTAG); } /* * Find objsets under and including ddobj, call func(ds) on each. * The order for the enumeration is completely undefined. * func is called with dsl_pool_config held.
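 *
 * As an illustrative sketch only (the callback name and its argument are
 * hypothetical, not part of this code), a caller that counts datasets
 * might look like:
 *
 *	static int
 *	count_cb(dsl_pool_t *dp, dsl_dataset_t *ds, void *arg)
 *	{
 *		(*(uint64_t *)arg)++;
 *		return (0);
 *	}
 *
 *	uint64_t count = 0;
 *	error = dmu_objset_find_dp(dp, ddobj, count_cb, &count,
 *	    DS_FIND_CHILDREN | DS_FIND_SNAPSHOTS);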
*/ int dmu_objset_find_dp(dsl_pool_t *dp, uint64_t ddobj, int func(dsl_pool_t *, dsl_dataset_t *, void *), void *arg, int flags) { int error = 0; taskq_t *tq = NULL; int ntasks; dmu_objset_find_ctx_t *dcp; kmutex_t err_lock; mutex_init(&err_lock, NULL, MUTEX_DEFAULT, NULL); dcp = kmem_alloc(sizeof (*dcp), KM_SLEEP); dcp->dc_tq = NULL; dcp->dc_dp = dp; dcp->dc_ddobj = ddobj; dcp->dc_ddname = NULL; dcp->dc_func = func; dcp->dc_arg = arg; dcp->dc_flags = flags; dcp->dc_error_lock = &err_lock; dcp->dc_error = &error; if ((flags & DS_FIND_SERIALIZE) || dsl_pool_config_held_writer(dp)) { /* * In case a write lock is held we can't make use of * parallelism, as down the stack of the worker threads * the lock is asserted via dsl_pool_config_held. * In case of a read lock this is solved by getting a read * lock in each worker thread, which isn't possible in case * of a writer lock. So we fall back to the synchronous path * here. * In the future it might be possible to get some magic into * dsl_pool_config_held in a way that it returns true for * the worker threads so that a single lock held from this * thread suffices. For now, stay single threaded. */ dmu_objset_find_dp_impl(dcp); mutex_destroy(&err_lock); return (error); } ntasks = dmu_find_threads; if (ntasks == 0) ntasks = vdev_count_leaves(dp->dp_spa) * 4; tq = taskq_create("dmu_objset_find", ntasks, maxclsyspri, ntasks, INT_MAX, 0); if (tq == NULL) { kmem_free(dcp, sizeof (*dcp)); mutex_destroy(&err_lock); return (SET_ERROR(ENOMEM)); } dcp->dc_tq = tq; /* dcp will be freed by task */ (void) taskq_dispatch(tq, dmu_objset_find_dp_cb, dcp, TQ_SLEEP); /* * PORTING: this code relies on the property of taskq_wait to wait * until no more tasks are queued and no more tasks are active. As * we always queue new tasks from within other tasks, taskq_wait * reliably waits for the full recursion to finish, even though we * enqueue new tasks after taskq_wait has been called. * On platforms other than illumos, taskq_wait may not have this * property. */ taskq_wait(tq); taskq_destroy(tq); mutex_destroy(&err_lock); return (error); } /* * Find all objsets under name, and for each, call 'func(child_name, arg)'. * The dp_config_rwlock must not be held when this is called, and it * will not be held when the callback is called. * Therefore this function should only be used when the pool is not changing * (e.g. in syncing context), or the callback can deal with the possible races. */ static int dmu_objset_find_impl(spa_t *spa, const char *name, int func(const char *, void *), void *arg, int flags) { dsl_dir_t *dd; dsl_pool_t *dp = spa_get_dsl(spa); dsl_dataset_t *ds; zap_cursor_t zc; zap_attribute_t *attr; char *child; uint64_t thisobj; int err; dsl_pool_config_enter(dp, FTAG); err = dsl_dir_hold(dp, name, FTAG, &dd, NULL); if (err != 0) { dsl_pool_config_exit(dp, FTAG); return (err); } /* Don't visit hidden ($MOS & $ORIGIN) objsets. */ if (dd->dd_myname[0] == '$') { dsl_dir_rele(dd, FTAG); dsl_pool_config_exit(dp, FTAG); return (0); } thisobj = dsl_dir_phys(dd)->dd_head_dataset_obj; attr = kmem_alloc(sizeof (zap_attribute_t), KM_SLEEP); /* * Iterate over all children.
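 * Note that the pool config lock is dropped and re-acquired around each
 * recursive call and each snapshot callback below, which is why callers
 * must tolerate concurrent namespace changes (see the comment above this
 * function).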
*/ if (flags & DS_FIND_CHILDREN) { for (zap_cursor_init(&zc, dp->dp_meta_objset, dsl_dir_phys(dd)->dd_child_dir_zapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT3U(attr->za_integer_length, ==, sizeof (uint64_t)); ASSERT3U(attr->za_num_integers, ==, 1); child = kmem_asprintf("%s/%s", name, attr->za_name); dsl_pool_config_exit(dp, FTAG); err = dmu_objset_find_impl(spa, child, func, arg, flags); dsl_pool_config_enter(dp, FTAG); strfree(child); if (err != 0) break; } zap_cursor_fini(&zc); if (err != 0) { dsl_dir_rele(dd, FTAG); dsl_pool_config_exit(dp, FTAG); kmem_free(attr, sizeof (zap_attribute_t)); return (err); } } /* * Iterate over all snapshots. */ if (flags & DS_FIND_SNAPSHOTS) { err = dsl_dataset_hold_obj(dp, thisobj, FTAG, &ds); if (err == 0) { uint64_t snapobj; snapobj = dsl_dataset_phys(ds)->ds_snapnames_zapobj; dsl_dataset_rele(ds, FTAG); for (zap_cursor_init(&zc, dp->dp_meta_objset, snapobj); zap_cursor_retrieve(&zc, attr) == 0; (void) zap_cursor_advance(&zc)) { ASSERT3U(attr->za_integer_length, ==, sizeof (uint64_t)); ASSERT3U(attr->za_num_integers, ==, 1); child = kmem_asprintf("%s@%s", name, attr->za_name); dsl_pool_config_exit(dp, FTAG); err = func(child, arg); dsl_pool_config_enter(dp, FTAG); strfree(child); if (err != 0) break; } zap_cursor_fini(&zc); } } dsl_dir_rele(dd, FTAG); kmem_free(attr, sizeof (zap_attribute_t)); dsl_pool_config_exit(dp, FTAG); if (err != 0) return (err); /* Apply to self. */ return (func(name, arg)); } /* * See comment above dmu_objset_find_impl(). */ int dmu_objset_find(char *name, int func(const char *, void *), void *arg, int flags) { spa_t *spa; int error; error = spa_open(name, &spa, FTAG); if (error != 0) return (error); error = dmu_objset_find_impl(spa, name, func, arg, flags); spa_close(spa, FTAG); return (error); } void dmu_objset_set_user(objset_t *os, void *user_ptr) { ASSERT(MUTEX_HELD(&os->os_user_ptr_lock)); os->os_user_ptr = user_ptr; } void * dmu_objset_get_user(objset_t *os) { ASSERT(MUTEX_HELD(&os->os_user_ptr_lock)); return (os->os_user_ptr); } /* * Determine name of filesystem, given name of snapshot. 
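 * For example, given the snapshot name "pool/fs@snap" this writes "pool/fs"
 * into buf.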
* buf must be at least ZFS_MAX_DATASET_NAME_LEN bytes */ int dmu_fsname(const char *snapname, char *buf) { char *atp = strchr(snapname, '@'); if (atp == NULL) return (SET_ERROR(EINVAL)); if (atp - snapname >= ZFS_MAX_DATASET_NAME_LEN) return (SET_ERROR(ENAMETOOLONG)); (void) strlcpy(buf, snapname, atp - snapname + 1); return (0); } #if defined(_KERNEL) && defined(HAVE_SPL) EXPORT_SYMBOL(dmu_objset_zil); EXPORT_SYMBOL(dmu_objset_pool); EXPORT_SYMBOL(dmu_objset_ds); EXPORT_SYMBOL(dmu_objset_type); EXPORT_SYMBOL(dmu_objset_name); EXPORT_SYMBOL(dmu_objset_hold); EXPORT_SYMBOL(dmu_objset_own); EXPORT_SYMBOL(dmu_objset_rele); EXPORT_SYMBOL(dmu_objset_disown); EXPORT_SYMBOL(dmu_objset_from_ds); EXPORT_SYMBOL(dmu_objset_create); EXPORT_SYMBOL(dmu_objset_clone); EXPORT_SYMBOL(dmu_objset_stats); EXPORT_SYMBOL(dmu_objset_fast_stat); EXPORT_SYMBOL(dmu_objset_spa); EXPORT_SYMBOL(dmu_objset_space); EXPORT_SYMBOL(dmu_objset_fsid_guid); EXPORT_SYMBOL(dmu_objset_find); EXPORT_SYMBOL(dmu_objset_byteswap); EXPORT_SYMBOL(dmu_objset_evict_dbufs); EXPORT_SYMBOL(dmu_objset_snap_cmtime); EXPORT_SYMBOL(dmu_objset_dnodesize); EXPORT_SYMBOL(dmu_objset_sync); EXPORT_SYMBOL(dmu_objset_is_dirty); EXPORT_SYMBOL(dmu_objset_create_impl); EXPORT_SYMBOL(dmu_objset_open_impl); EXPORT_SYMBOL(dmu_objset_evict); EXPORT_SYMBOL(dmu_objset_register_type); EXPORT_SYMBOL(dmu_objset_do_userquota_updates); EXPORT_SYMBOL(dmu_objset_userquota_get_ids); EXPORT_SYMBOL(dmu_objset_userused_enabled); EXPORT_SYMBOL(dmu_objset_userspace_upgrade); EXPORT_SYMBOL(dmu_objset_userspace_present); EXPORT_SYMBOL(dmu_objset_userobjused_enabled); EXPORT_SYMBOL(dmu_objset_userobjspace_upgrade); EXPORT_SYMBOL(dmu_objset_userobjspace_upgradable); EXPORT_SYMBOL(dmu_objset_userobjspace_present); #endif diff --git a/module/zfs/spa_misc.c b/module/zfs/spa_misc.c index c39c137e661f..59d9fe57619c 100644 --- a/module/zfs/spa_misc.c +++ b/module/zfs/spa_misc.c @@ -1,2141 +1,2147 @@ /* * CDDL HEADER START * * The contents of this file are subject to the terms of the * Common Development and Distribution License (the "License"). * You may not use this file except in compliance with the License. * * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE * or http://www.opensolaris.org/os/licensing. * See the License for the specific language governing permissions * and limitations under the License. * * When distributing Covered Code, include this CDDL HEADER in each * file and include the License file at usr/src/OPENSOLARIS.LICENSE. * If applicable, add the following below this CDDL HEADER, with the * fields enclosed by brackets "[]" replaced with your own identifying * information: Portions Copyright [yyyy] [name of copyright owner] * * CDDL HEADER END */ /* * Copyright (c) 2005, 2010, Oracle and/or its affiliates. All rights reserved. * Copyright (c) 2011, 2015 by Delphix. All rights reserved. * Copyright 2015 Nexenta Systems, Inc. All rights reserved. * Copyright (c) 2014 Spectra Logic Corporation, All rights reserved. * Copyright 2013 Saso Kiselkov. All rights reserved. 
*/ #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "zfs_prop.h" #include /* * SPA locking * * There are four basic locks for managing spa_t structures: * * spa_namespace_lock (global mutex) * * This lock must be acquired to do any of the following: * * - Lookup a spa_t by name * - Add or remove a spa_t from the namespace * - Increase spa_refcount from non-zero * - Check if spa_refcount is zero * - Rename a spa_t * - add/remove/attach/detach devices * - Held for the duration of create/destroy/import/export * * It does not need to handle recursion. A create or destroy may * reference objects (files or zvols) in other pools, but by * definition they must have an existing reference, and will never need * to lookup a spa_t by name. * * spa_refcount (per-spa refcount_t protected by mutex) * * This reference count keeps track of any active users of the spa_t. The * spa_t cannot be destroyed or freed while this is non-zero. Internally, * the refcount is never really 'zero' - opening a pool implicitly keeps * some references in the DMU. Internally we check against spa_minref, but * present the image of a zero/non-zero value to consumers. * * spa_config_lock[] (per-spa array of rwlocks) * * This protects the spa_t from config changes, and must be held in * the following circumstances: * * - RW_READER to perform I/O to the spa * - RW_WRITER to change the vdev config * * The locking order is fairly straightforward: * * spa_namespace_lock -> spa_refcount * * The namespace lock must be acquired to increase the refcount from 0 * or to check if it is zero. * * spa_refcount -> spa_config_lock[] * * There must be at least one valid reference on the spa_t to acquire * the config lock. * * spa_namespace_lock -> spa_config_lock[] * * The namespace lock must always be taken before the config lock. * * * The spa_namespace_lock can be acquired directly and is globally visible. * * The namespace is manipulated using the following functions, all of which * require the spa_namespace_lock to be held. * * spa_lookup() Lookup a spa_t by name. * * spa_add() Create a new spa_t in the namespace. * * spa_remove() Remove a spa_t from the namespace. This also * frees up any memory associated with the spa_t. * * spa_next() Returns the next spa_t in the system, or the * first if NULL is passed. * * spa_evict_all() Shut down and remove all spa_t structures in * the system. * * spa_guid_exists() Determine whether a pool/device guid exists. * * The spa_refcount is manipulated using the following functions: * * spa_open_ref() Adds a reference to the given spa_t. Must be * called with spa_namespace_lock held if the * refcount is currently zero. * * spa_close() Remove a reference from the spa_t. This will * not free the spa_t or remove it from the * namespace. No locking is required. * * spa_refcount_zero() Returns true if the refcount is currently * zero. Must be called with spa_namespace_lock * held. * * The spa_config_lock[] is an array of rwlocks, ordered as follows: * SCL_CONFIG > SCL_STATE > SCL_ALLOC > SCL_ZIO > SCL_FREE > SCL_VDEV. * spa_config_lock[] is manipulated with spa_config_{enter,exit,held}(). * * To read the configuration, it suffices to hold one of these locks as reader. * To modify the configuration, you must hold all locks as writer. To modify * vdev state without altering the vdev tree's topology (e.g.
online/offline), * you must hold SCL_STATE and SCL_ZIO as writer. * * We use these distinct config locks to avoid recursive lock entry. * For example, spa_sync() (which holds SCL_CONFIG as reader) induces * block allocations (SCL_ALLOC), which may require reading space maps * from disk (dmu_read() -> zio_read() -> SCL_ZIO). * * The spa config locks cannot be normal rwlocks because we need the * ability to hand off ownership. For example, SCL_ZIO is acquired * by the issuing thread and later released by an interrupt thread. * They do, however, obey the usual write-wanted semantics to prevent * writer (i.e. system administrator) starvation. * * The lock acquisition rules are as follows: * * SCL_CONFIG * Protects changes to the vdev tree topology, such as vdev * add/remove/attach/detach. Protects the dirty config list * (spa_config_dirty_list) and the set of spares and l2arc devices. * * SCL_STATE * Protects changes to pool state and vdev state, such as vdev * online/offline/fault/degrade/clear. Protects the dirty state list * (spa_state_dirty_list) and global pool state (spa_state). * * SCL_ALLOC * Protects changes to metaslab groups and classes. * Held as reader by metaslab_alloc() and metaslab_claim(). * * SCL_ZIO * Held by bp-level zios (those which have no io_vd upon entry) * to prevent changes to the vdev tree. The bp-level zio implicitly * protects all of its vdev child zios, which do not hold SCL_ZIO. * * SCL_FREE * Protects changes to metaslab groups and classes. * Held as reader by metaslab_free(). SCL_FREE is distinct from * SCL_ALLOC, and lower than SCL_ZIO, so that we can safely free * blocks in zio_done() while another i/o that holds either * SCL_ALLOC or SCL_ZIO is waiting for this i/o to complete. * * SCL_VDEV * Held as reader to prevent changes to the vdev tree during trivial * inquiries such as bp_get_dsize(). SCL_VDEV is distinct from the * other locks, and lower than all of them, to ensure that it's safe * to acquire regardless of caller context. * * In addition, the following rules apply: * * (a) spa_props_lock protects pool properties, spa_config and spa_config_list. * The lock ordering is SCL_CONFIG > spa_props_lock. * * (b) I/O operations on leaf vdevs. For any zio operation that takes * an explicit vdev_t argument -- such as zio_ioctl(), zio_read_phys(), * or zio_write_phys() -- the caller must ensure that the config cannot * change in the interim, and that the vdev cannot be reopened. * SCL_STATE as reader suffices for both. * * The vdev configuration is protected by spa_vdev_enter() / spa_vdev_exit(). * * spa_vdev_enter() Acquire the namespace lock and the config lock * for writing. * * spa_vdev_exit() Release the config lock, wait for all I/O * to complete, sync the updated configs to the * cache, and release the namespace lock. * * vdev state is protected by spa_vdev_state_enter() / spa_vdev_state_exit(). * Like spa_vdev_enter/exit, these are convenience wrappers -- the actual * locking is, always, based on spa_namespace_lock and spa_config_lock[]. * * spa_rename() is also implemented within this file since it requires * manipulation of the namespace.
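 *
 * As a minimal sketch (mirroring what bp_get_dsize() does later in this
 * file), a trivial reader of the vdev tree looks like:
 *
 *	spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER);
 *	vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva));
 *	...
 *	spa_config_exit(spa, SCL_VDEV, FTAG);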
*/ static avl_tree_t spa_namespace_avl; kmutex_t spa_namespace_lock; static kcondvar_t spa_namespace_cv; int spa_max_replication_override = SPA_DVAS_PER_BP; static kmutex_t spa_spare_lock; static avl_tree_t spa_spare_avl; static kmutex_t spa_l2cache_lock; static avl_tree_t spa_l2cache_avl; kmem_cache_t *spa_buffer_pool; int spa_mode_global; #ifdef ZFS_DEBUG /* Everything except dprintf and spa is on by default in debug builds */ int zfs_flags = ~(ZFS_DEBUG_DPRINTF | ZFS_DEBUG_SPA); #else int zfs_flags = 0; #endif /* * zfs_recover can be set to nonzero to attempt to recover from * otherwise-fatal errors, typically caused by on-disk corruption. When * set, calls to zfs_panic_recover() will turn into warning messages. * This should only be used as a last resort, as it typically results * in leaked space, or worse. */ int zfs_recover = B_FALSE; /* * If destroy encounters an EIO while reading metadata (e.g. indirect * blocks), space referenced by the missing metadata can not be freed. * Normally this causes the background destroy to become "stalled", as * it is unable to make forward progress. While in this stalled state, * all remaining space to free from the error-encountering filesystem is * "temporarily leaked". Set this flag to cause it to ignore the EIO, * permanently leak the space from indirect blocks that can not be read, * and continue to free everything else that it can. * * The default, "stalling" behavior is useful if the storage partially * fails (i.e. some but not all i/os fail), and then later recovers. In * this case, we will be able to continue pool operations while it is * partially failed, and when it recovers, we can continue to free the * space, with no leaks. However, note that this case is actually * fairly rare. * * Typically pools either (a) fail completely (but perhaps temporarily, * e.g. a top-level vdev going offline), or (b) have localized, * permanent errors (e.g. disk returns the wrong data due to bit flip or * firmware bug). In case (a), this setting does not matter because the * pool will be suspended and the sync thread will not be able to make * forward progress regardless. In case (b), because the error is * permanent, the best we can do is leak the minimum amount of space, * which is what setting this flag will do. Therefore, it is reasonable * for this flag to normally be set, but we chose the more conservative * approach of not setting it, so that there is no possibility of * leaking space in the "partial temporary" failure case. */ int zfs_free_leak_on_eio = B_FALSE; /* * Expiration time in milliseconds. This value has two meanings. First it is * used to determine when the spa_deadman() logic should fire. By default the * spa_deadman() will fire if spa_sync() has not completed in 1000 seconds. * Secondly, the value determines if an I/O is considered "hung". Any I/O that * has not completed in zfs_deadman_synctime_ms is considered "hung" resulting * in a system panic. */ unsigned long zfs_deadman_synctime_ms = 1000000ULL; /* * Check time in milliseconds. This defines the frequency at which we check * for hung I/O. */ unsigned long zfs_deadman_checktime_ms = 5000ULL; /* * By default the deadman is enabled. */ int zfs_deadman_enabled = 1; /* * The worst case is single-sector max-parity RAID-Z blocks, in which * case the space requirement is exactly (VDEV_RAIDZ_MAXPARITY + 1) * times the size; so just assume that. 
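 * (Concretely, with VDEV_RAIDZ_MAXPARITY == 3, a single 512-byte block may
 * thus allocate up to (3 + 1) * 512 = 2048 bytes on a raidz vdev.)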
Add to this the fact that * we can have up to 3 DVAs per bp, and one more factor of 2 because * the block may be dittoed with up to 3 DVAs by ddt_sync(). All together, * the worst case is: * (VDEV_RAIDZ_MAXPARITY + 1) * SPA_DVAS_PER_BP * 2 == 24 */ int spa_asize_inflation = 24; /* * Normally, we don't allow the last 3.2% (1/(2^spa_slop_shift)) of space in * the pool to be consumed. This ensures that we don't run the pool * completely out of space, due to unaccounted changes (e.g. to the MOS). * It also limits the worst-case time to allocate space. If we have * less than this amount of free space, most ZPL operations (e.g. write, * create) will return ENOSPC. * * Certain operations (e.g. file removal, most administrative actions) can * use half the slop space. They will only return ENOSPC if less than half * the slop space is free. Typically, once the pool has less than the slop * space free, the user will use these operations to free up space in the pool. * These are the operations that call dsl_pool_adjustedsize() with the netfree * argument set to TRUE. * * A very restricted set of operations are always permitted, regardless of * the amount of free space. These are the operations that call * dsl_sync_task(ZFS_SPACE_CHECK_NONE), e.g. "zfs destroy". If these * operations result in a net increase in the amount of space used, * it is possible to run the pool completely out of space, causing it to * be permanently read-only. * + * Note that on very small pools, the slop space will be larger than + * 3.2%, in an effort to have it be at least spa_min_slop (128MB), + * but we never allow it to be more than half the pool size. + * * See also the comments in zfs_space_check_t. */ int spa_slop_shift = 5; +uint64_t spa_min_slop = 128 * 1024 * 1024; /* * ========================================================================== * SPA config locking * ========================================================================== */ static void spa_config_lock_init(spa_t *spa) { int i; for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; mutex_init(&scl->scl_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&scl->scl_cv, NULL, CV_DEFAULT, NULL); refcount_create_untracked(&scl->scl_count); scl->scl_writer = NULL; scl->scl_write_wanted = 0; } } static void spa_config_lock_destroy(spa_t *spa) { int i; for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; mutex_destroy(&scl->scl_lock); cv_destroy(&scl->scl_cv); refcount_destroy(&scl->scl_count); ASSERT(scl->scl_writer == NULL); ASSERT(scl->scl_write_wanted == 0); } } int spa_config_tryenter(spa_t *spa, int locks, void *tag, krw_t rw) { int i; for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; if (!(locks & (1 << i))) continue; mutex_enter(&scl->scl_lock); if (rw == RW_READER) { if (scl->scl_writer || scl->scl_write_wanted) { mutex_exit(&scl->scl_lock); spa_config_exit(spa, locks & ((1 << i) - 1), tag); return (0); } } else { ASSERT(scl->scl_writer != curthread); if (!refcount_is_zero(&scl->scl_count)) { mutex_exit(&scl->scl_lock); spa_config_exit(spa, locks & ((1 << i) - 1), tag); return (0); } scl->scl_writer = curthread; } (void) refcount_add(&scl->scl_count, tag); mutex_exit(&scl->scl_lock); } return (1); } void spa_config_enter(spa_t *spa, int locks, void *tag, krw_t rw) { int wlocks_held = 0; int i; ASSERT3U(SCL_LOCKS, <, sizeof (wlocks_held) * NBBY); for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; if (scl->scl_writer == curthread) 
wlocks_held |= (1 << i); if (!(locks & (1 << i))) continue; mutex_enter(&scl->scl_lock); if (rw == RW_READER) { while (scl->scl_writer || scl->scl_write_wanted) { cv_wait(&scl->scl_cv, &scl->scl_lock); } } else { ASSERT(scl->scl_writer != curthread); while (!refcount_is_zero(&scl->scl_count)) { scl->scl_write_wanted++; cv_wait(&scl->scl_cv, &scl->scl_lock); scl->scl_write_wanted--; } scl->scl_writer = curthread; } (void) refcount_add(&scl->scl_count, tag); mutex_exit(&scl->scl_lock); } ASSERT(wlocks_held <= locks); } void spa_config_exit(spa_t *spa, int locks, void *tag) { int i; for (i = SCL_LOCKS - 1; i >= 0; i--) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; if (!(locks & (1 << i))) continue; mutex_enter(&scl->scl_lock); ASSERT(!refcount_is_zero(&scl->scl_count)); if (refcount_remove(&scl->scl_count, tag) == 0) { ASSERT(scl->scl_writer == NULL || scl->scl_writer == curthread); scl->scl_writer = NULL; /* OK in either case */ cv_broadcast(&scl->scl_cv); } mutex_exit(&scl->scl_lock); } } int spa_config_held(spa_t *spa, int locks, krw_t rw) { int i, locks_held = 0; for (i = 0; i < SCL_LOCKS; i++) { spa_config_lock_t *scl = &spa->spa_config_lock[i]; if (!(locks & (1 << i))) continue; if ((rw == RW_READER && !refcount_is_zero(&scl->scl_count)) || (rw == RW_WRITER && scl->scl_writer == curthread)) locks_held |= 1 << i; } return (locks_held); } /* * ========================================================================== * SPA namespace functions * ========================================================================== */ /* * Lookup the named spa_t in the AVL tree. The spa_namespace_lock must be held. * Returns NULL if no matching spa_t is found. */ spa_t * spa_lookup(const char *name) { static spa_t search; /* spa_t is large; don't allocate on stack */ spa_t *spa; avl_index_t where; char *cp; ASSERT(MUTEX_HELD(&spa_namespace_lock)); (void) strlcpy(search.spa_name, name, sizeof (search.spa_name)); /* * If it's a full dataset name, figure out the pool name and * just use that. */ cp = strpbrk(search.spa_name, "/@#"); if (cp != NULL) *cp = '\0'; spa = avl_find(&spa_namespace_avl, &search, &where); return (spa); } /* * Fires when spa_sync has not completed within zfs_deadman_synctime_ms. * If the zfs_deadman_enabled flag is set then it inspects all vdev queues * looking for potentially hung I/Os. */ void spa_deadman(void *arg) { spa_t *spa = arg; /* Disable the deadman if the pool is suspended. */ if (spa_suspended(spa)) return; zfs_dbgmsg("slow spa_sync: started %llu seconds ago, calls %llu", (gethrtime() - spa->spa_sync_starttime) / NANOSEC, ++spa->spa_deadman_calls); if (zfs_deadman_enabled) vdev_deadman(spa->spa_root_vdev); spa->spa_deadman_tqid = taskq_dispatch_delay(system_delay_taskq, spa_deadman, spa, TQ_SLEEP, ddi_get_lbolt() + MSEC_TO_TICK(zfs_deadman_checktime_ms)); } /* * Create an uninitialized spa_t with the given name. Requires * spa_namespace_lock. The caller must ensure that the spa_t doesn't already * exist by calling spa_lookup() first. 
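 *
 * A minimal sketch of the expected call sequence (error handling omitted):
 *
 *	mutex_enter(&spa_namespace_lock);
 *	if (spa_lookup(name) == NULL)
 *		spa = spa_add(name, config, altroot);
 *	mutex_exit(&spa_namespace_lock);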
*/ spa_t * spa_add(const char *name, nvlist_t *config, const char *altroot) { spa_t *spa; spa_config_dirent_t *dp; int t; int i; ASSERT(MUTEX_HELD(&spa_namespace_lock)); spa = kmem_zalloc(sizeof (spa_t), KM_SLEEP); mutex_init(&spa->spa_async_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_errlist_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_errlog_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_evicting_os_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_history_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_proc_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_props_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_cksum_tmpls_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_scrub_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_suspend_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_vdev_top_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_feat_stats_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa->spa_alloc_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&spa->spa_async_cv, NULL, CV_DEFAULT, NULL); cv_init(&spa->spa_evicting_os_cv, NULL, CV_DEFAULT, NULL); cv_init(&spa->spa_proc_cv, NULL, CV_DEFAULT, NULL); cv_init(&spa->spa_scrub_io_cv, NULL, CV_DEFAULT, NULL); cv_init(&spa->spa_suspend_cv, NULL, CV_DEFAULT, NULL); for (t = 0; t < TXG_SIZE; t++) bplist_create(&spa->spa_free_bplist[t]); (void) strlcpy(spa->spa_name, name, sizeof (spa->spa_name)); spa->spa_state = POOL_STATE_UNINITIALIZED; spa->spa_freeze_txg = UINT64_MAX; spa->spa_final_txg = UINT64_MAX; spa->spa_load_max_txg = UINT64_MAX; spa->spa_proc = &p0; spa->spa_proc_state = SPA_PROC_NONE; spa->spa_deadman_synctime = MSEC2NSEC(zfs_deadman_synctime_ms); refcount_create(&spa->spa_refcount); spa_config_lock_init(spa); spa_stats_init(spa); avl_add(&spa_namespace_avl, spa); /* * Set the alternate root, if there is one. */ if (altroot) spa->spa_root = spa_strdup(altroot); avl_create(&spa->spa_alloc_tree, zio_timestamp_compare, sizeof (zio_t), offsetof(zio_t, io_alloc_node)); /* * Every pool starts with the default cachefile */ list_create(&spa->spa_config_list, sizeof (spa_config_dirent_t), offsetof(spa_config_dirent_t, scd_link)); dp = kmem_zalloc(sizeof (spa_config_dirent_t), KM_SLEEP); dp->scd_path = altroot ? NULL : spa_strdup(spa_config_path); list_insert_head(&spa->spa_config_list, dp); VERIFY(nvlist_alloc(&spa->spa_load_info, NV_UNIQUE_NAME, KM_SLEEP) == 0); if (config != NULL) { nvlist_t *features; if (nvlist_lookup_nvlist(config, ZPOOL_CONFIG_FEATURES_FOR_READ, &features) == 0) { VERIFY(nvlist_dup(features, &spa->spa_label_features, 0) == 0); } VERIFY(nvlist_dup(config, &spa->spa_config, 0) == 0); } if (spa->spa_label_features == NULL) { VERIFY(nvlist_alloc(&spa->spa_label_features, NV_UNIQUE_NAME, KM_SLEEP) == 0); } spa->spa_debug = ((zfs_flags & ZFS_DEBUG_SPA) != 0); spa->spa_min_ashift = INT_MAX; spa->spa_max_ashift = 0; /* Reset cached value */ spa->spa_dedup_dspace = ~0ULL; /* * As a pool is being created, treat all features as disabled by * setting SPA_FEATURE_DISABLED for all entries in the feature * refcount cache. */ for (i = 0; i < SPA_FEATURES; i++) { spa->spa_feat_refcount_cache[i] = SPA_FEATURE_DISABLED; } return (spa); } /* * Removes a spa_t from the namespace, freeing up any memory used. Requires * spa_namespace_lock. This is called only after the spa_t has been closed and * deactivated. 
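 * By this point the pool holds no references (refcount_count() == 0) and is
 * in the POOL_STATE_UNINITIALIZED state, as the ASSERTs below verify.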
*/ void spa_remove(spa_t *spa) { spa_config_dirent_t *dp; int t; ASSERT(MUTEX_HELD(&spa_namespace_lock)); ASSERT(spa->spa_state == POOL_STATE_UNINITIALIZED); ASSERT3U(refcount_count(&spa->spa_refcount), ==, 0); nvlist_free(spa->spa_config_splitting); avl_remove(&spa_namespace_avl, spa); cv_broadcast(&spa_namespace_cv); if (spa->spa_root) spa_strfree(spa->spa_root); while ((dp = list_head(&spa->spa_config_list)) != NULL) { list_remove(&spa->spa_config_list, dp); if (dp->scd_path != NULL) spa_strfree(dp->scd_path); kmem_free(dp, sizeof (spa_config_dirent_t)); } avl_destroy(&spa->spa_alloc_tree); list_destroy(&spa->spa_config_list); nvlist_free(spa->spa_label_features); nvlist_free(spa->spa_load_info); nvlist_free(spa->spa_feat_stats); spa_config_set(spa, NULL); refcount_destroy(&spa->spa_refcount); spa_stats_destroy(spa); spa_config_lock_destroy(spa); for (t = 0; t < TXG_SIZE; t++) bplist_destroy(&spa->spa_free_bplist[t]); zio_checksum_templates_free(spa); cv_destroy(&spa->spa_async_cv); cv_destroy(&spa->spa_evicting_os_cv); cv_destroy(&spa->spa_proc_cv); cv_destroy(&spa->spa_scrub_io_cv); cv_destroy(&spa->spa_suspend_cv); mutex_destroy(&spa->spa_alloc_lock); mutex_destroy(&spa->spa_async_lock); mutex_destroy(&spa->spa_errlist_lock); mutex_destroy(&spa->spa_errlog_lock); mutex_destroy(&spa->spa_evicting_os_lock); mutex_destroy(&spa->spa_history_lock); mutex_destroy(&spa->spa_proc_lock); mutex_destroy(&spa->spa_props_lock); mutex_destroy(&spa->spa_cksum_tmpls_lock); mutex_destroy(&spa->spa_scrub_lock); mutex_destroy(&spa->spa_suspend_lock); mutex_destroy(&spa->spa_vdev_top_lock); mutex_destroy(&spa->spa_feat_stats_lock); kmem_free(spa, sizeof (spa_t)); } /* * Given a pool, return the next pool in the namespace, or NULL if there is * none. If 'prev' is NULL, return the first pool. */ spa_t * spa_next(spa_t *prev) { ASSERT(MUTEX_HELD(&spa_namespace_lock)); if (prev) return (AVL_NEXT(&spa_namespace_avl, prev)); else return (avl_first(&spa_namespace_avl)); } /* * ========================================================================== * SPA refcount functions * ========================================================================== */ /* * Add a reference to the given spa_t. Must have at least one reference, or * have the namespace lock held. */ void spa_open_ref(spa_t *spa, void *tag) { ASSERT(refcount_count(&spa->spa_refcount) >= spa->spa_minref || MUTEX_HELD(&spa_namespace_lock)); (void) refcount_add(&spa->spa_refcount, tag); } /* * Remove a reference to the given spa_t. Must have at least one reference, or * have the namespace lock held. */ void spa_close(spa_t *spa, void *tag) { ASSERT(refcount_count(&spa->spa_refcount) > spa->spa_minref || MUTEX_HELD(&spa_namespace_lock)); (void) refcount_remove(&spa->spa_refcount, tag); } /* * Remove a reference to the given spa_t held by a dsl dir that is * being asynchronously released. Async releases occur from a taskq * performing eviction of dsl datasets and dirs. The namespace lock * isn't held and the hold by the object being evicted may contribute to * spa_minref (e.g. dataset or directory released during pool export), * so the asserts in spa_close() do not apply. */ void spa_async_close(spa_t *spa, void *tag) { (void) refcount_remove(&spa->spa_refcount, tag); } /* * Check to see if the spa refcount is zero. Must be called with * spa_namespace_lock held. 
We really compare against spa_minref, which is the * number of references acquired when opening a pool. */ boolean_t spa_refcount_zero(spa_t *spa) { ASSERT(MUTEX_HELD(&spa_namespace_lock)); return (refcount_count(&spa->spa_refcount) == spa->spa_minref); } /* * ========================================================================== * SPA spare and l2cache tracking * ========================================================================== */ /* * Hot spares and cache devices are tracked using the same code below, * for 'auxiliary' devices. */ typedef struct spa_aux { uint64_t aux_guid; uint64_t aux_pool; avl_node_t aux_avl; int aux_count; } spa_aux_t; static inline int spa_aux_compare(const void *a, const void *b) { const spa_aux_t *sa = (const spa_aux_t *)a; const spa_aux_t *sb = (const spa_aux_t *)b; return (AVL_CMP(sa->aux_guid, sb->aux_guid)); } void spa_aux_add(vdev_t *vd, avl_tree_t *avl) { avl_index_t where; spa_aux_t search; spa_aux_t *aux; search.aux_guid = vd->vdev_guid; if ((aux = avl_find(avl, &search, &where)) != NULL) { aux->aux_count++; } else { aux = kmem_zalloc(sizeof (spa_aux_t), KM_SLEEP); aux->aux_guid = vd->vdev_guid; aux->aux_count = 1; avl_insert(avl, aux, where); } } void spa_aux_remove(vdev_t *vd, avl_tree_t *avl) { spa_aux_t search; spa_aux_t *aux; avl_index_t where; search.aux_guid = vd->vdev_guid; aux = avl_find(avl, &search, &where); ASSERT(aux != NULL); if (--aux->aux_count == 0) { avl_remove(avl, aux); kmem_free(aux, sizeof (spa_aux_t)); } else if (aux->aux_pool == spa_guid(vd->vdev_spa)) { aux->aux_pool = 0ULL; } } boolean_t spa_aux_exists(uint64_t guid, uint64_t *pool, int *refcnt, avl_tree_t *avl) { spa_aux_t search, *found; search.aux_guid = guid; found = avl_find(avl, &search, NULL); if (pool) { if (found) *pool = found->aux_pool; else *pool = 0ULL; } if (refcnt) { if (found) *refcnt = found->aux_count; else *refcnt = 0; } return (found != NULL); } void spa_aux_activate(vdev_t *vd, avl_tree_t *avl) { spa_aux_t search, *found; avl_index_t where; search.aux_guid = vd->vdev_guid; found = avl_find(avl, &search, &where); ASSERT(found != NULL); ASSERT(found->aux_pool == 0ULL); found->aux_pool = spa_guid(vd->vdev_spa); } /* * Spares are tracked globally due to the following constraints: * * - A spare may be part of multiple pools. * - A spare may be added to a pool even if it's actively in use within * another pool. * - A spare in use in any pool can only be the source of a replacement if * the target is a spare in the same pool. * * We keep track of all spares on the system through the use of a reference * counted AVL tree. When a vdev is added as a spare, or used as a replacement * spare, we bump the reference count in the AVL tree. In addition, we set * the 'vdev_isspare' member to indicate that the device is a spare (active or * inactive). When a spare is made active (used to replace a device in the * pool), we also keep track of which pool it's been made a part of. * * The 'spa_spare_lock' protects the AVL tree. These functions are normally * called under the spa_namespace lock as part of vdev reconfiguration. The * separate spare lock exists for the status query path, which does not need to * be completely consistent with respect to other vdev configuration changes.
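 * For example, a device configured as a spare in two different pools is
 * represented by a single AVL entry with aux_count == 2; the entry is
 * removed only when the last pool stops referencing it.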
*/ static int spa_spare_compare(const void *a, const void *b) { return (spa_aux_compare(a, b)); } void spa_spare_add(vdev_t *vd) { mutex_enter(&spa_spare_lock); ASSERT(!vd->vdev_isspare); spa_aux_add(vd, &spa_spare_avl); vd->vdev_isspare = B_TRUE; mutex_exit(&spa_spare_lock); } void spa_spare_remove(vdev_t *vd) { mutex_enter(&spa_spare_lock); ASSERT(vd->vdev_isspare); spa_aux_remove(vd, &spa_spare_avl); vd->vdev_isspare = B_FALSE; mutex_exit(&spa_spare_lock); } boolean_t spa_spare_exists(uint64_t guid, uint64_t *pool, int *refcnt) { boolean_t found; mutex_enter(&spa_spare_lock); found = spa_aux_exists(guid, pool, refcnt, &spa_spare_avl); mutex_exit(&spa_spare_lock); return (found); } void spa_spare_activate(vdev_t *vd) { mutex_enter(&spa_spare_lock); ASSERT(vd->vdev_isspare); spa_aux_activate(vd, &spa_spare_avl); mutex_exit(&spa_spare_lock); } /* * Level 2 ARC devices are tracked globally for the same reasons as spares. * Cache devices currently only support one pool per cache device, and so * for these devices the aux reference count is currently unused beyond 1. */ static int spa_l2cache_compare(const void *a, const void *b) { return (spa_aux_compare(a, b)); } void spa_l2cache_add(vdev_t *vd) { mutex_enter(&spa_l2cache_lock); ASSERT(!vd->vdev_isl2cache); spa_aux_add(vd, &spa_l2cache_avl); vd->vdev_isl2cache = B_TRUE; mutex_exit(&spa_l2cache_lock); } void spa_l2cache_remove(vdev_t *vd) { mutex_enter(&spa_l2cache_lock); ASSERT(vd->vdev_isl2cache); spa_aux_remove(vd, &spa_l2cache_avl); vd->vdev_isl2cache = B_FALSE; mutex_exit(&spa_l2cache_lock); } boolean_t spa_l2cache_exists(uint64_t guid, uint64_t *pool) { boolean_t found; mutex_enter(&spa_l2cache_lock); found = spa_aux_exists(guid, pool, NULL, &spa_l2cache_avl); mutex_exit(&spa_l2cache_lock); return (found); } void spa_l2cache_activate(vdev_t *vd) { mutex_enter(&spa_l2cache_lock); ASSERT(vd->vdev_isl2cache); spa_aux_activate(vd, &spa_l2cache_avl); mutex_exit(&spa_l2cache_lock); } /* * ========================================================================== * SPA vdev locking * ========================================================================== */ /* * Lock the given spa_t for the purpose of adding or removing a vdev. * Grabs the global spa_namespace_lock plus the spa config lock for writing. * It returns the next transaction group for the spa_t. */ uint64_t spa_vdev_enter(spa_t *spa) { mutex_enter(&spa->spa_vdev_top_lock); mutex_enter(&spa_namespace_lock); return (spa_vdev_config_enter(spa)); } /* * Internal implementation for spa_vdev_enter(). Used when a vdev * operation requires multiple syncs (i.e. removing a device) while * keeping the spa_namespace_lock held. */ uint64_t spa_vdev_config_enter(spa_t *spa) { ASSERT(MUTEX_HELD(&spa_namespace_lock)); spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); return (spa_last_synced_txg(spa) + 1); } /* * Used in combination with spa_vdev_config_enter() to allow the syncing * of multiple transactions without releasing the spa_namespace_lock. */ void spa_vdev_config_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error, char *tag) { int config_changed = B_FALSE; ASSERT(MUTEX_HELD(&spa_namespace_lock)); ASSERT(txg > spa_last_synced_txg(spa)); spa->spa_pending_vdev = NULL; /* * Reassess the DTLs. */ vdev_dtl_reassess(spa->spa_root_vdev, 0, 0, B_FALSE); if (error == 0 && !list_is_empty(&spa->spa_config_dirty_list)) { config_changed = B_TRUE; spa->spa_config_generation++; } /* * Verify the metaslab classes. 
*/ ASSERT(metaslab_class_validate(spa_normal_class(spa)) == 0); ASSERT(metaslab_class_validate(spa_log_class(spa)) == 0); spa_config_exit(spa, SCL_ALL, spa); /* * Panic the system if the specified tag requires it. This * is useful for ensuring that configurations are updated * transactionally. */ if (zio_injection_enabled) zio_handle_panic_injection(spa, tag, 0); /* * Note: this txg_wait_synced() is important because it ensures * that there won't be more than one config change per txg. * This allows us to use the txg as the generation number. */ if (error == 0) txg_wait_synced(spa->spa_dsl_pool, txg); if (vd != NULL) { ASSERT(!vd->vdev_detached || vd->vdev_dtl_sm == NULL); spa_config_enter(spa, SCL_ALL, spa, RW_WRITER); vdev_free(vd); spa_config_exit(spa, SCL_ALL, spa); } /* * If the config changed, update the config cache. */ if (config_changed) spa_config_sync(spa, B_FALSE, B_TRUE); } /* * Unlock the spa_t after adding or removing a vdev. Besides undoing the * locking of spa_vdev_enter(), we also want to make sure the transactions have * synced to disk, and then update the global configuration cache with the new * information. */ int spa_vdev_exit(spa_t *spa, vdev_t *vd, uint64_t txg, int error) { spa_vdev_config_exit(spa, vd, txg, error, FTAG); mutex_exit(&spa_namespace_lock); mutex_exit(&spa->spa_vdev_top_lock); return (error); } /* * Lock the given spa_t for the purpose of changing vdev state. */ void spa_vdev_state_enter(spa_t *spa, int oplocks) { int locks = SCL_STATE_ALL | oplocks; /* * Root pools may need to read from the underlying devfs filesystem * when opening up a vdev. Unfortunately if we're holding the * SCL_ZIO lock it will result in a deadlock when we try to issue * the read from the root filesystem. Instead we "prefetch" * the associated vnodes that we need prior to opening the * underlying devices and cache them so that we can prevent * any I/O when we are doing the actual open. */ if (spa_is_root(spa)) { int low = locks & ~(SCL_ZIO - 1); int high = locks & ~low; spa_config_enter(spa, high, spa, RW_WRITER); vdev_hold(spa->spa_root_vdev); spa_config_enter(spa, low, spa, RW_WRITER); } else { spa_config_enter(spa, locks, spa, RW_WRITER); } spa->spa_vdev_locks = locks; } int spa_vdev_state_exit(spa_t *spa, vdev_t *vd, int error) { boolean_t config_changed = B_FALSE; if (vd != NULL || error == 0) vdev_dtl_reassess(vd ? vd->vdev_top : spa->spa_root_vdev, 0, 0, B_FALSE); if (vd != NULL) { vdev_state_dirty(vd->vdev_top); config_changed = B_TRUE; spa->spa_config_generation++; } if (spa_is_root(spa)) vdev_rele(spa->spa_root_vdev); ASSERT3U(spa->spa_vdev_locks, >=, SCL_STATE_ALL); spa_config_exit(spa, spa->spa_vdev_locks, spa); /* * If anything changed, wait for it to sync. This ensures that, * from the system administrator's perspective, zpool(1M) commands * are synchronous. This is important for things like zpool offline: * when the command completes, you expect no further I/O from ZFS. */ if (vd != NULL) txg_wait_synced(spa->spa_dsl_pool, 0); /* * If the config changed, update the config cache.
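 * Unlike spa_vdev_config_exit(), the namespace lock is not held here, so
 * it is acquired just for the duration of the config sync.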
*/ if (config_changed) { mutex_enter(&spa_namespace_lock); spa_config_sync(spa, B_FALSE, B_TRUE); mutex_exit(&spa_namespace_lock); } return (error); } /* * ========================================================================== * Miscellaneous functions * ========================================================================== */ void spa_activate_mos_feature(spa_t *spa, const char *feature, dmu_tx_t *tx) { if (!nvlist_exists(spa->spa_label_features, feature)) { fnvlist_add_boolean(spa->spa_label_features, feature); /* * When we are creating the pool (tx_txg==TXG_INITIAL), we can't * dirty the vdev config because lock SCL_CONFIG is not held. * Thankfully, in this case we don't need to dirty the config * because it will be written out anyway when we finish * creating the pool. */ if (tx->tx_txg != TXG_INITIAL) vdev_config_dirty(spa->spa_root_vdev); } } void spa_deactivate_mos_feature(spa_t *spa, const char *feature) { if (nvlist_remove_all(spa->spa_label_features, feature) == 0) vdev_config_dirty(spa->spa_root_vdev); } /* * Rename a spa_t. */ int spa_rename(const char *name, const char *newname) { spa_t *spa; int err; /* * Lookup the spa_t and grab the config lock for writing. We need to * actually open the pool so that we can sync out the necessary labels. * It's OK to call spa_open() with the namespace lock held because we * allow recursive calls for other reasons. */ mutex_enter(&spa_namespace_lock); if ((err = spa_open(name, &spa, FTAG)) != 0) { mutex_exit(&spa_namespace_lock); return (err); } spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); avl_remove(&spa_namespace_avl, spa); (void) strlcpy(spa->spa_name, newname, sizeof (spa->spa_name)); avl_add(&spa_namespace_avl, spa); /* * Sync all labels to disk with the new names by marking the root vdev * dirty and waiting for it to sync. It will pick up the new pool name * during the sync. */ vdev_config_dirty(spa->spa_root_vdev); spa_config_exit(spa, SCL_ALL, FTAG); txg_wait_synced(spa->spa_dsl_pool, 0); /* * Sync the updated config cache. */ spa_config_sync(spa, B_FALSE, B_TRUE); spa_close(spa, FTAG); mutex_exit(&spa_namespace_lock); return (0); } /* * Return the spa_t associated with given pool_guid, if it exists. If * device_guid is non-zero, determine whether the pool exists *and* contains * a device with the specified device_guid. */ spa_t * spa_by_guid(uint64_t pool_guid, uint64_t device_guid) { spa_t *spa; avl_tree_t *t = &spa_namespace_avl; ASSERT(MUTEX_HELD(&spa_namespace_lock)); for (spa = avl_first(t); spa != NULL; spa = AVL_NEXT(t, spa)) { if (spa->spa_state == POOL_STATE_UNINITIALIZED) continue; if (spa->spa_root_vdev == NULL) continue; if (spa_guid(spa) == pool_guid) { if (device_guid == 0) break; if (vdev_lookup_by_guid(spa->spa_root_vdev, device_guid) != NULL) break; /* * Check any devices we may be in the process of adding. */ if (spa->spa_pending_vdev) { if (vdev_lookup_by_guid(spa->spa_pending_vdev, device_guid) != NULL) break; } } } return (spa); } /* * Determine whether a pool with the given pool_guid exists. 
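 * If device_guid is non-zero, the pool must also contain a device with that
 * guid (see spa_by_guid() above); passing 0 checks for the pool alone.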
*/ boolean_t spa_guid_exists(uint64_t pool_guid, uint64_t device_guid) { return (spa_by_guid(pool_guid, device_guid) != NULL); } char * spa_strdup(const char *s) { size_t len; char *new; len = strlen(s); new = kmem_alloc(len + 1, KM_SLEEP); bcopy(s, new, len); new[len] = '\0'; return (new); } void spa_strfree(char *s) { kmem_free(s, strlen(s) + 1); } uint64_t spa_get_random(uint64_t range) { uint64_t r; ASSERT(range != 0); (void) random_get_pseudo_bytes((void *)&r, sizeof (uint64_t)); return (r % range); } uint64_t spa_generate_guid(spa_t *spa) { uint64_t guid = spa_get_random(-1ULL); if (spa != NULL) { while (guid == 0 || spa_guid_exists(spa_guid(spa), guid)) guid = spa_get_random(-1ULL); } else { while (guid == 0 || spa_guid_exists(guid, 0)) guid = spa_get_random(-1ULL); } return (guid); } void snprintf_blkptr(char *buf, size_t buflen, const blkptr_t *bp) { char type[256]; char *checksum = NULL; char *compress = NULL; if (bp != NULL) { if (BP_GET_TYPE(bp) & DMU_OT_NEWTYPE) { dmu_object_byteswap_t bswap = DMU_OT_BYTESWAP(BP_GET_TYPE(bp)); (void) snprintf(type, sizeof (type), "bswap %s %s", DMU_OT_IS_METADATA(BP_GET_TYPE(bp)) ? "metadata" : "data", dmu_ot_byteswap[bswap].ob_name); } else { (void) strlcpy(type, dmu_ot[BP_GET_TYPE(bp)].ot_name, sizeof (type)); } if (!BP_IS_EMBEDDED(bp)) { checksum = zio_checksum_table[BP_GET_CHECKSUM(bp)].ci_name; } compress = zio_compress_table[BP_GET_COMPRESS(bp)].ci_name; } SNPRINTF_BLKPTR(snprintf, ' ', buf, buflen, bp, type, checksum, compress); } void spa_freeze(spa_t *spa) { uint64_t freeze_txg = 0; spa_config_enter(spa, SCL_ALL, FTAG, RW_WRITER); if (spa->spa_freeze_txg == UINT64_MAX) { freeze_txg = spa_last_synced_txg(spa) + TXG_SIZE; spa->spa_freeze_txg = freeze_txg; } spa_config_exit(spa, SCL_ALL, FTAG); if (freeze_txg != 0) txg_wait_synced(spa_get_dsl(spa), freeze_txg); } void zfs_panic_recover(const char *fmt, ...) { va_list adx; va_start(adx, fmt); vcmn_err(zfs_recover ? CE_WARN : CE_PANIC, fmt, adx); va_end(adx); } /* * This is a stripped-down version of strtoull, suitable only for converting * lowercase hexadecimal numbers that don't overflow. */ uint64_t strtonum(const char *str, char **nptr) { uint64_t val = 0; char c; int digit; while ((c = *str) != '\0') { if (c >= '0' && c <= '9') digit = c - '0'; else if (c >= 'a' && c <= 'f') digit = 10 + c - 'a'; else break; val *= 16; val += digit; str++; } if (nptr) *nptr = (char *)str; return (val); } /* * ========================================================================== * Accessor functions * ========================================================================== */ boolean_t spa_shutting_down(spa_t *spa) { return (spa->spa_async_suspended); } dsl_pool_t * spa_get_dsl(spa_t *spa) { return (spa->spa_dsl_pool); } boolean_t spa_is_initializing(spa_t *spa) { return (spa->spa_is_initializing); } blkptr_t * spa_get_rootblkptr(spa_t *spa) { return (&spa->spa_ubsync.ub_rootbp); } void spa_set_rootblkptr(spa_t *spa, const blkptr_t *bp) { spa->spa_uberblock.ub_rootbp = *bp; } void spa_altroot(spa_t *spa, char *buf, size_t buflen) { if (spa->spa_root == NULL) buf[0] = '\0'; else (void) strncpy(buf, spa->spa_root, buflen); } int spa_sync_pass(spa_t *spa) { return (spa->spa_sync_pass); } char * spa_name(spa_t *spa) { return (spa->spa_name); } uint64_t spa_guid(spa_t *spa) { dsl_pool_t *dp = spa_get_dsl(spa); uint64_t guid; /* * If we fail to parse the config during spa_load(), we can go through * the error path (which posts an ereport) and end up here with no root * vdev. 
We stash the original pool guid in 'spa_config_guid' to handle * this case. */ if (spa->spa_root_vdev == NULL) return (spa->spa_config_guid); guid = spa->spa_last_synced_guid != 0 ? spa->spa_last_synced_guid : spa->spa_root_vdev->vdev_guid; /* * Return the most recently synced out guid unless we're * in syncing context. */ if (dp && dsl_pool_sync_context(dp)) return (spa->spa_root_vdev->vdev_guid); else return (guid); } uint64_t spa_load_guid(spa_t *spa) { /* * This is a GUID that exists solely as a reference for the * purposes of the arc. It is generated at load time, and * is never written to persistent storage. */ return (spa->spa_load_guid); } uint64_t spa_last_synced_txg(spa_t *spa) { return (spa->spa_ubsync.ub_txg); } uint64_t spa_first_txg(spa_t *spa) { return (spa->spa_first_txg); } uint64_t spa_syncing_txg(spa_t *spa) { return (spa->spa_syncing_txg); } pool_state_t spa_state(spa_t *spa) { return (spa->spa_state); } spa_load_state_t spa_load_state(spa_t *spa) { return (spa->spa_load_state); } uint64_t spa_freeze_txg(spa_t *spa) { return (spa->spa_freeze_txg); } /* ARGSUSED */ uint64_t spa_get_asize(spa_t *spa, uint64_t lsize) { return (lsize * spa_asize_inflation); } /* * Return the amount of slop space in bytes. It is 1/32 of the pool (3.2%), - * or at least 32MB. + * or at least 128MB, unless that would cause it to be more than half the + * pool size. * * See the comment above spa_slop_shift for details. */ uint64_t spa_get_slop_space(spa_t *spa) { uint64_t space = spa_get_dspace(spa); - return (MAX(space >> spa_slop_shift, SPA_MINDEVSIZE >> 1)); + return (MAX(space >> spa_slop_shift, MIN(space >> 1, spa_min_slop))); } uint64_t spa_get_dspace(spa_t *spa) { return (spa->spa_dspace); } void spa_update_dspace(spa_t *spa) { spa->spa_dspace = metaslab_class_get_dspace(spa_normal_class(spa)) + ddt_get_dedup_dspace(spa); } /* * Return the failure mode that has been set to this pool. The default * behavior will be to block all I/Os when a complete failure occurs. */ uint8_t spa_get_failmode(spa_t *spa) { return (spa->spa_failmode); } boolean_t spa_suspended(spa_t *spa) { return (spa->spa_suspended); } uint64_t spa_version(spa_t *spa) { return (spa->spa_ubsync.ub_version); } boolean_t spa_deflate(spa_t *spa) { return (spa->spa_deflate); } metaslab_class_t * spa_normal_class(spa_t *spa) { return (spa->spa_normal_class); } metaslab_class_t * spa_log_class(spa_t *spa) { return (spa->spa_log_class); } void spa_evicting_os_register(spa_t *spa, objset_t *os) { mutex_enter(&spa->spa_evicting_os_lock); list_insert_head(&spa->spa_evicting_os_list, os); mutex_exit(&spa->spa_evicting_os_lock); } void spa_evicting_os_deregister(spa_t *spa, objset_t *os) { mutex_enter(&spa->spa_evicting_os_lock); list_remove(&spa->spa_evicting_os_list, os); cv_broadcast(&spa->spa_evicting_os_cv); mutex_exit(&spa->spa_evicting_os_lock); } void spa_evicting_os_wait(spa_t *spa) { mutex_enter(&spa->spa_evicting_os_lock); while (!list_is_empty(&spa->spa_evicting_os_list)) cv_wait(&spa->spa_evicting_os_cv, &spa->spa_evicting_os_lock); mutex_exit(&spa->spa_evicting_os_lock); dmu_buf_user_evict_wait(); } int spa_max_replication(spa_t *spa) { /* * As of SPA_VERSION == SPA_VERSION_DITTO_BLOCKS, we are able to * handle BPs with more than one DVA allocated. Set our max * replication level accordingly. 
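 * For example, a pool at a version prior to SPA_VERSION_DITTO_BLOCKS is
 * limited to a single DVA per block, as the check below enforces.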
*/ if (spa_version(spa) < SPA_VERSION_DITTO_BLOCKS) return (1); return (MIN(SPA_DVAS_PER_BP, spa_max_replication_override)); } int spa_prev_software_version(spa_t *spa) { return (spa->spa_prev_software_version); } uint64_t spa_deadman_synctime(spa_t *spa) { return (spa->spa_deadman_synctime); } uint64_t dva_get_dsize_sync(spa_t *spa, const dva_t *dva) { uint64_t asize = DVA_GET_ASIZE(dva); uint64_t dsize = asize; ASSERT(spa_config_held(spa, SCL_ALL, RW_READER) != 0); if (asize != 0 && spa->spa_deflate) { vdev_t *vd = vdev_lookup_top(spa, DVA_GET_VDEV(dva)); if (vd != NULL) dsize = (asize >> SPA_MINBLOCKSHIFT) * vd->vdev_deflate_ratio; } return (dsize); } uint64_t bp_get_dsize_sync(spa_t *spa, const blkptr_t *bp) { uint64_t dsize = 0; int d; for (d = 0; d < BP_GET_NDVAS(bp); d++) dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); return (dsize); } uint64_t bp_get_dsize(spa_t *spa, const blkptr_t *bp) { uint64_t dsize = 0; int d; spa_config_enter(spa, SCL_VDEV, FTAG, RW_READER); for (d = 0; d < BP_GET_NDVAS(bp); d++) dsize += dva_get_dsize_sync(spa, &bp->blk_dva[d]); spa_config_exit(spa, SCL_VDEV, FTAG); return (dsize); } /* * ========================================================================== * Initialization and Termination * ========================================================================== */ static int spa_name_compare(const void *a1, const void *a2) { const spa_t *s1 = a1; const spa_t *s2 = a2; int s; s = strcmp(s1->spa_name, s2->spa_name); return (AVL_ISIGN(s)); } void spa_boot_init(void) { spa_config_load(); } void spa_init(int mode) { mutex_init(&spa_namespace_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa_spare_lock, NULL, MUTEX_DEFAULT, NULL); mutex_init(&spa_l2cache_lock, NULL, MUTEX_DEFAULT, NULL); cv_init(&spa_namespace_cv, NULL, CV_DEFAULT, NULL); avl_create(&spa_namespace_avl, spa_name_compare, sizeof (spa_t), offsetof(spa_t, spa_avl)); avl_create(&spa_spare_avl, spa_spare_compare, sizeof (spa_aux_t), offsetof(spa_aux_t, aux_avl)); avl_create(&spa_l2cache_avl, spa_l2cache_compare, sizeof (spa_aux_t), offsetof(spa_aux_t, aux_avl)); spa_mode_global = mode; #ifndef _KERNEL if (spa_mode_global != FREAD && dprintf_find_string("watch")) { struct sigaction sa; sa.sa_flags = SA_SIGINFO; sigemptyset(&sa.sa_mask); sa.sa_sigaction = arc_buf_sigsegv; if (sigaction(SIGSEGV, &sa, NULL) == -1) { perror("could not enable watchpoints: " "sigaction(SIGSEGV, ...) = "); } else { arc_watch = B_TRUE; } } #endif fm_init(); refcount_init(); unique_init(); range_tree_init(); metaslab_alloc_trace_init(); ddt_init(); zio_init(); dmu_init(); zil_init(); vdev_cache_stat_init(); vdev_raidz_math_init(); vdev_file_init(); zfs_prop_init(); zpool_prop_init(); zpool_feature_init(); spa_config_load(); l2arc_start(); } void spa_fini(void) { l2arc_stop(); spa_evict_all(); vdev_file_fini(); vdev_cache_stat_fini(); vdev_raidz_math_fini(); zil_fini(); dmu_fini(); zio_fini(); ddt_fini(); metaslab_alloc_trace_fini(); range_tree_fini(); unique_fini(); refcount_fini(); fm_fini(); avl_destroy(&spa_namespace_avl); avl_destroy(&spa_spare_avl); avl_destroy(&spa_l2cache_avl); cv_destroy(&spa_namespace_cv); mutex_destroy(&spa_namespace_lock); mutex_destroy(&spa_spare_lock); mutex_destroy(&spa_l2cache_lock); } /* * Return whether this pool has slogs. No locking needed. 
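* (mc_rotor is expected to be non-NULL once a log vdev has been added to
* the log metaslab class, so a simple pointer check suffices.)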
* It's not a problem if the wrong answer is returned as it's only for * performance and not correctness */ boolean_t spa_has_slogs(spa_t *spa) { return (spa->spa_log_class->mc_rotor != NULL); } spa_log_state_t spa_get_log_state(spa_t *spa) { return (spa->spa_log_state); } void spa_set_log_state(spa_t *spa, spa_log_state_t state) { spa->spa_log_state = state; } boolean_t spa_is_root(spa_t *spa) { return (spa->spa_is_root); } boolean_t spa_writeable(spa_t *spa) { return (!!(spa->spa_mode & FWRITE)); } /* * Returns true if there is a pending sync task in any of the current * syncing txg, the current quiescing txg, or the current open txg. */ boolean_t spa_has_pending_synctask(spa_t *spa) { return (!txg_all_lists_empty(&spa->spa_dsl_pool->dp_sync_tasks)); } int spa_mode(spa_t *spa) { return (spa->spa_mode); } uint64_t spa_bootfs(spa_t *spa) { return (spa->spa_bootfs); } uint64_t spa_delegation(spa_t *spa) { return (spa->spa_delegation); } objset_t * spa_meta_objset(spa_t *spa) { return (spa->spa_meta_objset); } enum zio_checksum spa_dedup_checksum(spa_t *spa) { return (spa->spa_dedup_checksum); } /* * Reset pool scan stat per scan pass (or reboot). */ void spa_scan_stat_init(spa_t *spa) { /* data not stored on disk */ spa->spa_scan_pass_start = gethrestime_sec(); spa->spa_scan_pass_exam = 0; vdev_scan_stat_init(spa->spa_root_vdev); } /* * Get scan stats for zpool status reports */ int spa_scan_get_stats(spa_t *spa, pool_scan_stat_t *ps) { dsl_scan_t *scn = spa->spa_dsl_pool ? spa->spa_dsl_pool->dp_scan : NULL; if (scn == NULL || scn->scn_phys.scn_func == POOL_SCAN_NONE) return (SET_ERROR(ENOENT)); bzero(ps, sizeof (pool_scan_stat_t)); /* data stored on disk */ ps->pss_func = scn->scn_phys.scn_func; ps->pss_start_time = scn->scn_phys.scn_start_time; ps->pss_end_time = scn->scn_phys.scn_end_time; ps->pss_to_examine = scn->scn_phys.scn_to_examine; ps->pss_examined = scn->scn_phys.scn_examined; ps->pss_to_process = scn->scn_phys.scn_to_process; ps->pss_processed = scn->scn_phys.scn_processed; ps->pss_errors = scn->scn_phys.scn_errors; ps->pss_state = scn->scn_phys.scn_state; /* data not stored on disk */ ps->pss_pass_start = spa->spa_scan_pass_start; ps->pss_pass_exam = spa->spa_scan_pass_exam; return (0); } boolean_t spa_debug_enabled(spa_t *spa) { return (spa->spa_debug); } int spa_maxblocksize(spa_t *spa) { if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_BLOCKS)) return (SPA_MAXBLOCKSIZE); else return (SPA_OLD_MAXBLOCKSIZE); } int spa_maxdnodesize(spa_t *spa) { if (spa_feature_is_enabled(spa, SPA_FEATURE_LARGE_DNODE)) return (DNODE_MAX_SIZE); else return (DNODE_MIN_SIZE); } #if defined(_KERNEL) && defined(HAVE_SPL) /* Namespace manipulation */ EXPORT_SYMBOL(spa_lookup); EXPORT_SYMBOL(spa_add); EXPORT_SYMBOL(spa_remove); EXPORT_SYMBOL(spa_next); /* Refcount functions */ EXPORT_SYMBOL(spa_open_ref); EXPORT_SYMBOL(spa_close); EXPORT_SYMBOL(spa_refcount_zero); /* Pool configuration lock */ EXPORT_SYMBOL(spa_config_tryenter); EXPORT_SYMBOL(spa_config_enter); EXPORT_SYMBOL(spa_config_exit); EXPORT_SYMBOL(spa_config_held); /* Pool vdev add/remove lock */ EXPORT_SYMBOL(spa_vdev_enter); EXPORT_SYMBOL(spa_vdev_exit); /* Pool vdev state change lock */ EXPORT_SYMBOL(spa_vdev_state_enter); EXPORT_SYMBOL(spa_vdev_state_exit); /* Accessor functions */ EXPORT_SYMBOL(spa_shutting_down); EXPORT_SYMBOL(spa_get_dsl); EXPORT_SYMBOL(spa_get_rootblkptr); EXPORT_SYMBOL(spa_set_rootblkptr); EXPORT_SYMBOL(spa_altroot); EXPORT_SYMBOL(spa_sync_pass); EXPORT_SYMBOL(spa_name); EXPORT_SYMBOL(spa_guid); 
EXPORT_SYMBOL(spa_last_synced_txg); EXPORT_SYMBOL(spa_first_txg); EXPORT_SYMBOL(spa_syncing_txg); EXPORT_SYMBOL(spa_version); EXPORT_SYMBOL(spa_state); EXPORT_SYMBOL(spa_load_state); EXPORT_SYMBOL(spa_freeze_txg); EXPORT_SYMBOL(spa_get_asize); EXPORT_SYMBOL(spa_get_dspace); EXPORT_SYMBOL(spa_update_dspace); EXPORT_SYMBOL(spa_deflate); EXPORT_SYMBOL(spa_normal_class); EXPORT_SYMBOL(spa_log_class); EXPORT_SYMBOL(spa_max_replication); EXPORT_SYMBOL(spa_prev_software_version); EXPORT_SYMBOL(spa_get_failmode); EXPORT_SYMBOL(spa_suspended); EXPORT_SYMBOL(spa_bootfs); EXPORT_SYMBOL(spa_delegation); EXPORT_SYMBOL(spa_meta_objset); EXPORT_SYMBOL(spa_maxblocksize); EXPORT_SYMBOL(spa_maxdnodesize); /* Miscellaneous support routines */ EXPORT_SYMBOL(spa_rename); EXPORT_SYMBOL(spa_guid_exists); EXPORT_SYMBOL(spa_strdup); EXPORT_SYMBOL(spa_strfree); EXPORT_SYMBOL(spa_get_random); EXPORT_SYMBOL(spa_generate_guid); EXPORT_SYMBOL(snprintf_blkptr); EXPORT_SYMBOL(spa_freeze); EXPORT_SYMBOL(spa_upgrade); EXPORT_SYMBOL(spa_evict_all); EXPORT_SYMBOL(spa_lookup_by_guid); EXPORT_SYMBOL(spa_has_spare); EXPORT_SYMBOL(dva_get_dsize_sync); EXPORT_SYMBOL(bp_get_dsize_sync); EXPORT_SYMBOL(bp_get_dsize); EXPORT_SYMBOL(spa_has_slogs); EXPORT_SYMBOL(spa_is_root); EXPORT_SYMBOL(spa_writeable); EXPORT_SYMBOL(spa_mode); EXPORT_SYMBOL(spa_namespace_lock); /* BEGIN CSTYLED */ module_param(zfs_flags, uint, 0644); MODULE_PARM_DESC(zfs_flags, "Set additional debugging flags"); module_param(zfs_recover, int, 0644); MODULE_PARM_DESC(zfs_recover, "Set to attempt to recover from fatal errors"); module_param(zfs_free_leak_on_eio, int, 0644); MODULE_PARM_DESC(zfs_free_leak_on_eio, "Set to ignore IO errors during free and permanently leak the space"); module_param(zfs_deadman_synctime_ms, ulong, 0644); MODULE_PARM_DESC(zfs_deadman_synctime_ms, "Expiration time in milliseconds"); module_param(zfs_deadman_checktime_ms, ulong, 0644); MODULE_PARM_DESC(zfs_deadman_checktime_ms, "Dead I/O check interval in milliseconds"); module_param(zfs_deadman_enabled, int, 0644); MODULE_PARM_DESC(zfs_deadman_enabled, "Enable deadman timer"); module_param(spa_asize_inflation, int, 0644); MODULE_PARM_DESC(spa_asize_inflation, "SPA size estimate multiplication factor"); module_param(spa_slop_shift, int, 0644); MODULE_PARM_DESC(spa_slop_shift, "Reserved free space in pool"); /* END CSTYLED */ #endif diff --git a/scripts/zconfig.sh b/scripts/zconfig.sh index 45b66447ff91..e78c80d542af 100755 --- a/scripts/zconfig.sh +++ b/scripts/zconfig.sh @@ -1,696 +1,696 @@ #!/bin/bash # # ZFS/ZPOOL configuration test script. basedir="$(dirname $0)" SCRIPT_COMMON=common.sh if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then . "${basedir}/${SCRIPT_COMMON}" else echo "Missing helper script ${SCRIPT_COMMON}" && exit 1 fi PROG=zconfig.sh usage() { cat << EOF USAGE: $0 [hvcts] DESCRIPTION: ZFS/ZPOOL configuration tests OPTIONS: -h Show this message -v Verbose -c Cleanup lo+file devices at start -t <#> Run listed tests -s <#> Skip listed tests EOF } while getopts 'hvct:s:?' OPTION; do case $OPTION in h) usage exit 1 ;; v) VERBOSE=1 ;; c) CLEANUP=1 ;; t) TESTS_RUN=($OPTARG) ;; s) TESTS_SKIP=($OPTARG) ;; ?) usage exit ;; esac done if [ $(id -u) != 0 ]; then die "Must run as root" fi # Initialize the test suite init # Disable the udev rule 90-zfs.rules to prevent the zfs module # stack from being loaded due to the detection of a zfs device. # This is important because this test scripts require full control # over when and how the modules are loaded/unloaded. 
A trap is # set to ensure the udev rule is correctly replaced on exit. RULE=${udevruledir}/90-zfs.rules if test -e ${RULE}; then trap "mv ${RULE}.disabled ${RULE}" INT TERM EXIT mv ${RULE} ${RULE}.disabled fi # Perform pre-cleanup if requested if [ ${CLEANUP} ]; then ${ZFS_SH} -u cleanup_md_devices cleanup_loop_devices rm -f /tmp/zpool.cache.* fi # Check if we need to skip the tests that require scsi_debug and lsscsi. SCSI_DEBUG=0 ${INFOMOD} scsi_debug &>/dev/null && SCSI_DEBUG=1 HAVE_LSSCSI=0 test -f ${LSSCSI} && HAVE_LSSCSI=1 if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ]; then echo "Skipping test 10 which requires the scsi_debug " \ "module and the ${LSSCSI} utility" fi # Validate persistent zpool.cache configuration. test_1() { local POOL_NAME=test1 local TMP_FILE1=`mktemp` local TMP_FILE2=`mktemp` local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool and save its status for comparison. ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3 # Unload/load the module stack and verify the pool persists. ${ZFS_SH} -u || fail 4 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 5 ${ZPOOL} import -c ${TMP_CACHE} ${POOL_NAME} || fail 5 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7 # Cleanup the test pool and temporary files ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 8 rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 9 ${ZFS_SH} -u || fail 10 pass } run_test 1 "persistent zpool.cache" # Validate ZFS disk scanning and import w/out zpool.cache configuration. test_2() { local POOL_NAME=test2 local TMP_FILE1=`mktemp` local TMP_FILE2=`mktemp` local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool and save its status for comparison. ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3 # Unload the module stack, remove the cache file, load the module # stack and attempt to probe the disks to import the pool. As # a cross check, verify the old pool state against the imported state.
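# (By hand this corresponds to listing importable pools with
# `zpool import -d /dev` and then importing by name, as done below.)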
${ZFS_SH} -u || fail 4 rm -f ${TMP_CACHE} || fail 5 ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 6 ${ZPOOL} import -d /dev ${POOL_NAME} || fail 8 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 9 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 10 # Cleanup the test pool and temporary files ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 11 rm -f ${TMP_FILE1} ${TMP_FILE2} || fail 12 ${ZFS_SH} -u || fail 13 pass } run_test 2 "scan disks for pools to import" zconfig_zvol_device_stat() { local EXPECT=$1 local POOL_NAME=/dev/zvol/$2 local ZVOL_NAME=/dev/zvol/$3 local SNAP_NAME=/dev/zvol/$4 local CLONE_NAME=/dev/zvol/$5 local COUNT=0 # Briefly delay for udev udev_trigger # Pool exists stat ${POOL_NAME} &>/dev/null && let COUNT=$COUNT+1 # Volume and partitions stat ${ZVOL_NAME} &>/dev/null && let COUNT=$COUNT+1 stat ${ZVOL_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1 stat ${ZVOL_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1 # Snapshot with partitions stat ${SNAP_NAME} &>/dev/null && let COUNT=$COUNT+1 stat ${SNAP_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1 stat ${SNAP_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1 # Clone with partitions stat ${CLONE_NAME} &>/dev/null && let COUNT=$COUNT+1 stat ${CLONE_NAME}-part1 &>/dev/null && let COUNT=$COUNT+1 stat ${CLONE_NAME}-part2 &>/dev/null && let COUNT=$COUNT+1 if [ $EXPECT -ne $COUNT ]; then return 1 fi return 0 } # zpool import/export device check # (1 volume, 2 partitions, 1 snapshot, 1 clone) test_3() { local POOL_NAME=tank local ZVOL_NAME=volume local SNAP_NAME=snap local CLONE_NAME=clone local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME} local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME} local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME} local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool, volume, partition, snapshot, and clone. 
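# With snapdev=visible this should produce ten nodes under /dev/zvol (the
# pool directory, the volume plus its two partitions, and the same trio for
# both the snapshot and the clone), which is what the
# zconfig_zvol_device_stat 10 checks below verify.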
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3 ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3 label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4 partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4 partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6 # Verify the devices were created zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7 # Export the pool ${ZPOOL} export ${POOL_NAME} || fail 8 # verify the devices were removed zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9 # Import the pool, wait 1 second for udev ${ZPOOL} import ${POOL_NAME} || fail 10 # Verify the devices were created zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11 # Toggle the snapdev and observe snapshot device links toggled ${ZFS} set snapdev=hidden ${FULL_ZVOL_NAME} || fail 12 zconfig_zvol_device_stat 7 ${POOL_NAME} ${FULL_ZVOL_NAME} \ "invalid" ${FULL_CLONE_NAME} || fail 13 ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 14 zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 15 # Destroy the pool and consequently the devices ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16 # verify the devices were removed zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 17 ${ZFS_SH} -u || fail 18 rm -f ${TMP_CACHE} || fail 19 pass } run_test 3 "zpool import/export device" # zpool insmod/rmmod device check (1 volume, 1 snapshot, 1 clone) test_4() { POOL_NAME=tank ZVOL_NAME=volume SNAP_NAME=snap CLONE_NAME=clone FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME} FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME} FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME} TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool, volume, snapshot, and clone ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZFS} create -V 100M ${FULL_ZVOL_NAME} || fail 3 ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3 label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4 partition /dev/zvol/${FULL_ZVOL_NAME} primary 1% 50% || fail 4 partition /dev/zvol/${FULL_ZVOL_NAME} primary 51% -1 || fail 4 ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 5 ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 6 # Verify the devices were created zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 7 # Unload the modules ${ZFS_SH} -u || fail 8 # Verify the devices were removed zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 9 # Load the modules, list the pools to ensure they are opened ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 10 ${ZPOOL} import -c ${TMP_CACHE} ${POOL_NAME} || fail 10 ${ZPOOL} list &>/dev/null # Verify the devices were created zconfig_zvol_device_stat 10 ${POOL_NAME} ${FULL_ZVOL_NAME} \ ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 11 # Destroy the pool and consequently the devices ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 12 # Verify the devices were removed zconfig_zvol_device_stat 0 ${POOL_NAME} ${FULL_ZVOL_NAME} \ 
${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 13 ${ZFS_SH} -u || fail 14 rm -f ${TMP_CACHE} || fail 15 pass } run_test 4 "zpool insmod/rmmod device" # ZVOL volume sanity check test_5() { local POOL_NAME=tank local ZVOL_NAME=fish local FULL_NAME=${POOL_NAME}/${ZVOL_NAME} local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool and volume. ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2 ${ZFS} create -V 800M ${FULL_NAME} || fail 3 label /dev/zvol/${FULL_NAME} msdos || fail 4 partition /dev/zvol/${FULL_NAME} primary 1 -1 || fail 4 format /dev/zvol/${FULL_NAME}-part1 ext2 || fail 5 # Mount the ext2 filesystem and copy some data to it. mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6 mount /dev/zvol/${FULL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 || fail 7 cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 8 sync # Verify the copied files match the original files. diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \ &>/dev/null || fail 9 # Remove the files, umount, destroy the volume and pool. rm -Rf /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} || fail 10 umount /tmp/${ZVOL_NAME}-part1 || fail 11 rmdir /tmp/${ZVOL_NAME}-part1 || fail 12 ${ZFS} destroy ${FULL_NAME} || fail 13 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 14 ${ZFS_SH} -u || fail 15 rm -f ${TMP_CACHE} || fail 16 pass } run_test 5 "zvol+ext2 volume" # ZVOL snapshot sanity check test_6() { local POOL_NAME=tank local ZVOL_NAME=fish local SNAP_NAME=pristine local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME} local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME} local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool and volume. ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raid0 || fail 2 - ${ZFS} create -V 800M ${FULL_ZVOL_NAME} || fail 3 + ${ZFS} create -s -V 800M ${FULL_ZVOL_NAME} || fail 3 ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3 label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4 partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4 format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5 # Mount the ext2 filesystem and copy some data to it. mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 6 mount /dev/zvol/${FULL_ZVOL_NAME}-part1 /tmp/${ZVOL_NAME}-part1 \ || fail 7 # Snapshot the pristine ext2 filesystem and mount it read-only. ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 8 wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 8 mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9 mount /dev/zvol/${FULL_SNAP_NAME}-part1 /tmp/${SNAP_NAME}-part1 \ &>/dev/null || fail 10 # Copy to original volume cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11 sync # Verify the copied files match the original files, # and the copied files do NOT appear in the snapshot. diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \ &>/dev/null || fail 12 diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \ &>/dev/null && fail 13 # umount, destroy the snapshot, volume, and pool. 
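# The snapshot must be unmounted and destroyed before its origin volume,
# since a dataset that still has snapshots cannot be destroyed without -r/-R.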
umount /tmp/${SNAP_NAME}-part1 || fail 14 rmdir /tmp/${SNAP_NAME}-part1 || fail 15 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 16 umount /tmp/${ZVOL_NAME}-part1 || fail 17 rmdir /tmp/${ZVOL_NAME}-part1 || fail 18 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 19 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 20 ${ZFS_SH} -u || fail 21 rm -f ${TMP_CACHE} || fail 22 pass } run_test 6 "zvol+ext2 snapshot" # ZVOL clone sanity check test_7() { local POOL_NAME=tank local ZVOL_NAME=fish local SNAP_NAME=pristine local CLONE_NAME=clone local FULL_ZVOL_NAME=${POOL_NAME}/${ZVOL_NAME} local FULL_SNAP_NAME=${POOL_NAME}/${ZVOL_NAME}@${SNAP_NAME} local FULL_CLONE_NAME=${POOL_NAME}/${CLONE_NAME} local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create a pool and volume. ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZFS} create -V 300M ${FULL_ZVOL_NAME} || fail 3 ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME} || fail 3 label /dev/zvol/${FULL_ZVOL_NAME} msdos || fail 4 partition /dev/zvol/${FULL_ZVOL_NAME} primary 1 -1 || fail 4 format /dev/zvol/${FULL_ZVOL_NAME}-part1 ext2 || fail 5 # Snapshot the pristine ext2 filesystem. ${ZFS} snapshot ${FULL_SNAP_NAME} || fail 6 wait_udev /dev/zvol/${FULL_SNAP_NAME}-part1 30 || fail 7 # Mount the ext2 filesystem so some data can be copied to it. mkdir -p /tmp/${ZVOL_NAME}-part1 || fail 7 mount /dev/zvol/${FULL_ZVOL_NAME}-part1 \ /tmp/${ZVOL_NAME}-part1 || fail 8 # Mount the pristine ext2 snapshot. mkdir -p /tmp/${SNAP_NAME}-part1 || fail 9 mount /dev/zvol/${FULL_SNAP_NAME}-part1 \ /tmp/${SNAP_NAME}-part1 &>/dev/null || fail 10 # Copy to original volume. cp -RL ${SRC_DIR} /tmp/${ZVOL_NAME}-part1 || fail 11 sync # Verify the copied files match the original files, # and the copied files do NOT appear in the snapshot. diff -ur ${SRC_DIR} /tmp/${ZVOL_NAME}-part1/${SRC_DIR##*/} \ &>/dev/null || fail 12 diff -ur ${SRC_DIR} /tmp/${SNAP_NAME}-part1/${SRC_DIR##*/} \ &>/dev/null && fail 13 # Clone from the original pristine snapshot ${ZFS} clone ${FULL_SNAP_NAME} ${FULL_CLONE_NAME} || fail 14 wait_udev /dev/zvol/${FULL_CLONE_NAME}-part1 30 || fail 14 mkdir -p /tmp/${CLONE_NAME}-part1 || fail 15 mount /dev/zvol/${FULL_CLONE_NAME}-part1 \ /tmp/${CLONE_NAME}-part1 || fail 16 # Verify the clone matches the pristine snapshot, # and the files copied to the original volume are NOT there. diff -ur /tmp/${SNAP_NAME}-part1 /tmp/${CLONE_NAME}-part1 \ &>/dev/null || fail 17 diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \ &>/dev/null && fail 18 # Copy to cloned volume. cp -RL ${SRC_DIR} /tmp/${CLONE_NAME}-part1 || fail 19 sync # Verify the clone matches the modified original volume. diff -ur /tmp/${ZVOL_NAME}-part1 /tmp/${CLONE_NAME}-part1 \ &>/dev/null || fail 20 # umount, destroy the snapshot, volume, and pool. 
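# The clone goes first: a snapshot with dependent clones cannot be
# destroyed, so the teardown order is clone, then snapshot, then volume.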
umount /tmp/${CLONE_NAME}-part1 || fail 21 rmdir /tmp/${CLONE_NAME}-part1 || fail 22 ${ZFS} destroy ${FULL_CLONE_NAME} || fail 23 umount /tmp/${SNAP_NAME}-part1 || fail 24 rmdir /tmp/${SNAP_NAME}-part1 || fail 25 ${ZFS} destroy ${FULL_SNAP_NAME} || fail 26 umount /tmp/${ZVOL_NAME}-part1 || fail 27 rmdir /tmp/${ZVOL_NAME}-part1 || fail 28 ${ZFS} destroy ${FULL_ZVOL_NAME} || fail 29 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 30 ${ZFS_SH} -u || fail 31 rm -f ${TMP_CACHE} || fail 32 pass } run_test 7 "zvol+ext2 clone" # Send/Receive sanity check test_8() { local POOL_NAME1=tank1 local POOL_NAME2=tank2 local ZVOL_NAME=fish local SNAP_NAME=snap local FULL_ZVOL_NAME1=${POOL_NAME1}/${ZVOL_NAME} local FULL_ZVOL_NAME2=${POOL_NAME2}/${ZVOL_NAME} local FULL_SNAP_NAME1=${POOL_NAME1}/${ZVOL_NAME}@${SNAP_NAME} local FULL_SNAP_NAME2=${POOL_NAME2}/${ZVOL_NAME}@${SNAP_NAME} local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` # Create two pools and a volume ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 || fail 2 ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 || fail 2 ${ZFS} create -V 300M ${FULL_ZVOL_NAME1} || fail 3 ${ZFS} set snapdev=visible ${FULL_ZVOL_NAME1} || fail 3 label /dev/zvol/${FULL_ZVOL_NAME1} msdos || fail 4 partition /dev/zvol/${FULL_ZVOL_NAME1} primary 1 -1 || fail 4 format /dev/zvol/${FULL_ZVOL_NAME1}-part1 ext2 || fail 5 # Mount the ext2 filesystem and copy some data to it. mkdir -p /tmp/${FULL_ZVOL_NAME1}-part1 || fail 6 mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \ /tmp/${FULL_ZVOL_NAME1}-part1 || fail 7 cp -RL ${SRC_DIR} /tmp/${FULL_ZVOL_NAME1}-part1 || fail 8 # Unmount, snapshot, mount the ext2 filesystem so it may be sent. # We only unmount to ensure the ext2 filesystem is clean. umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 9 ${ZFS} snapshot ${FULL_SNAP_NAME1} || fail 10 wait_udev /dev/zvol/${FULL_SNAP_NAME1} 30 || fail 10 mount /dev/zvol/${FULL_ZVOL_NAME1}-part1 \ /tmp/${FULL_ZVOL_NAME1}-part1 || fail 11 # Send/receive the snapshot from POOL_NAME1 to POOL_NAME2 (${ZFS} send ${FULL_SNAP_NAME1} | \ ${ZFS} receive ${FULL_ZVOL_NAME2}) || fail 12 wait_udev /dev/zvol/${FULL_ZVOL_NAME2}-part1 30 || fail 12 # Mount the sent ext2 filesystem. mkdir -p /tmp/${FULL_ZVOL_NAME2}-part1 || fail 13 mount /dev/zvol/${FULL_ZVOL_NAME2}-part1 \ /tmp/${FULL_ZVOL_NAME2}-part1 || fail 14 # Verify the contents of the volumes match diff -ur /tmp/${FULL_ZVOL_NAME1}-part1 /tmp/${FULL_ZVOL_NAME2}-part1 \ &>/dev/null || fail 15 # Umount, then destroy the snapshots, volumes, and pools. umount /tmp/${FULL_ZVOL_NAME1}-part1 || fail 16 umount /tmp/${FULL_ZVOL_NAME2}-part1 || fail 17 rmdir /tmp/${FULL_ZVOL_NAME1}-part1 || fail 18 rmdir /tmp/${FULL_ZVOL_NAME2}-part1 || fail 19 rmdir /tmp/${POOL_NAME1} || fail 20 rmdir /tmp/${POOL_NAME2} || fail 21 ${ZFS} destroy ${FULL_SNAP_NAME1} || fail 22 ${ZFS} destroy ${FULL_SNAP_NAME2} || fail 23 ${ZFS} destroy ${FULL_ZVOL_NAME1} || fail 24 ${ZFS} destroy ${FULL_ZVOL_NAME2} || fail 25 ${ZPOOL_CREATE_SH} -p ${POOL_NAME1} -c lo-raidz2 -d || fail 26 ${ZPOOL_CREATE_SH} -p ${POOL_NAME2} -c lo-raidz2 -d || fail 27 ${ZFS_SH} -u || fail 28 rm -f ${TMP_CACHE} || fail 29 pass } run_test 8 "zfs send/receive" # zpool event sanity check test_9() { local POOL_NAME=tank local ZVOL_NAME=fish local FULL_NAME=${POOL_NAME}/${ZVOL_NAME} local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` local TMP_EVENTS=`mktemp -p /tmp zpool.events.XXXXXXXX` # Create a pool and volume.
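# Creating the pool should log exactly one sysevent.fs.zfs.pool_create
# event, which the grep below counts.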
${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZFS} create -V 300M ${FULL_NAME} || fail 3 udev_trigger # Dump the events, there should be a pool create event ${ZPOOL} events >${TMP_EVENTS} || fail 4 MATCHES=`grep -c sysevent\.fs\.zfs\.pool_create ${TMP_EVENTS}` [ $MATCHES -eq 1 ] || fail 5 # Clear the events and ensure there are none. ${ZPOOL} events -c >/dev/null || fail 6 ${ZPOOL} events >${TMP_EVENTS} || fail 7 EVENTS=`wc -l ${TMP_EVENTS} | cut -f1 -d' '` [ $EVENTS -gt 1 ] && fail 8 ${ZFS} destroy ${FULL_NAME} || fail 9 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 10 ${ZFS_SH} -u || fail 11 rm -f ${TMP_CACHE} || fail 12 rm -f ${TMP_EVENTS} || fail 13 pass } run_test 9 "zpool events" zconfig_add_vdev() { local POOL_NAME=$1 local TYPE=$2 local DEVICE=$3 local TMP_FILE1=`mktemp` local TMP_FILE2=`mktemp` local TMP_FILE3=`mktemp` BASE_DEVICE=`basename ${DEVICE}` ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} ${ZPOOL} add -f ${POOL_NAME} ${TYPE} ${DEVICE} 2>/dev/null || return 1 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} diff ${TMP_FILE1} ${TMP_FILE2} > ${TMP_FILE3} [ `wc -l ${TMP_FILE3}|${AWK} '{print $1}'` -eq 3 ] || return 1 PARENT_VDEV=`tail -2 ${TMP_FILE3} | head -1 | ${AWK} '{print $NF}'` case $TYPE in cache) [ "${PARENT_VDEV}" = "${TYPE}" ] || return 1 ;; log) [ "${PARENT_VDEV}" = "logs" ] || return 1 ;; esac if ! tail -1 ${TMP_FILE3} | egrep -q "^>[[:space:]]+${BASE_DEVICE}[[:space:]]+ONLINE" ; then return 1 fi rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_FILE3} return 0 } # zpool add and remove sanity check test_10() { local POOL_NAME=tank local TMP_CACHE=`mktemp -p /tmp zpool.cache.XXXXXXXX` local TMP_FILE1=`mktemp` local TMP_FILE2=`mktemp` if [ ${SCSI_DEBUG} -eq 0 ] || [ ${HAVE_LSSCSI} -eq 0 ] ; then skip return fi test `${LSMOD} | grep -c scsi_debug` -gt 0 && \ (${RMMOD} scsi_debug || exit 1) /sbin/modprobe scsi_debug dev_size_mb=128 || die "Error $? 
creating scsi_debug device" udev_trigger SDDEVICE=`${LSSCSI}|${AWK} '/scsi_debug/ { print $6; exit }'` BASE_SDDEVICE=`basename $SDDEVICE` # Create a pool ${ZFS_SH} zfs="spa_config_path=${TMP_CACHE}" || fail 1 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 || fail 2 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE1} || fail 3 # Add and remove a cache vdev by full path zconfig_add_vdev ${POOL_NAME} cache ${SDDEVICE} || fail 4 ${ZPOOL} remove ${POOL_NAME} ${SDDEVICE} || fail 5 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 6 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 7 sleep 1 # Add and remove a cache vdev by shorthand path zconfig_add_vdev ${POOL_NAME} cache ${BASE_SDDEVICE} || fail 8 ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 9 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 10 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 11 sleep 1 # Add and remove a log vdev zconfig_add_vdev ${POOL_NAME} log ${BASE_SDDEVICE} || fail 12 ${ZPOOL} remove ${POOL_NAME} ${BASE_SDDEVICE} || fail 13 ${ZPOOL} status ${POOL_NAME} >${TMP_FILE2} || fail 14 cmp ${TMP_FILE1} ${TMP_FILE2} || fail 15 ${ZPOOL_CREATE_SH} -p ${POOL_NAME} -c lo-raidz2 -d || fail 16 ${ZFS_SH} -u || fail 17 ${RMMOD} scsi_debug || fail 18 rm -f ${TMP_FILE1} ${TMP_FILE2} ${TMP_CACHE} || fail 19 pass } run_test 10 "zpool add/remove vdev" exit 0 diff --git a/scripts/zfs-tests.sh b/scripts/zfs-tests.sh index 2e27c0a1c485..00a7ceabcca9 100755 --- a/scripts/zfs-tests.sh +++ b/scripts/zfs-tests.sh @@ -1,353 +1,353 @@ #!/bin/bash # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License, Version 1.0 only # (the "License"). You may not use this file except in compliance # with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # basedir="$(dirname $0)" SCRIPT_COMMON=common.sh if [ -f "${basedir}/${SCRIPT_COMMON}" ]; then . "${basedir}/${SCRIPT_COMMON}" else echo "Missing helper script ${SCRIPT_COMMON}" && exit 1 fi . $STF_SUITE/include/default.cfg PROG=zfs-tests.sh SUDO=/usr/bin/sudo SETENFORCE=/usr/sbin/setenforce VERBOSE= QUIET= CLEANUP=1 CLEANUPALL=0 LOOPBACK=1 -FILESIZE="2G" +FILESIZE="4G" RUNFILE=${RUNFILE:-"linux.run"} FILEDIR=${FILEDIR:-/var/tmp} DISKS=${DISKS:-""} # # Attempt to remove loopback devices and files which where created earlier # by this script to run the test framework. The '-k' option may be passed # to the script to suppress cleanup for debugging purposes. 
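# Any device-mapper target layered on a loopback is removed first with
# dmsetup, the loopback itself is then detached with losetup -d, and
# finally the backing files are deleted.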
# cleanup() { if [ $CLEANUP -eq 0 ]; then return 0 fi if [ $LOOPBACK -eq 1 ]; then for TEST_LOOPBACK in ${LOOPBACKS}; do LOOP_DEV=$(basename $TEST_LOOPBACK) DM_DEV=$(${SUDO} ${DMSETUP} ls 2>/dev/null | \ grep ${LOOP_DEV} | cut -f1) if [ -n "$DM_DEV" ]; then ${SUDO} ${DMSETUP} remove ${DM_DEV} || echo "Failed to remove: ${DM_DEV}" fi if [ -n "${TEST_LOOPBACK}" ]; then ${SUDO} ${LOSETUP} -d ${TEST_LOOPBACK} || echo "Failed to remove: ${TEST_LOOPBACK}" fi done fi for TEST_FILE in ${FILES}; do rm -f ${TEST_FILE} &>/dev/null done } trap cleanup EXIT # # Attempt to remove all testpools (testpool.XXX), unopened dm devices, # loopback devices, and files. This is a useful way to clean up a previous # test run failure which has left the system in an unknown state. This can # be dangerous and should only be used in a dedicated test environment. # cleanup_all() { local TEST_POOLS=$(${SUDO} ${ZPOOL} list -H -o name | grep testpool) local TEST_LOOPBACKS=$(${SUDO} ${LOSETUP} -a|grep file-vdev|cut -f1 -d:) local TEST_FILES=$(ls /var/tmp/file-vdev* 2>/dev/null) msg msg "--- Cleanup ---" msg "Removing pool(s): $(echo ${TEST_POOLS} | tr '\n' ' ')" for TEST_POOL in $TEST_POOLS; do ${SUDO} ${ZPOOL} destroy ${TEST_POOL} done msg "Removing dm(s): $(${SUDO} ${DMSETUP} ls | grep loop | tr '\n' ' ')" ${SUDO} ${DMSETUP} remove_all msg "Removing loopback(s): $(echo ${TEST_LOOPBACKS} | tr '\n' ' ')" for TEST_LOOPBACK in $TEST_LOOPBACKS; do ${SUDO} ${LOSETUP} -d ${TEST_LOOPBACK} done msg "Removing file(s): $(echo ${TEST_FILES} | tr '\n' ' ')" for TEST_FILE in $TEST_FILES; do ${SUDO} rm -f ${TEST_FILE} done } # # Log a failure message, cleanup, and return an error. # fail() { echo -e "${PROG}: $1" >&2 cleanup exit 1 } # # Takes a name as its only argument and looks for the following variations # on that name. If one is found it is returned. # # $RUNFILEDIR/<name> # $RUNFILEDIR/<name>.run # <name> # <name>.run # find_runfile() { local NAME=$1 local RESULT="" if [ -f "$RUNFILEDIR/$NAME" ]; then RESULT="$RUNFILEDIR/$NAME" elif [ -f "$RUNFILEDIR/$NAME.run" ]; then RESULT="$RUNFILEDIR/$NAME.run" elif [ -f "$NAME" ]; then RESULT="$NAME" elif [ -f "$NAME.run" ]; then RESULT="$NAME.run" fi echo "$RESULT" } # # Output a useful usage message. # usage() { cat << EOF USAGE: $0 [hvqxkf] [-s SIZE] [-r RUNFILE] DESCRIPTION: ZFS Test Suite launch script OPTIONS: -h Show this message -v Verbose zfs-tests.sh output -q Quiet test-runner output -x Remove all testpools, dm, lo, and files (unsafe) -k Disable cleanup after test failure -f Use files only, disables block device tests -d DIR Use DIR for files and loopback devices - -s SIZE Use vdevs of SIZE (default: 2G) + -s SIZE Use vdevs of SIZE (default: 4G) -r RUNFILE Run tests in RUNFILE (default: linux.run) EXAMPLES: # Run the default (linux) suite of tests and output the configuration used. $0 -v # Run a smaller suite of tests designed to run more quickly. $0 -r linux-fast # Cleanup a previous run of the test suite prior to testing, run the # default (linux) suite of tests and perform no cleanup on exit. $0 -x EOF } while getopts 'hvqxkfd:s:r:?' OPTION; do case $OPTION in h) usage exit 1 ;; v) VERBOSE=1 ;; q) QUIET="-q" ;; x) CLEANUPALL=1 ;; k) CLEANUP=0 ;; f) LOOPBACK=0 ;; d) FILEDIR="$OPTARG" ;; s) FILESIZE="$OPTARG" ;; r) RUNFILE="$OPTARG" ;; ?) usage exit ;; esac done shift $((OPTIND-1)) FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"} LOOPBACKS=${LOOPBACKS:-""} # # Attempt to locate the runfile describing the test workload.
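# For example, `-r linux-fast` resolves to $RUNFILEDIR/linux-fast.run when
# that file exists; a name or path that already names a file is used as is.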
# if [ -n "$RUNFILE" ]; then SAVED_RUNFILE="$RUNFILE" RUNFILE=$(find_runfile "$RUNFILE") [ -z "$RUNFILE" ] && fail "Cannot find runfile: $SAVED_RUNFILE" fi if [ ! -r "$RUNFILE" ]; then fail "Cannot read runfile: $RUNFILE" fi # # This script should not be run as root. Instead the test user, which may # be a normal user account, needs to be configured such that it can # run commands via sudo passwordlessly. # if [ $(id -u) = "0" ]; then fail "This script must not be run as root." fi if [ $(sudo whoami) != "root" ]; then fail "Passwordless sudo access required." fi # # Check if ksh exists # if [ -z "$(which ksh 2>/dev/null)" ]; then fail "This test suite requires ksh." fi # # Verify the ZFS module stack is loaded. # ${SUDO} ${ZFS_SH} &>/dev/null # # Attempt to cleanup all previous state for a new test run. # if [ $CLEANUPALL -ne 0 ]; then cleanup_all fi # # By default preserve any existing pools # if [ -z "${KEEP}" ]; then KEEP=$(${SUDO} ${ZPOOL} list -H -o name) if [ -z "${KEEP}" ]; then KEEP="rpool" fi fi __ZFS_POOL_EXCLUDE="$(echo $KEEP | sed ':a;N;s/\n/ /g;ba')" msg msg "--- Configuration ---" msg "Runfile: $RUNFILE" msg "STF_TOOLS: $STF_TOOLS" msg "STF_SUITE: $STF_SUITE" # # No DISKS have been provided so basic file or loopback based devices # must be created for the test suite to use. # if [ -z "${DISKS}" ]; then # # Create sparse files for the test suite. These may be used # directly or have loopback devices layered on them. # for TEST_FILE in ${FILES}; do [ -f "$TEST_FILE" ] && fail "Failed file exists: ${TEST_FILE}" truncate -s ${FILESIZE} ${TEST_FILE} || fail "Failed creating: ${TEST_FILE} ($?)" DISKS="$DISKS$TEST_FILE " done # # If requested, setup loopback devices backed by the sparse files. # if [ $LOOPBACK -eq 1 ]; then DISKS="" check_loop_utils for TEST_FILE in ${FILES}; do TEST_LOOPBACK=$(${SUDO} ${LOSETUP} -f) ${SUDO} ${LOSETUP} ${TEST_LOOPBACK} ${TEST_FILE} || fail "Failed: ${TEST_FILE} -> ${TEST_LOOPBACK}" LOOPBACKS="${LOOPBACKS}${TEST_LOOPBACK} " DISKS="$DISKS$(basename $TEST_LOOPBACK) " done fi fi NUM_DISKS=$(echo ${DISKS} | $AWK '{print NF}') [ $NUM_DISKS -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)" # # Disable SELinux until the ZFS Test Suite has been updated accordingly. # if [ -x ${SETENFORCE} ]; then ${SUDO} ${SETENFORCE} permissive &>/dev/null fi msg "FILEDIR: $FILEDIR" msg "FILES: $FILES" msg "LOOPBACKS: $LOOPBACKS" msg "DISKS: $DISKS" msg "NUM_DISKS: $NUM_DISKS" msg "FILESIZE: $FILESIZE" msg "Keep pool(s): $KEEP" msg "" export STF_TOOLS export STF_SUITE export DISKS export KEEP export __ZFS_POOL_EXCLUDE msg "${TEST_RUNNER} ${QUIET} -c ${RUNFILE} -i ${STF_SUITE}" ${TEST_RUNNER} ${QUIET} -c ${RUNFILE} -i ${STF_SUITE} RESULT=$? echo exit ${RESULT} diff --git a/tests/runfiles/linux.run b/tests/runfiles/linux.run index 079bc1a81321..ec42b8e40032 100644 --- a/tests/runfiles/linux.run +++ b/tests/runfiles/linux.run @@ -1,667 +1,668 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # [DEFAULT] pre = setup quiet = False pre_user = root user = root timeout = 600 post_user = root post = cleanup outputdir = /var/tmp/test_results # DISABLED: update to use ZFS_ACL_* variables and user_run helper.
# posix_001_pos # posix_002_pos [tests/functional/acl/posix] tests = ['posix_003_pos'] [tests/functional/atime] tests = ['atime_001_pos', 'atime_002_neg', 'atime_003_pos'] # DISABLED: # bootfs_006_pos - needs investigation # bootfs_008_neg - needs investigation [tests/functional/bootfs] tests = ['bootfs_001_pos', 'bootfs_002_neg', 'bootfs_003_pos', 'bootfs_004_neg', 'bootfs_005_neg', 'bootfs_007_neg'] # DISABLED: # cache_001_pos - needs investigation # cache_010_neg - needs investigation [tests/functional/cache] tests = ['cache_002_pos', 'cache_003_pos', 'cache_004_neg', 'cache_005_neg', 'cache_006_pos', 'cache_007_neg', 'cache_008_neg', 'cache_009_pos', 'cache_011_pos'] # DISABLED: needs investigation #[tests/functional/cachefile] #tests = ['cachefile_001_pos', 'cachefile_002_pos', 'cachefile_003_pos', # 'cachefile_004_pos'] #pre = #post = # DISABLED: needs investigation # 'sensitive_none_lookup', 'sensitive_none_delete', # 'sensitive_formd_lookup', 'sensitive_formd_delete', # 'insensitive_none_lookup', 'insensitive_none_delete', # 'insensitive_formd_lookup', 'insensitive_formd_delete', # 'mixed_none_lookup', 'mixed_none_lookup_ci', 'mixed_none_delete', # 'mixed_formd_lookup', 'mixed_formd_lookup_ci', 'mixed_formd_delete'] [tests/functional/casenorm] tests = ['case_all_values', 'norm_all_values'] [tests/functional/chattr] tests = ['chattr_001_pos', 'chattr_002_neg'] [tests/functional/checksum] tests = ['run_edonr_test', 'run_sha2_test', 'run_skein_test', 'filetest_001_pos'] [tests/functional/clean_mirror] tests = [ 'clean_mirror_001_pos', 'clean_mirror_002_pos', 'clean_mirror_003_pos', 'clean_mirror_004_pos'] [tests/functional/cli_root/zdb] tests = ['zdb_001_neg', 'zdb_002_pos'] pre = post = [tests/functional/cli_root/zfs] tests = ['zfs_001_neg', 'zfs_002_pos', 'zfs_003_neg'] # DISABLED: # zfs_clone_005_pos - busy unmount [tests/functional/cli_root/zfs_clone] tests = ['zfs_clone_001_neg', 'zfs_clone_002_pos', 'zfs_clone_003_pos', 'zfs_clone_004_pos', 'zfs_clone_006_pos', 'zfs_clone_007_pos', 'zfs_clone_008_neg', 'zfs_clone_009_neg', 'zfs_clone_010_pos'] # DISABLED: # zfs_copies_003_pos - https://github.com/zfsonlinux/zfs/issues/3484 # zfs_copies_005_neg - https://github.com/zfsonlinux/zfs/issues/3484 [tests/functional/cli_root/zfs_copies] tests = ['zfs_copies_001_pos', 'zfs_copies_002_pos', 'zfs_copies_004_neg', 'zfs_copies_006_pos'] [tests/functional/cli_root/zfs_create] tests = ['zfs_create_001_pos', 'zfs_create_002_pos', 'zfs_create_003_pos', 'zfs_create_004_pos', 'zfs_create_005_pos', 'zfs_create_006_pos', 'zfs_create_007_pos', 'zfs_create_008_neg', 'zfs_create_009_neg', 'zfs_create_010_neg', 'zfs_create_011_pos', 'zfs_create_012_pos', 'zfs_create_013_pos', 'zfs_create_014_pos'] # DISABLED: # zfs_destroy_005_neg - busy mountpoint behavior # zfs_destroy_001_pos - https://github.com/zfsonlinux/zfs/issues/5635 [tests/functional/cli_root/zfs_destroy] tests = ['zfs_destroy_002_pos', 'zfs_destroy_003_pos', 'zfs_destroy_004_pos','zfs_destroy_006_neg', 'zfs_destroy_007_neg', 'zfs_destroy_008_pos','zfs_destroy_009_pos', 'zfs_destroy_010_pos', 'zfs_destroy_011_pos','zfs_destroy_012_pos', 'zfs_destroy_013_neg', 'zfs_destroy_014_pos','zfs_destroy_015_pos', 'zfs_destroy_016_pos'] # DISABLED: # zfs_get_004_pos - https://github.com/zfsonlinux/zfs/issues/3484 # zfs_get_006_neg - needs investigation [tests/functional/cli_root/zfs_get] tests = ['zfs_get_001_pos', 'zfs_get_002_pos', 'zfs_get_003_pos', 'zfs_get_005_neg', 'zfs_get_007_neg', 'zfs_get_008_pos', 'zfs_get_009_pos', 'zfs_get_010_neg'] 
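# Each bracketed section names a test group directory and the scripts to
# run from it; the [DEFAULT] settings (pre/post hooks, user, timeout) apply
# unless a section overrides them, as the zdb section above does by
# clearing pre/post. A hypothetical minimal section (not part of this
# runfile) would look like:
#
#   [tests/functional/example]
#   tests = ['example_001_pos']
#   timeout = 300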
[tests/functional/cli_root/zfs_inherit] tests = ['zfs_inherit_001_neg', 'zfs_inherit_002_neg', 'zfs_inherit_003_pos'] # DISABLED: # zfs_mount_006_pos - https://github.com/zfsonlinux/zfs/issues/4990 # zfs_mount_007_pos - needs investigation # zfs_mount_009_neg - needs investigation # zfs_mount_all_001_pos - needs investigation [tests/functional/cli_root/zfs_mount] tests = ['zfs_mount_001_pos', 'zfs_mount_002_pos', 'zfs_mount_003_pos', 'zfs_mount_004_pos', 'zfs_mount_005_pos', 'zfs_mount_008_pos', 'zfs_mount_010_neg', 'zfs_mount_011_neg', 'zfs_mount_012_neg'] [tests/functional/cli_root/zfs_promote] tests = ['zfs_promote_001_pos', 'zfs_promote_002_pos', 'zfs_promote_003_pos', 'zfs_promote_004_pos', 'zfs_promote_005_pos', 'zfs_promote_006_neg', 'zfs_promote_007_neg', 'zfs_promote_008_pos'] # DISABLED: # zfs_written_property_001_pos - https://github.com/zfsonlinux/zfs/issues/2441 [tests/functional/cli_root/zfs_property] tests = [] # DISABLED: # zfs_receive_004_neg - Fails for OpenZFS on illumos [tests/functional/cli_root/zfs_receive] tests = ['zfs_receive_001_pos', 'zfs_receive_002_pos', 'zfs_receive_003_pos', 'zfs_receive_005_neg', 'zfs_receive_006_pos', 'zfs_receive_007_neg', 'zfs_receive_008_pos', 'zfs_receive_009_neg', 'zfs_receive_010_pos', 'zfs_receive_011_pos', 'zfs_receive_012_pos', 'zfs_receive_013_pos', 'zfs_receive_014_pos', 'zfs_receive_015_pos'] # DISABLED: # zfs_rename_006_pos - https://github.com/zfsonlinux/zfs/issues/5647 # zfs_rename_009_neg - https://github.com/zfsonlinux/zfs/issues/5648 [tests/functional/cli_root/zfs_rename] tests = ['zfs_rename_001_pos', 'zfs_rename_002_pos', 'zfs_rename_003_pos', 'zfs_rename_004_neg', 'zfs_rename_005_neg', 'zfs_rename_007_pos', 'zfs_rename_008_pos', 'zfs_rename_010_neg', 'zfs_rename_011_pos', 'zfs_rename_012_neg', 'zfs_rename_013_pos'] [tests/functional/cli_root/zfs_reservation] tests = ['zfs_reservation_001_pos', 'zfs_reservation_002_pos'] # DISABLED: # zfs_rollback_001_pos - busy mountpoint behavior # zfs_rollback_002_pos - busy mountpoint behavior [tests/functional/cli_root/zfs_rollback] tests = ['zfs_rollback_003_neg', 'zfs_rollback_004_neg'] [tests/functional/cli_root/zfs_send] tests = ['zfs_send_001_pos', 'zfs_send_002_pos', 'zfs_send_003_pos', 'zfs_send_004_neg', 'zfs_send_005_pos', 'zfs_send_006_pos', 'zfs_send_007_pos'] # DISABLED: # ro_props_001_pos - https://github.com/zfsonlinux/zfs/issues/5511 [tests/functional/cli_root/zfs_set] tests = ['cache_001_pos', 'cache_002_neg', 'canmount_001_pos', 'canmount_002_pos', 'canmount_003_pos', 'canmount_004_pos', 'checksum_001_pos', 'compression_001_pos', 'mountpoint_001_pos', 'mountpoint_002_pos', 'reservation_001_neg', 'user_property_002_pos', 'share_mount_001_neg', 'snapdir_001_pos', 'onoffs_001_pos', 'user_property_001_pos', 'user_property_003_neg', 'readonly_001_pos', 'user_property_004_pos', 'version_001_neg', 'zfs_set_001_neg', 'zfs_set_002_neg', 'zfs_set_003_neg', 'property_alias_001_pos', 'mountpoint_003_pos'] # DISABLED: # zfs_share_005_pos - needs investigation, probably unsupported NFS share format [tests/functional/cli_root/zfs_share] tests = ['zfs_share_001_pos', 'zfs_share_002_pos', 'zfs_share_003_pos', 'zfs_share_004_pos', 'zfs_share_006_pos', 'zfs_share_007_neg', 'zfs_share_008_neg', 'zfs_share_009_neg', 'zfs_share_010_neg', 'zfs_share_011_pos'] [tests/functional/cli_root/zfs_snapshot] tests = ['zfs_snapshot_001_neg', 'zfs_snapshot_002_neg', 'zfs_snapshot_003_neg', 'zfs_snapshot_004_neg', 'zfs_snapshot_005_neg', 'zfs_snapshot_006_pos', 'zfs_snapshot_007_neg', 
'zfs_snapshot_008_neg', 'zfs_snapshot_009_pos'] # DISABLED: # zfs_unmount_005_pos - needs investigation # zfs_unmount_009_pos - needs investigation # zfs_unmount_all_001_pos - needs investigation [tests/functional/cli_root/zfs_unmount] tests = ['zfs_unmount_001_pos', 'zfs_unmount_002_pos', 'zfs_unmount_003_pos', 'zfs_unmount_004_pos', 'zfs_unmount_006_pos', 'zfs_unmount_007_neg', 'zfs_unmount_008_neg'] # DISABLED: # zfs_unshare_002_pos - zfs set sharenfs=off won't unshare if it was already off # zfs_unshare_006_pos - some distros come with Samba "user shares" disabled [tests/functional/cli_root/zfs_unshare] tests = ['zfs_unshare_001_pos', 'zfs_unshare_003_pos', 'zfs_unshare_004_neg', 'zfs_unshare_005_neg'] [tests/functional/cli_root/zfs_upgrade] tests = ['zfs_upgrade_001_pos', 'zfs_upgrade_002_pos', 'zfs_upgrade_003_pos', 'zfs_upgrade_004_pos', 'zfs_upgrade_005_pos', 'zfs_upgrade_006_neg', 'zfs_upgrade_007_neg'] [tests/functional/cli_root/zpool] tests = ['zpool_001_neg', 'zpool_002_pos', 'zpool_003_pos'] # DISABLED: # zpool_add_004_pos - https://github.com/zfsonlinux/zfs/issues/3484 # zpool_add_005_pos - no 'dumpadm' command. # zpool_add_006_pos - https://github.com/zfsonlinux/zfs/issues/3484 [tests/functional/cli_root/zpool_add] tests = ['zpool_add_001_pos', 'zpool_add_002_pos', 'zpool_add_003_pos', 'zpool_add_007_neg', 'zpool_add_008_neg', 'zpool_add_009_neg'] [tests/functional/cli_root/zpool_attach] tests = ['zpool_attach_001_neg'] # DISABLED: # zpool_clear_001_pos - https://github.com/zfsonlinux/zfs/issues/5634 [tests/functional/cli_root/zpool_clear] tests = ['zpool_clear_002_neg', 'zpool_clear_003_neg'] # DISABLED: # zpool_create_001_pos - needs investigation # zpool_create_002_pos - needs investigation # zpool_create_004_pos - needs investigation # zpool_create_006_pos - https://github.com/zfsonlinux/zfs/issues/3484 # zpool_create_008_pos - uses VTOC labels (?) and 'overlapping slices' # zpool_create_011_neg - tries to access /etc/vfstab etc # zpool_create_012_neg - swap devices # zpool_create_014_neg - swap devices # zpool_create_015_neg - swap devices # zpool_create_016_pos - no dumadm command. 
# zpool_create_020_pos - needs investigation [tests/functional/cli_root/zpool_create] tests = [ 'zpool_create_003_pos', 'zpool_create_005_pos', 'zpool_create_007_neg', 'zpool_create_009_neg', 'zpool_create_010_neg', 'zpool_create_017_neg', 'zpool_create_018_pos', 'zpool_create_019_pos', 'zpool_create_021_pos', 'zpool_create_022_pos', 'zpool_create_023_neg', 'zpool_create_024_pos', 'zpool_create_features_001_pos', 'zpool_create_features_002_pos', 'zpool_create_features_003_pos', 'zpool_create_features_004_neg', 'zpool_create_features_005_pos'] # DISABLED: # zpool_destroy_001_pos - needs investigation # zpool_destroy_002_pos - busy mountpoint behavior [tests/functional/cli_root/zpool_destroy] tests = [ 'zpool_destroy_003_neg'] pre = post = [tests/functional/cli_root/zpool_detach] tests = ['zpool_detach_001_neg'] # DISABLED: Requires full FMA support in ZED # zpool_expand_* - https://github.com/zfsonlinux/zfs/issues/2437 #[tests/functional/cli_root/zpool_expand] #tests = ['zpool_expand_001_pos', 'zpool_expand_002_pos', # 'zpool_expand_003_neg'] # DISABLED: # zpool_export_004_pos - https://github.com/zfsonlinux/zfs/issues/3484 [tests/functional/cli_root/zpool_export] tests = ['zpool_export_001_pos', 'zpool_export_002_pos', 'zpool_export_003_neg'] [tests/functional/cli_root/zpool_get] tests = ['zpool_get_001_pos', 'zpool_get_002_pos', 'zpool_get_003_pos', 'zpool_get_004_neg'] [tests/functional/cli_root/zpool_history] tests = ['zpool_history_001_neg', 'zpool_history_002_pos'] # DISABLED: # zpool_import_002_pos - https://github.com/zfsonlinux/zfs/issues/5202 # zpool_import_012_pos - sharenfs issue # zpool_import_all_001_pos - partition issue [tests/functional/cli_root/zpool_import] tests = ['zpool_import_001_pos', 'zpool_import_003_pos', 'zpool_import_004_pos', 'zpool_import_005_pos', 'zpool_import_006_pos', 'zpool_import_007_pos', 'zpool_import_008_pos', 'zpool_import_009_neg', 'zpool_import_010_pos', 'zpool_import_011_neg', 'zpool_import_013_neg', 'zpool_import_014_pos', 'zpool_import_features_001_pos', 'zpool_import_features_002_neg', 'zpool_import_features_003_pos','zpool_import_missing_001_pos', 'zpool_import_missing_002_pos', 'zpool_import_missing_003_pos', 'zpool_import_rename_001_pos'] [tests/functional/cli_root/zpool_offline] tests = ['zpool_offline_001_pos', 'zpool_offline_002_neg'] [tests/functional/cli_root/zpool_online] tests = ['zpool_online_001_pos', 'zpool_online_002_neg'] # DISABLED: # zpool_remove_003_pos - needs investigation [tests/functional/cli_root/zpool_remove] tests = ['zpool_remove_001_neg', 'zpool_remove_002_pos'] [tests/functional/cli_root/zpool_replace] tests = ['zpool_replace_001_neg'] [tests/functional/cli_root/zpool_scrub] tests = ['zpool_scrub_001_neg', 'zpool_scrub_002_pos', 'zpool_scrub_003_pos', 'zpool_scrub_004_pos', 'zpool_scrub_005_pos'] [tests/functional/cli_root/zpool_set] tests = ['zpool_set_001_pos', 'zpool_set_002_neg', 'zpool_set_003_neg'] pre = post = [tests/functional/cli_root/zpool_status] tests = ['zpool_status_001_pos', 'zpool_status_002_pos'] # DISABLED: # zpool_upgrade_002_pos - https://github.com/zfsonlinux/zfs/issues/4034 # zpool_upgrade_004_pos - https://github.com/zfsonlinux/zfs/issues/4034 # zpool_upgrade_007_pos - needs investigation [tests/functional/cli_root/zpool_upgrade] tests = ['zpool_upgrade_001_pos', 'zpool_upgrade_003_pos', 'zpool_upgrade_005_neg', 'zpool_upgrade_006_neg', 'zpool_upgrade_008_pos', 'zpool_upgrade_009_neg'] # DISABLED: # zfs_share_001_neg - requires additional dependencies # zfs_unshare_001_neg - requires additional 
dependencies [tests/functional/cli_user/misc] tests = ['zdb_001_neg', 'zfs_001_neg', 'zfs_allow_001_neg', 'zfs_clone_001_neg', 'zfs_create_001_neg', 'zfs_destroy_001_neg', 'zfs_get_001_neg', 'zfs_inherit_001_neg', 'zfs_mount_001_neg', 'zfs_promote_001_neg', 'zfs_receive_001_neg', 'zfs_rename_001_neg', 'zfs_rollback_001_neg', 'zfs_send_001_neg', 'zfs_set_001_neg', 'zfs_snapshot_001_neg', 'zfs_unallow_001_neg', 'zfs_unmount_001_neg', 'zfs_upgrade_001_neg', 'zpool_001_neg', 'zpool_add_001_neg', 'zpool_attach_001_neg', 'zpool_clear_001_neg', 'zpool_create_001_neg', 'zpool_destroy_001_neg', 'zpool_detach_001_neg', 'zpool_export_001_neg', 'zpool_get_001_neg', 'zpool_history_001_neg', 'zpool_import_001_neg', 'zpool_import_002_neg', 'zpool_offline_001_neg', 'zpool_online_001_neg', 'zpool_remove_001_neg', 'zpool_replace_001_neg', 'zpool_scrub_001_neg', 'zpool_set_001_neg', 'zpool_status_001_neg', 'zpool_upgrade_001_neg', 'arcstat_001_pos', 'arc_summary_001_pos', 'dbufstat_001_pos'] user = [tests/functional/cli_user/zfs_list] tests = ['zfs_list_001_pos', 'zfs_list_002_pos', 'zfs_list_003_pos', 'zfs_list_004_neg', 'zfs_list_007_pos', 'zfs_list_008_neg'] user = [tests/functional/cli_user/zpool_iostat] tests = ['zpool_iostat_001_neg', 'zpool_iostat_002_pos', 'zpool_iostat_003_neg', 'zpool_iostat_004_pos', 'zpool_iostat_005_pos'] user = [tests/functional/cli_user/zpool_list] tests = ['zpool_list_001_pos', 'zpool_list_002_neg'] user = [tests/functional/compression] tests = ['compress_001_pos', 'compress_002_pos', 'compress_003_pos', 'compress_004_pos'] [tests/functional/ctime] tests = ['ctime_001_pos' ] # DISABLED: # zfs_allow_010_pos - https://github.com/zfsonlinux/zfs/issues/5646 [tests/functional/delegate] tests = ['zfs_allow_001_pos', 'zfs_allow_002_pos', 'zfs_allow_004_pos', 'zfs_allow_005_pos', 'zfs_allow_006_pos', 'zfs_allow_007_pos', 'zfs_allow_008_pos', 'zfs_allow_009_neg', 'zfs_allow_011_neg', 'zfs_allow_012_neg', 'zfs_unallow_001_pos', 'zfs_unallow_002_pos', 'zfs_unallow_003_pos', 'zfs_unallow_004_pos', 'zfs_unallow_005_pos', 'zfs_unallow_006_pos', 'zfs_unallow_007_neg', 'zfs_unallow_008_neg'] # DISABLED: # devices_001_pos - needs investigation # devices_002_neg - needs investigation [tests/functional/devices] tests = ['devices_003_pos'] # DISABLED: # exec_002_neg - needs investigation [tests/functional/exec] tests = ['exec_001_pos'] [tests/functional/features/async_destroy] tests = ['async_destroy_001_pos'] [tests/functional/features/large_dnode] tests = ['large_dnode_001_pos', 'large_dnode_002_pos', 'large_dnode_003_pos', 'large_dnode_004_neg', 'large_dnode_005_pos', 'large_dnode_006_pos', 'large_dnode_007_neg'] # DISABLED: needs investigation #[tests/functional/grow_pool] #tests = ['grow_pool_001_pos'] #pre = #post = # DISABLED: needs investigation #[tests/functional/grow_replicas] #tests = ['grow_replicas_001_pos'] #pre = #post = # DISABLED: # history_004_pos - https://github.com/zfsonlinux/zfs/issues/5664 # history_006_neg - https://github.com/zfsonlinux/zfs/issues/5657 # history_008_pos - https://github.com/zfsonlinux/zfs/issues/5658 [tests/functional/history] tests = ['history_001_pos', 'history_002_pos', 'history_003_pos', 'history_005_neg', 'history_007_pos', 'history_009_pos', 'history_010_pos'] [tests/functional/inheritance] tests = ['inherit_001_pos'] pre = # DISABLED: # inuse_001_pos, inuse_007_pos - no dumpadm command # inuse_005_pos - partition issue # inuse_006_pos - partition issue # inuse_008_pos - partition issue # inuse_009_pos - partition issue [tests/functional/inuse] 
tests = ['inuse_004_pos'] post = # DISABLED: needs investigation # large_files_001_pos [tests/functional/large_files] tests = ['large_files_002_pos'] # DISABLED: needs investigation #[tests/functional/largest_pool] #tests = ['largest_pool_001_pos'] #pre = #post = # DISABLED: needs investigation #[tests/functional/link_count] #tests = ['link_count_001'] [tests/functional/migration] tests = ['migration_001_pos', 'migration_002_pos', 'migration_003_pos', 'migration_004_pos', 'migration_005_pos', 'migration_006_pos', 'migration_007_pos', 'migration_008_pos', 'migration_009_pos', 'migration_010_pos', 'migration_011_pos', 'migration_012_pos'] # DISABLED: # mmap_write_001_pos - needs investigation [tests/functional/mmap] tests = ['mmap_read_001_pos'] # DISABLED: # umountall_001 - requires umountall command. [tests/functional/mount] tests = ['umount_001'] [tests/functional/mv_files] tests = ['mv_files_001_pos', 'mv_files_002_pos'] [tests/functional/nestedfs] tests = ['nestedfs_001_pos'] [tests/functional/no_space] tests = ['enospc_001_pos'] # DISABLED: # nopwrite_volume - https://github.com/zfsonlinux/zfs/issues/5510 # nopwrite_varying_compression - needs investigation [tests/functional/nopwrite] tests = ['nopwrite_copies', 'nopwrite_mtime', 'nopwrite_negative', 'nopwrite_promoted_clone', 'nopwrite_recsize', 'nopwrite_sync'] # DISABLED: needs investigation #[tests/functional/online_offline] #tests = ['online_offline_001_pos', 'online_offline_002_neg', # 'online_offline_003_neg'] [tests/functional/pool_names] tests = ['pool_names_001_pos', 'pool_names_002_neg'] pre = post = [tests/functional/poolversion] tests = ['poolversion_001_pos', 'poolversion_002_pos'] # DISABLED: requires pfexec command or 'RBAC profile' #[tests/functional/privilege] #tests = ['privilege_001_pos', 'privilege_002_pos'] [tests/functional/quota] tests = ['quota_001_pos', 'quota_002_pos', 'quota_003_pos', 'quota_004_pos', 'quota_005_pos', 'quota_006_neg'] [tests/functional/raidz] tests = ['raidz_001_neg', 'raidz_002_pos'] [tests/functional/redundancy] -tests = ['redundancy_001_pos', 'redundancy_002_pos', 'redundancy_003_pos'] +tests = ['redundancy_001_pos', 'redundancy_002_pos', 'redundancy_003_pos', + 'redundancy_004_neg'] [tests/functional/refquota] tests = ['refquota_001_pos', 'refquota_002_pos', 'refquota_003_pos', 'refquota_004_pos', 'refquota_005_pos', 'refquota_006_neg'] # DISABLED: # refreserv_004_pos - needs investigation [tests/functional/refreserv] tests = ['refreserv_001_pos', 'refreserv_002_pos', 'refreserv_003_pos', 'refreserv_005_pos'] # DISABLED: #[tests/functional/rename_dirs] #tests = ['rename_dirs_001_pos'] [tests/functional/replacement] tests = ['replacement_001_pos', 'replacement_002_pos', 'replacement_003_pos'] # DISABLED: # reservation_001_pos - https://github.com/zfsonlinux/zfs/issues/4445 # reservation_013_pos - https://github.com/zfsonlinux/zfs/issues/4444 # reservation_018_pos - https://github.com/zfsonlinux/zfs/issues/5642 [tests/functional/reservation] tests = ['reservation_001_pos', 'reservation_002_pos', 'reservation_003_pos', 'reservation_004_pos', 'reservation_005_pos', 'reservation_006_pos', 'reservation_007_pos', 'reservation_008_pos', 'reservation_009_pos', 'reservation_010_pos', 'reservation_011_pos', 'reservation_012_pos', 'reservation_013_pos', 'reservation_014_pos', 'reservation_015_pos', 'reservation_016_pos', 'reservation_017_pos'] # DISABLED: Root pools must be handled differently under Linux #[tests/functional/rootpool] #tests = ['rootpool_002_neg', 'rootpool_003_neg', 
'rootpool_007_neg'] # DISABLED: # rsend_008_pos - Fails for OpenZFS on illumos # rsend_009_pos - Fails for OpenZFS on illumos # rsend_020_pos - ASSERTs in dump_record() [tests/functional/rsend] tests = ['rsend_001_pos', 'rsend_002_pos', 'rsend_003_pos', 'rsend_004_pos', 'rsend_005_pos', 'rsend_006_pos', 'rsend_007_pos', 'rsend_010_pos', 'rsend_011_pos', 'rsend_012_pos', 'rsend_013_pos', 'rsend_014_pos', 'rsend_019_pos', 'rsend_021_pos', 'rsend_022_pos', 'rsend_024_pos'] [tests/functional/scrub_mirror] tests = ['scrub_mirror_001_pos', 'scrub_mirror_002_pos', 'scrub_mirror_003_pos', 'scrub_mirror_004_pos'] # DISABLED: Scripts need to be updated. # slog_012_neg - needs investigation # slog_013_pos - requires 'lofiadm' command. # slog_014_pos - needs investigation [tests/functional/slog] tests = ['slog_001_pos', 'slog_002_pos', 'slog_003_pos', 'slog_004_pos', 'slog_005_pos', 'slog_006_pos', 'slog_007_pos', 'slog_008_neg', 'slog_009_neg', 'slog_010_neg', 'slog_011_neg', 'slog_015_pos'] # DISABLED: # clone_001_pos - https://github.com/zfsonlinux/zfs/issues/3484 # rollback_003_pos - Hangs in unmount and spins. # snapshot_016_pos - Problem with automount [tests/functional/snapshot] tests = ['rollback_001_pos', 'rollback_002_pos', 'snapshot_001_pos', 'snapshot_002_pos', 'snapshot_003_pos', 'snapshot_004_pos', 'snapshot_005_pos', 'snapshot_006_pos', 'snapshot_007_pos', 'snapshot_008_pos', 'snapshot_009_pos', 'snapshot_010_pos', 'snapshot_011_pos', 'snapshot_012_pos', 'snapshot_013_pos', 'snapshot_014_pos', 'snapshot_015_pos', 'snapshot_017_pos'] # DISABLED: # snapused_004_pos - https://github.com/zfsonlinux/zfs/issues/5513 [tests/functional/snapused] tests = ['snapused_001_pos', 'snapused_002_pos', 'snapused_003_pos', 'snapused_005_pos'] [tests/functional/sparse] tests = ['sparse_001_pos'] # DISABLED: needs investigation #[tests/functional/threadsappend] #tests = ['threadsappend_001_pos'] [tests/functional/tmpfile] tests = ['tmpfile_001_pos', 'tmpfile_002_pos', 'tmpfile_003_pos'] [tests/functional/truncate] tests = ['truncate_001_pos', 'truncate_002_pos'] [tests/functional/upgrade] tests = [ 'upgrade_userobj_001_pos' ] [tests/functional/userquota] tests = [ 'userquota_001_pos', 'userquota_002_pos', 'userquota_003_pos', 'userquota_004_pos', 'userquota_005_neg', 'userquota_006_pos', 'userquota_007_pos', 'userquota_008_pos', 'userquota_009_pos', 'userquota_010_pos', 'userquota_011_pos', 'userquota_012_neg', 'userquota_013_pos', 'userspace_001_pos', 'userspace_002_pos', 'userspace_003_pos', 'groupspace_001_pos', 'groupspace_002_pos', 'groupspace_003_pos' ] # DISABLED: # vdev_zaps_007_pos -- fails due to a pre-existing issue with zpool split [tests/functional/vdev_zaps] tests = ['vdev_zaps_001_pos', 'vdev_zaps_002_pos', 'vdev_zaps_003_pos', 'vdev_zaps_004_pos', 'vdev_zaps_005_pos', 'vdev_zaps_006_pos'] # DISABLED: # write_dirs_002_pos - needs investigation [tests/functional/write_dirs] tests = ['write_dirs_001_pos'] # DISABLED: No 'runat' command, replace the Linux equivalent and add xattrtest #[tests/functional/xattr] #tests = ['xattr_001_pos', 'xattr_002_neg', 'xattr_003_neg', 'xattr_004_pos', # 'xattr_005_pos', 'xattr_006_pos', 'xattr_007_neg', 'xattr_008_pos', # 'xattr_009_neg', 'xattr_010_neg', 'xattr_011_pos', 'xattr_012_pos', # 'xattr_013_pos'] [tests/functional/zvol/zvol_ENOSPC] tests = ['zvol_ENOSPC_001_pos'] [tests/functional/zvol/zvol_cli] tests = ['zvol_cli_001_pos', 'zvol_cli_002_pos', 'zvol_cli_003_neg'] # DISABLED: requires dumpadm #[tests/functional/zvol/zvol_misc] #tests = 
['zvol_misc_001_neg', 'zvol_misc_002_pos', 'zvol_misc_003_neg', # 'zvol_misc_004_pos', 'zvol_misc_005_neg', 'zvol_misc_006_pos'] # DISABLED: requires updating for Linux #[tests/functional/zvol/zvol_swap] #tests = ['zvol_swap_001_pos', 'zvol_swap_002_pos', 'zvol_swap_003_pos', # 'zvol_swap_004_pos', 'zvol_swap_005_pos', 'zvol_swap_006_pos'] diff --git a/tests/zfs-tests/include/default.cfg.in b/tests/zfs-tests/include/default.cfg.in index d6913f1f3a3e..ef34d2b90b62 100644 --- a/tests/zfs-tests/include/default.cfg.in +++ b/tests/zfs-tests/include/default.cfg.in @@ -1,211 +1,217 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # # Copyright (c) 2016 by Delphix. All rights reserved. # . $STF_SUITE/include/commands.cfg # Common paths bindir=@bindir@ sbindir=@sbindir@ # ZFS Commands export ZDB=${ZDB:-${sbindir}/zdb} export ZFS=${ZFS:-${sbindir}/zfs} export ZHACK=${ZHACK:-${sbindir}/zhack} export ZINJECT=${ZINJECT:-${sbindir}/zinject} export ZPOOL=${ZPOOL:-${sbindir}/zpool} export ZTEST=${ZTEST:-${sbindir}/ztest} export ZPIOS=${ZPIOS:-${sbindir}/zpios} export RAIDZ_TEST=${RAIDZ_TEST:-${bindir}/raidz_test} export ARC_SUMMARY=${ARC_SUMMARY:-${bindir}/arc_summary.py} export ARCSTAT=${ARCSTAT:-${bindir}/arcstat.py} export DBUFSTAT=${DBUFSTAT:-${bindir}/dbufstat.py} . $STF_SUITE/include/libtest.shlib # Optionally override the installed ZFS commands to run in-tree if [[ -f "$SRCDIR/zfs-script-config.sh" ]]; then .
$SRCDIR/zfs-script-config.sh fi # Define run length constants export RT_LONG="3" export RT_MEDIUM="2" export RT_SHORT="1" # Define macro for zone test export ZONE_POOL="zonepool" export ZONE_CTR="zonectr" # Test Suite Specific Commands helperdir=@datarootdir@/@PACKAGE@/zfs-tests/bin export CHG_USR_EXEC=${CHG_USR_EXEC:-${helperdir}/chg_usr_exec} export DEVNAME2DEVID=${DEVNAME2DEVID:-${helperdir}/devname2devid} export DIR_RD_UPDATE=${DIR_RD_UPDATE:-${helperdir}/dir_rd_update} export FILE_CHECK=${FILE_CHECK:-${helperdir}/file_check} export FILE_TRUNC=${FILE_TRUNC:-${helperdir}/file_trunc} export FILE_WRITE=${FILE_WRITE:-${helperdir}/file_write} export LARGEST_FILE=${LARGEST_FILE:-${helperdir}/largest_file} export MKBUSY=${MKBUSY:-${helperdir}/mkbusy} export MKFILE=${MKFILE:-${helperdir}/mkfile} export MKFILES=${MKFILES:-${helperdir}/mkfiles} export MKTREE=${MKTREE:-${helperdir}/mktree} export MMAP_EXEC=${MMAP_EXEC:-${helperdir}/mmap_exec} export MMAPWRITE=${MMAPWRITE:-${helperdir}/mmapwrite} export RANDFREE_FILE=${RANDFREE_FILE:-${helperdir}/randfree_file} export READMMAP=${READMMAP:-${helperdir}/readmmap} export RENAME_DIR=${RENAME_DIR:-${helperdir}/rename_dir} export RM_LNKCNT_ZERO_FILE=${RM_LNKCNT_ZERO_FILE:-${helperdir}/rm_lnkcnt_zero_file} export THREADSAPPEND=${THREADSAPPEND:-${helperdir}/threadsappend} export XATTRTEST=${XATTRTEST:-${helperdir}/xattrtest} # ensure we're running in the C locale, since # localised messages may result in test failures export LC_ALL="C" export LANG="C" # # pattern to ignore from 'zpool list'. # export NO_POOLS="no pools available" # pattern to ignore from 'zfs list'. export NO_DATASETS="no datasets available" export TEST_BASE_DIR="/var/tmp" # Default to compression ON export COMPRESSION_PROP=on # Default to using the checksum export CHECKSUM_PROP=on # some common variables used by test scripts: export FIO_SCRIPTS=$STF_SUITE/tests/perf/fio export PERF_SCRIPTS=$STF_SUITE/tests/perf/scripts # some test pool names export TESTPOOL=testpool.$$ export TESTPOOL1=testpool1.$$ export TESTPOOL2=testpool2.$$ export TESTPOOL3=testpool3.$$ export PERFPOOL=perfpool # some test file system names export TESTFS=testfs.$$ export TESTFS1=testfs1.$$ export TESTFS2=testfs2.$$ export TESTFS3=testfs3.$$ # some test directory names export TESTDIR=${TEST_BASE_DIR%%/}/testdir$$ export TESTDIR0=${TEST_BASE_DIR%%/}/testdir0$$ export TESTDIR1=${TEST_BASE_DIR%%/}/testdir1$$ export TESTDIR2=${TEST_BASE_DIR%%/}/testdir2$$ export ZFSROOT= export TESTSNAP=testsnap$$ export TESTSNAP1=testsnap1$$ export TESTSNAP2=testsnap2$$ export TESTCLONE=testclone$$ export TESTCLONE1=testclone1$$ export TESTCLONE2=testclone2$$ export TESTCLCT=testclct$$ export TESTCTR=testctr$$ export TESTCTR1=testctr1$$ export TESTCTR2=testctr2$$ export TESTVOL=testvol$$ export TESTVOL1=testvol1$$ export TESTVOL2=testvol2$$ export TESTFILE0=testfile0.$$ export TESTFILE1=testfile1.$$ export TESTFILE2=testfile2.$$ export TESTBKMARK=testbkmark$$ export LONGPNAME="poolname50charslong_012345678901234567890123456789" export LONGFSNAME="fsysname50charslong_012345678901234567890123456789" export SNAPFS="$TESTPOOL/$TESTFS@$TESTSNAP" export SNAPFS1="$TESTPOOL/$TESTVOL@$TESTSNAP" export VOLSIZE=150m export BIGVOLSIZE=1eb # Default to limit disks to be checked export MAX_FINDDISKSNUM=6 +# Default minimum size for file based vdevs in the test suite +export MINVDEVSIZE=$((256 * 1024 * 1024)) + +# Minimum vdev size possible as defined in the OS +export SPA_MINDEVSIZE=$((64 * 1024 * 1024)) + # For iscsi target support export
ISCSITGTFILE=/tmp/iscsitgt_file export ISCSITGT_FMRI=svc:/system/iscsitgt:default # # finally, if we're running in a local zone # we take some additional actions if ! is_global_zone; then reexport_pool fi export ZFS_VERSION=5 export ZFS_ALL_VERSIONS="1 2 3 4 5" for i in $ZFS_ALL_VERSIONS; do eval 'export ZFS_VERSION_$i="v${i}-fs"' done export MAX_PARTITIONS=8 if is_linux; then unpack_opts="--sparse -xf" pack_opts="--sparse -cf" verbose=" -v" unpack_preserve=" -xpf" pack_preserve=" -cpf" ZVOL_DEVDIR="/dev/zvol" ZVOL_RDEVDIR="/dev/zvol" DEV_RDSKDIR="/dev" DEV_MPATHDIR="/dev/mapper" NEWFS_DEFAULT_FS="ext2" else unpack_opts="xv" pack_opts="cf" verbose="v" unpack_preserve="xpf" pack_preserve="cpf" ZVOL_DEVDIR="/dev/zvol/dsk" ZVOL_RDEVDIR="/dev/zvol/rdsk" DEV_DSKDIR="/dev/dsk" DEV_RDSKDIR="/dev/rdsk" NEWFS_DEFAULT_FS="ufs" fi export unpack_opts pack_opts verbose unpack_preserve pack_preserve \ ZVOL_DEVDIR ZVOL_RDEVDIR NEWFS_DEFAULT_FS DEV_RDSKDIR DEV_MPATHDIR diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib index 31b1e1dfc053..22b79b15a7c3 100644 --- a/tests/zfs-tests/include/libtest.shlib +++ b/tests/zfs-tests/include/libtest.shlib @@ -1,3094 +1,3094 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # # Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . ${STF_TOOLS}/include/logapi.shlib # Determine if this is a Linux test system # # Return 0 if the platform is Linux, 1 otherwise function is_linux { if [[ $($UNAME -o) == "GNU/Linux" ]]; then return 0 else return 1 fi } # Determine if this is a 32-bit system # # Return 0 if the platform is 32-bit, 1 otherwise function is_32bit { if [[ $(getconf LONG_BIT) == "32" ]]; then return 0 else return 1 fi } # Determine if kmemleak is enabled # # Return 0 if kmemleak is enabled, 1 otherwise function is_kmemleak { if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then return 0 else return 1 fi } # Determine whether a dataset is mounted # # $1 dataset name # $2 filesystem type; optional - defaulted to zfs # # Return 0 if dataset is mounted; 1 if unmounted; 2 on error function ismounted { typeset fstype=$2 [[ -z $fstype ]] && fstype=zfs typeset out dir name ret case $fstype in zfs) if [[ "$1" == "/"* ]] ; then for out in $($ZFS mount | $AWK '{print $2}'); do [[ $1 == $out ]] && return 0 done else for out in $($ZFS mount | $AWK '{print $1}'); do [[ $1 == $out ]] && return 0 done fi ;; ufs|nfs) out=$($DF -F $fstype $1 2>/dev/null) ret=$? (($ret != 0)) && return $ret dir=${out%%\(*} dir=${dir%% *} name=${out##*\(} name=${name%%\)*} name=${name%% *} [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0 ;; ext2) out=$($DF -t $fstype $1 2>/dev/null) return $?
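# (A zvol is never listed by 'zfs mount'; the zvol case below instead
# resolves the /dev/zvol symlink and looks for the resolved block
# device in the system mount table.)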
;; zvol) if [[ -L "$ZVOL_DEVDIR/$1" ]]; then link=$(readlink -f $ZVOL_DEVDIR/$1) [[ -n "$link" ]] && \ $MOUNT | $GREP -q "^$link" && \ return 0 fi ;; esac return 1 } # Return 0 if a dataset is mounted; 1 otherwise # # $1 dataset name # $2 filesystem type; optional - defaulted to zfs function mounted { ismounted $1 $2 (($? == 0)) && return 0 return 1 } # Return 0 if a dataset is unmounted; 1 otherwise # # $1 dataset name # $2 filesystem type; optional - defaulted to zfs function unmounted { ismounted $1 $2 (($? == 1)) && return 0 return 1 } # split line on "," # # $1 - line to split function splitline { $ECHO $1 | $SED "s/,/ /g" } function default_setup { default_setup_noexit "$@" log_pass } # # Given a list of disks, setup storage pools and datasets. # function default_setup_noexit { typeset disklist=$1 typeset container=$2 typeset volume=$3 log_note begin default_setup_noexit if is_global_zone; then if poolexists $TESTPOOL ; then destroy_pool $TESTPOOL fi [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL log_note creating pool $TESTPOOL $disklist log_must $ZPOOL create -f $TESTPOOL $disklist else reexport_pool fi $RM -rf $TESTDIR || log_unresolved Could not remove $TESTDIR $MKDIR -p $TESTDIR || log_unresolved Could not create $TESTDIR log_must $ZFS create $TESTPOOL/$TESTFS log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS if [[ -n $container ]]; then $RM -rf $TESTDIR1 || \ log_unresolved Could not remove $TESTDIR1 $MKDIR -p $TESTDIR1 || \ log_unresolved Could not create $TESTDIR1 log_must $ZFS create $TESTPOOL/$TESTCTR log_must $ZFS set canmount=off $TESTPOOL/$TESTCTR log_must $ZFS create $TESTPOOL/$TESTCTR/$TESTFS1 log_must $ZFS set mountpoint=$TESTDIR1 \ $TESTPOOL/$TESTCTR/$TESTFS1 fi if [[ -n $volume ]]; then if is_global_zone ; then log_must $ZFS create -V $VOLSIZE $TESTPOOL/$TESTVOL block_device_wait else log_must $ZFS create $TESTPOOL/$TESTVOL fi fi } # # Given a list of disks, setup a storage pool, file system and # a container. # function default_container_setup { typeset disklist=$1 default_setup "$disklist" "true" } # # Given a list of disks, setup a storage pool, file system # and a volume. # function default_volume_setup { typeset disklist=$1 default_setup "$disklist" "" "true" } # # Given a list of disks, setup a storage pool, file system, # a container and a volume. # function default_container_volume_setup { typeset disklist=$1 default_setup "$disklist" "true" "true" } # # Create a snapshot on a filesystem or volume. By default, create a snapshot on # the filesystem. # # $1 Existing filesystem or volume name. Default, $TESTFS # $2 snapshot name. Default, $TESTSNAP # function create_snapshot { typeset fs_vol=${1:-$TESTFS} typeset snap=${2:-$TESTSNAP} [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined." [[ -z $snap ]] && log_fail "Snapshot's name is undefined." if snapexists $fs_vol@$snap; then log_fail "$fs_vol@$snap already exists." fi datasetexists $fs_vol || \ log_fail "$fs_vol must exist." log_must $ZFS snapshot $fs_vol@$snap } # # Create a clone from a snapshot, default clone name is $TESTCLONE. # # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default. # $2 Clone name, $TESTPOOL/$TESTCLONE is default. # function create_clone # snapshot clone { typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} typeset clone=${2:-$TESTPOOL/$TESTCLONE} [[ -z $snap ]] && \ log_fail "Snapshot name is undefined." [[ -z $clone ]] && \ log_fail "Clone name is undefined." log_must $ZFS clone $snap $clone } # # Create a bookmark of the given snapshot.
By default, create a bookmark on the # filesystem. # # $1 Existing filesystem or volume name. Default, $TESTFS # $2 Existing snapshot name. Default, $TESTSNAP # $3 bookmark name. Default, $TESTBKMARK # function create_bookmark { typeset fs_vol=${1:-$TESTFS} typeset snap=${2:-$TESTSNAP} typeset bkmark=${3:-$TESTBKMARK} [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined." [[ -z $snap ]] && log_fail "Snapshot's name is undefined." [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined." if bkmarkexists $fs_vol#$bkmark; then log_fail "$fs_vol#$bkmark already exists." fi datasetexists $fs_vol || \ log_fail "$fs_vol must exist." snapexists $fs_vol@$snap || \ log_fail "$fs_vol@$snap must exist." log_must $ZFS bookmark $fs_vol@$snap $fs_vol#$bkmark } function default_mirror_setup { default_mirror_setup_noexit $1 $2 $3 log_pass } # # Given a pair of disks, set up a storage pool and dataset for the mirror # @parameters: $1 the primary side of the mirror # $2 the secondary side of the mirror # @uses: ZPOOL ZFS TESTPOOL TESTFS function default_mirror_setup_noexit { readonly func="default_mirror_setup_noexit" typeset primary=$1 typeset secondary=$2 [[ -z $primary ]] && \ log_fail "$func: No parameters passed" [[ -z $secondary ]] && \ log_fail "$func: No secondary partition passed" [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL log_must $ZPOOL create -f $TESTPOOL mirror $@ log_must $ZFS create $TESTPOOL/$TESTFS log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS } # # create a number of mirrors. # We create a number ($1) of 2-way mirrors using the pairs of disks named # on the command line. These mirrors are *not* mounted # @parameters: $1 the number of mirrors to create # $... the devices to use to create the mirrors on # @uses: ZPOOL ZFS TESTPOOL function setup_mirrors { typeset -i nmirrors=$1 shift while ((nmirrors > 0)); do log_must test -n "$1" -a -n "$2" [[ -d /$TESTPOOL$nmirrors ]] && $RM -rf /$TESTPOOL$nmirrors log_must $ZPOOL create -f $TESTPOOL$nmirrors mirror $1 $2 shift 2 ((nmirrors = nmirrors - 1)) done } # # create a number of raidz pools. # We create a number ($1) of 2-disk raidz pools using the pairs of disks named # on the command line. These pools are *not* mounted # @parameters: $1 the number of pools to create # $... the devices to use to create the pools on # @uses: ZPOOL ZFS TESTPOOL function setup_raidzs { typeset -i nraidzs=$1 shift while ((nraidzs > 0)); do log_must test -n "$1" -a -n "$2" [[ -d /$TESTPOOL$nraidzs ]] && $RM -rf /$TESTPOOL$nraidzs log_must $ZPOOL create -f $TESTPOOL$nraidzs raidz $1 $2 shift 2 ((nraidzs = nraidzs - 1)) done } # # Destroy the configured testpool mirrors. # The mirrors are of the form ${TESTPOOL}{number} # @uses: ZPOOL ZFS TESTPOOL function destroy_mirrors { default_cleanup_noexit log_pass } # # Given a minimum of two disks, set up a storage pool and dataset for the raid-z # $1 the list of disks # function default_raidz_setup { typeset disklist="$*" disks=(${disklist[*]}) if [[ ${#disks[*]} -lt 2 ]]; then log_fail "A raid-z requires a minimum of two disks." fi [[ -d /$TESTPOOL ]] && $RM -rf /$TESTPOOL log_must $ZPOOL create -f $TESTPOOL raidz $1 $2 $3 log_must $ZFS create $TESTPOOL/$TESTFS log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS log_pass } # # Common function used to cleanup storage pools and datasets. # # Invoked at the start of the test suite to ensure the system # is in a known state, and also at the end of each set of # sub-tests to ensure errors from one set of tests don't # impact the execution of the next set.
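#
# Hypothetical usage sketch (illustration only; DISKS is supplied by the
# test framework): a test group's setup.ksh typically builds the default
# pool and file system, and its cleanup.ksh tears everything down:
#
#	. $STF_SUITE/include/libtest.shlib	# setup.ksh
#	default_setup $DISKS			# creates $TESTPOOL/$TESTFS on $TESTDIR
#
#	. $STF_SUITE/include/libtest.shlib	# cleanup.ksh
#	default_cleanup				# destroys every pool not named in $KEEP
#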
function default_cleanup { default_cleanup_noexit log_pass } function default_cleanup_noexit { typeset exclude="" typeset pool="" # # Destroying the pool will also destroy any # filesystems it contains. # if is_global_zone; then $ZFS unmount -a > /dev/null 2>&1 [[ -z "$KEEP" ]] && KEEP="rpool" exclude=`eval $ECHO \"'(${KEEP})'\"` ALL_POOLS=$($ZPOOL list -H -o name \ | $GREP -v "$NO_POOLS" | $EGREP -vw "$exclude") # Here, we loop through the pools we're allowed to # destroy, only destroying them if it's safe to do # so. while [ ! -z ${ALL_POOLS} ] do for pool in ${ALL_POOLS} do if safe_to_destroy_pool $pool ; then destroy_pool $pool fi ALL_POOLS=$($ZPOOL list -H -o name \ | $GREP -v "$NO_POOLS" \ | $EGREP -v "$exclude") done done $ZFS mount -a else typeset fs="" for fs in $($ZFS list -H -o name \ | $GREP "^$ZONE_POOL/$ZONE_CTR[01234]/"); do datasetexists $fs && \ log_must $ZFS destroy -Rf $fs done # Clean up here to avoid leaving garbage directories behind. for fs in $($ZFS list -H -o name); do [[ $fs == /$ZONE_POOL ]] && continue [[ -d $fs ]] && log_must $RM -rf $fs/* done # # Reset the $ZONE_POOL/$ZONE_CTR[01234] file system properties to # their default values # for fs in $($ZFS list -H -o name); do if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then log_must $ZFS set reservation=none $fs log_must $ZFS set recordsize=128K $fs log_must $ZFS set mountpoint=/$fs $fs typeset enc="" enc=$(get_prop encryption $fs) if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \ [[ "$enc" == "off" ]]; then log_must $ZFS set checksum=on $fs fi log_must $ZFS set compression=off $fs log_must $ZFS set atime=on $fs log_must $ZFS set devices=off $fs log_must $ZFS set exec=on $fs log_must $ZFS set setuid=on $fs log_must $ZFS set readonly=off $fs log_must $ZFS set snapdir=hidden $fs log_must $ZFS set aclmode=groupmask $fs log_must $ZFS set aclinherit=secure $fs fi done fi [[ -d $TESTDIR ]] && \ log_must $RM -rf $TESTDIR disk1=${DISKS%% *} if is_mpath_device $disk1; then delete_partitions fi } # # Common function used to cleanup storage pools, file systems # and containers. # function default_container_cleanup { if ! is_global_zone; then reexport_pool fi ismounted $TESTPOOL/$TESTCTR/$TESTFS1 [[ $? -eq 0 ]] && \ log_must $ZFS unmount $TESTPOOL/$TESTCTR/$TESTFS1 datasetexists $TESTPOOL/$TESTCTR/$TESTFS1 && \ log_must $ZFS destroy -R $TESTPOOL/$TESTCTR/$TESTFS1 datasetexists $TESTPOOL/$TESTCTR && \ log_must $ZFS destroy -Rf $TESTPOOL/$TESTCTR [[ -e $TESTDIR1 ]] && \ log_must $RM -rf $TESTDIR1 > /dev/null 2>&1 default_cleanup } # # Common function used to cleanup a snapshot of a file system or volume. Defaults # to deleting the file system's snapshot. # # $1 snapshot name # function destroy_snapshot { typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} if ! snapexists $snap; then log_fail "'$snap' does not exist." fi # # The value returned by 'get_prop' is not the real mountpoint when the # snapshot is unmounted, so first check that this snapshot is mounted # on the current system. # typeset mtpt="" if ismounted $snap; then mtpt=$(get_prop mountpoint $snap) (($? != 0)) && \ log_fail "get_prop mountpoint $snap failed." fi log_must $ZFS destroy $snap [[ $mtpt != "" && -d $mtpt ]] && \ log_must $RM -rf $mtpt } # # Common function used to cleanup a clone. # # $1 clone name # function destroy_clone { typeset clone=${1:-$TESTPOOL/$TESTCLONE} if ! datasetexists $clone; then log_fail "'$clone' does not exist."
fi # For the same reason as in destroy_snapshot typeset mtpt="" if ismounted $clone; then mtpt=$(get_prop mountpoint $clone) (($? != 0)) && \ log_fail "get_prop mountpoint $clone failed." fi log_must $ZFS destroy $clone [[ $mtpt != "" && -d $mtpt ]] && \ log_must $RM -rf $mtpt } # # Common function used to cleanup a bookmark of a file system or volume. Defaults # to deleting the file system's bookmark. # # $1 bookmark name # function destroy_bookmark { typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK} if ! bkmarkexists $bkmark; then log_fail "'$bkmark' does not exist." fi log_must $ZFS destroy $bkmark } # Return 0 if a snapshot exists; $? otherwise # # $1 - snapshot name function snapexists { $ZFS list -H -t snapshot "$1" > /dev/null 2>&1 return $? } # # Return 0 if a bookmark exists; $? otherwise # # $1 - bookmark name # function bkmarkexists { $ZFS list -H -t bookmark "$1" > /dev/null 2>&1 return $? } # # Set a property to a certain value on a dataset. # Sets a property of the dataset to the value as passed in. # @param: # $1 dataset whose property is being set # $2 property to set # $3 value to set property to # @return: # 0 if the property could be set. # non-zero otherwise. # @use: ZFS # function dataset_setprop { typeset fn=dataset_setprop if (($# < 3)); then log_note "$fn: Insufficient parameters (need 3, had $#)" return 1 fi typeset output= output=$($ZFS set $2=$3 $1 2>&1) typeset rv=$? if ((rv != 0)); then log_note "Setting property on $1 failed." log_note "property $2=$3" log_note "Return Code: $rv" log_note "Output: $output" return $rv fi return 0 } # # Assign suite defined dataset properties. # This function is used to apply the suite's defined default set of # properties to a dataset. # @parameters: $1 dataset to use # @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP # @returns: # 0 if the dataset has been altered. # 1 if no pool name was passed in. # 2 if the dataset could not be found. # 3 if the dataset could not have its properties set. # function dataset_set_defaultproperties { typeset dataset="$1" [[ -z $dataset ]] && return 1 typeset confset= typeset -i found=0 for confset in $($ZFS list); do if [[ $dataset = $confset ]]; then found=1 break fi done [[ $found -eq 0 ]] && return 2 if [[ -n $COMPRESSION_PROP ]]; then dataset_setprop $dataset compression $COMPRESSION_PROP || \ return 3 log_note "Compression set to '$COMPRESSION_PROP' on $dataset" fi if [[ -n $CHECKSUM_PROP ]]; then dataset_setprop $dataset checksum $CHECKSUM_PROP || \ return 3 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset" fi return 0 } # # Check a numeric assertion # @parameter: $@ the assertion to check # @output: big loud notice if assertion failed # @use: log_fail # function assert { (($@)) || log_fail "$@" } # # Function to format partition size of a disk # Given a disk cxtxdx, reduces all partitions # to 0 size # function zero_partitions # <whole_disk_name> { typeset diskname=$1 typeset i if is_linux; then log_must $FORMAT $DEV_DSKDIR/$diskname -s -- mklabel gpt else for i in 0 1 3 4 5 6 7 do set_partition $i "" 0mb $diskname done fi } # # Given a slice, size and disk, this function # formats the slice to the specified size. # Size should be specified with units as per # the `format` command requirements e.g. 100mb 3gb # # NOTE: This entire interface is problematic for the Linux parted utility # which requires the end of the partition to be specified. It would be # best to retire this interface and replace it with something more flexible. # At the moment a best effort is made.
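#
# Illustrative invocation (hypothetical disk name): create a 100mb
# slice 0 at the default starting offset, then a second slice right
# after it, mirroring the pattern partition_disk uses internally:
#
#	set_partition 0 "" 100mb c1t0d0
#	typeset cyl=$(get_endslice c1t0d0 0)
#	set_partition 1 "$cyl" 100mb c1t0d0
#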
# function set_partition # <slice_num> <slice_start> <size_plus_units> <whole_disk_name> { typeset -i slicenum=$1 typeset start=$2 typeset size=$3 typeset disk=$4 [[ -z $slicenum || -z $size || -z $disk ]] && \ log_fail "The slice, size or disk name is unspecified." if is_linux; then typeset size_mb=${size%%[mMgG]} size_mb=${size_mb%%[mMgG][bB]} if [[ ${size:1:1} == 'g' ]]; then ((size_mb = size_mb * 1024)) fi # Create GPT partition table when setting slice 0 or # when the device doesn't already contain a GPT label. $FORMAT $DEV_DSKDIR/$disk -s -- print 1 >/dev/null typeset ret_val=$? if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then log_must $FORMAT $DEV_DSKDIR/$disk -s -- mklabel gpt fi # When no start is given align on the first cylinder. if [[ -z "$start" ]]; then start=1 fi # Determine the cylinder size for the device and using # that calculate the end offset in cylinders. typeset -i cly_size_kb=0 cly_size_kb=$($FORMAT -m $DEV_DSKDIR/$disk -s -- \ unit cyl print | $HEAD -3 | $TAIL -1 | \ $AWK -F '[:k.]' '{print $4}') ((end = (size_mb * 1024 / cly_size_kb) + start)) log_must $FORMAT $DEV_DSKDIR/$disk -s -- \ mkpart part$slicenum ${start}cyl ${end}cyl $BLOCKDEV --rereadpt $DEV_DSKDIR/$disk 2>/dev/null block_device_wait else typeset format_file=/var/tmp/format_in.$$ $ECHO "partition" >$format_file $ECHO "$slicenum" >> $format_file $ECHO "" >> $format_file $ECHO "" >> $format_file $ECHO "$start" >> $format_file $ECHO "$size" >> $format_file $ECHO "label" >> $format_file $ECHO "" >> $format_file $ECHO "q" >> $format_file $ECHO "q" >> $format_file $FORMAT -e -s -d $disk -f $format_file fi typeset ret_val=$? $RM -f $format_file [[ $ret_val -ne 0 ]] && \ log_fail "Unable to format $disk slice $slicenum to $size" return 0 } # # Delete all partitions on all disks - this is specifically for the use of multipath # devices which currently can only be used in the test suite as raw/un-partitioned # devices (i.e. a zpool cannot be created on a whole mpath device that has partitions) # function delete_partitions { typeset -i j=1 if [[ -z $DISK_ARRAY_NUM ]]; then DISK_ARRAY_NUM=$($ECHO ${DISKS} | $NAWK '{print NF}') fi if [[ -z $DISKSARRAY ]]; then DISKSARRAY=$DISKS fi if is_linux; then if (( $DISK_ARRAY_NUM == 1 )); then while ((j < MAX_PARTITIONS)); do $FORMAT $DEV_DSKDIR/$DISK -s rm $j > /dev/null 2>&1 if (( $? == 1 )); then $LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null if (( $? == 1 )); then log_note "Partitions for $DISK should be deleted" else log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted" fi return 0 else $LSBLK | $EGREP ${DISK}${SLICE_PREFIX}${j} > /dev/null if (( $? == 0 )); then log_fail "Partition for ${DISK}${SLICE_PREFIX}${j} not deleted" fi fi ((j = j+1)) done else for disk in `$ECHO $DISKSARRAY`; do while ((j < MAX_PARTITIONS)); do $FORMAT $DEV_DSKDIR/$disk -s rm $j > /dev/null 2>&1 if (( $? == 1 )); then $LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null if (( $? == 1 )); then log_note "Partitions for $disk should be deleted" else log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted" fi j=7 else $LSBLK | $EGREP ${disk}${SLICE_PREFIX}${j} > /dev/null if (( $? == 0 )); then log_fail "Partition for ${disk}${SLICE_PREFIX}${j} not deleted" fi fi ((j = j+1)) done j=1 done fi fi return 0 } # # Get the end cyl of the given slice # function get_endslice # <disk> <slice> { typeset disk=$1 typeset slice=$2 if [[ -z $disk || -z $slice ]] ; then log_fail "The disk name or slice number is unspecified."
fi if is_linux; then endcyl=$($FORMAT -s $DEV_DSKDIR/$disk -- unit cyl print | \ $GREP "part${slice}" | \ $AWK '{print $3}' | \ $SED 's,cyl,,') ((endcyl = (endcyl + 1))) else disk=${disk#/dev/dsk/} disk=${disk#/dev/rdsk/} disk=${disk%s*} typeset -i ratio=0 ratio=$($PRTVTOC /dev/rdsk/${disk}s2 | \ $GREP "sectors\/cylinder" | \ $AWK '{print $2}') if ((ratio == 0)); then return fi typeset -i endcyl=$($PRTVTOC -h /dev/rdsk/${disk}s2 | $NAWK -v token="$slice" '{if ($1==token) print $6}') ((endcyl = (endcyl + 1) / ratio)) fi echo $endcyl } # # Given a size, disk and total slice number, this function formats the # disk slices from 0 to the total slice number with the same specified # size. # function partition_disk # <slice_size> <whole_disk_name> <total_slices> { typeset -i i=0 typeset slice_size=$1 typeset disk_name=$2 typeset total_slices=$3 typeset cyl zero_partitions $disk_name while ((i < $total_slices)); do if ! is_linux; then if ((i == 2)); then ((i = i + 1)) continue fi fi set_partition $i "$cyl" $slice_size $disk_name cyl=$(get_endslice $disk_name $i) ((i = i+1)) done } # # This function continues to write to a filenum number of files into dirnum # number of directories until either $FILE_WRITE returns an error or the # maximum number of files per directory have been written. # # Usage: # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data] # # Return value: 0 on success # non 0 on error # # Where: # destdir: is the directory where everything is to be created under # dirnum: the maximum number of subdirectories to use, -1 no limit # filenum: the maximum number of files per subdirectory # bytes: number of bytes to write # num_writes: number of times to write out bytes # data: the data that will be written # # E.g. # fill_fs /testdir 20 25 1024 256 0 # # Note: bytes * num_writes equals the size of the testfile # function fill_fs # destdir dirnum filenum bytes num_writes data { typeset destdir=${1:-$TESTDIR} typeset -i dirnum=${2:-50} typeset -i filenum=${3:-50} typeset -i bytes=${4:-8192} typeset -i num_writes=${5:-10240} typeset -i data=${6:-0} typeset -i odirnum=1 typeset -i idirnum=0 typeset -i fn=0 typeset -i retval=0 log_must $MKDIR -p $destdir/$idirnum while (($odirnum > 0)); do if ((dirnum >= 0 && idirnum >= dirnum)); then odirnum=0 break fi $FILE_WRITE -o create -f $destdir/$idirnum/$TESTFILE.$fn \ -b $bytes -c $num_writes -d $data retval=$? if (($retval != 0)); then odirnum=0 break fi if (($fn >= $filenum)); then fn=0 ((idirnum = idirnum + 1)) log_must $MKDIR -p $destdir/$idirnum else ((fn = fn + 1)) fi done return $retval } # # Simple function to get the specified property. If unable to # get the property then return 1. # # Note property is in 'parsable' format (-p) # function get_prop # property dataset { typeset prop_val typeset prop=$1 typeset dataset=$2 prop_val=$($ZFS get -pH -o value $prop $dataset 2>/dev/null) if [[ $? -ne 0 ]]; then log_note "Unable to get $prop property for dataset " \ "$dataset" return 1 fi $ECHO "$prop_val" return 0 } # # Simple function to get the specified property of pool. If unable to # get the property then return 1. # function get_pool_prop # property pool { typeset prop_val typeset prop=$1 typeset pool=$2 if poolexists $pool ; then prop_val=$($ZPOOL get $prop $pool 2>/dev/null | $TAIL -1 | \ $AWK '{print $3}') if [[ $? -ne 0 ]]; then log_note "Unable to get $prop property for pool " \ "$pool" return 1 fi else log_note "Pool $pool does not exist." return 1 fi $ECHO "$prop_val" return 0 } # Return 0 if a pool exists; $?
otherwise # # $1 - pool name function poolexists { typeset pool=$1 if [[ -z $pool ]]; then log_note "No pool name given." return 1 fi $ZPOOL get name "$pool" > /dev/null 2>&1 return $? } # Return 0 if all the specified datasets exist; $? otherwise # # $1-n dataset name function datasetexists { if (($# == 0)); then log_note "No dataset name given." return 1 fi while (($# > 0)); do $ZFS get name $1 > /dev/null 2>&1 || \ return $? shift done return 0 } # Return 0 if none of the specified datasets exists, otherwise return 1. # # $1-n dataset name function datasetnonexists { if (($# == 0)); then log_note "No dataset name given." return 1 fi while (($# > 0)); do $ZFS list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \ && return 1 shift done return 0 } # # Given a mountpoint, or a dataset name, determine if it is shared via NFS. # # Returns 0 if shared, 1 otherwise. # function is_shared { typeset fs=$1 typeset mtpt if [[ $fs != "/"* ]] ; then if datasetnonexists "$fs" ; then return 1 else mtpt=$(get_prop mountpoint "$fs") case $mtpt in none|legacy|-) return 1 ;; *) fs=$mtpt ;; esac fi fi if is_linux; then for mtpt in `$SHARE | $AWK '{print $1}'` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done return 1 fi for mtpt in `$SHARE | $AWK '{print $2}'` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done typeset stat=$($SVCS -H -o STA nfs/server:default) if [[ $stat != "ON" ]]; then log_note "Current nfs/server status: $stat" fi return 1 } # # Given a dataset name determine if it is shared via SMB. # # Returns 0 if shared, 1 otherwise. # function is_shared_smb { typeset fs=$1 typeset mtpt if datasetnonexists "$fs" ; then return 1 else fs=$(echo $fs | sed 's@/@_@g') fi if is_linux; then for mtpt in `$NET usershare list | $AWK '{print $1}'` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done return 1 else log_unsupported "Currently unsupported by the test framework" return 1 fi } # # Given a mountpoint, determine if it is not shared via NFS. # # Returns 0 if not shared, 1 otherwise. # function not_shared { typeset fs=$1 is_shared $fs if (($? == 0)); then return 1 fi return 0 } # # Given a dataset determine if it is not shared via SMB. # # Returns 0 if not shared, 1 otherwise. # function not_shared_smb { typeset fs=$1 is_shared_smb $fs if (($? == 0)); then return 1 fi return 0 } # # Helper function to unshare a mountpoint. # function unshare_fs #fs { typeset fs=$1 is_shared $fs || is_shared_smb $fs if (($? == 0)); then log_must $ZFS unshare $fs fi return 0 } # # Helper function to share an NFS mountpoint. # function share_nfs #fs { typeset fs=$1 if is_linux; then is_shared $fs if (($? != 0)); then log_must $SHARE "*:$fs" fi else is_shared $fs if (($? != 0)); then log_must $SHARE -F nfs $fs fi fi return 0 } # # Helper function to unshare an NFS mountpoint. # function unshare_nfs #fs { typeset fs=$1 if is_linux; then is_shared $fs if (($? == 0)); then log_must $UNSHARE -u "*:$fs" fi else is_shared $fs if (($? == 0)); then log_must $UNSHARE -F nfs $fs fi fi return 0 } # # Helper function to show NFS shares. # function showshares_nfs { if is_linux; then $SHARE -v else $SHARE -F nfs fi return 0 } # # Helper function to show SMB shares. # function showshares_smb { if is_linux; then $NET usershare list else $SHARE -F smb fi return 0 } # # Check NFS server status and trigger it online. # function setup_nfs_server { # Cannot share directory in non-global zone. # if ! is_global_zone; then log_note "Cannot trigger NFS server by sharing in LZ."
return fi if is_linux; then log_note "NFS server must be started prior to running the test framework." return fi typeset nfs_fmri="svc:/network/nfs/server:default" if [[ $($SVCS -Ho STA $nfs_fmri) != "ON" ]]; then # # Only a real sharing operation can bring the NFS server # online permanently. # typeset dummy=/tmp/dummy if [[ -d $dummy ]]; then log_must $RM -rf $dummy fi log_must $MKDIR $dummy log_must $SHARE $dummy # # Wait for the fmri's status to reach its final state. # Otherwise, while in transition, an asterisk (*) is appended to # instances and unshare will revert the status to 'DIS' again. # # Wait for at least 1 second. # log_must $SLEEP 1 timeout=10 while [[ timeout -ne 0 && $($SVCS -Ho STA $nfs_fmri) == *'*' ]] do log_must $SLEEP 1 ((timeout -= 1)) done log_must $UNSHARE $dummy log_must $RM -rf $dummy fi log_note "Current NFS status: '$($SVCS -Ho STA,FMRI $nfs_fmri)'" } # # To verify whether the calling process is in the global zone # # Return 0 if in global zone, 1 in non-global zone # function is_global_zone { typeset cur_zone=$($ZONENAME 2>/dev/null) if [[ $cur_zone != "global" ]]; then return 1 fi return 0 } # # Verify whether test is permitted to run from # global zone, local zone, or both # # $1 zone limit, could be "global", "local", or "both" (no limit) # # Return 0 if permitted, otherwise exit with log_unsupported # function verify_runnable # zone limit { typeset limit=$1 [[ -z $limit ]] && return 0 if is_global_zone ; then case $limit in global|both) ;; local) log_unsupported "Test is unable to run from "\ "global zone." ;; *) log_note "Warning: unknown limit $limit - " \ "use both." ;; esac else case $limit in local|both) ;; global) log_unsupported "Test is unable to run from "\ "local zone." ;; *) log_note "Warning: unknown limit $limit - " \ "use both." ;; esac reexport_pool fi return 0 } # Return 0 if created successfully or the pool exists; $? otherwise # Note: In local zones, this function should return 0 silently. # # $1 - pool name # $2-n - [keyword] devs_list function create_pool #pool devs_list { typeset pool=${1%%/*} shift if [[ -z $pool ]]; then log_note "Missing pool name." return 1 fi if poolexists $pool ; then destroy_pool $pool fi if is_global_zone ; then [[ -d /$pool ]] && $RM -rf /$pool log_must $ZPOOL create -f $pool $@ fi return 0 } # Return 0 if destroyed successfully; $? otherwise # Note: In local zones, this function should return 0 silently. # # $1 - pool name # Destroy pool with the given parameters. function destroy_pool #pool { typeset pool=${1%%/*} typeset mtpt if [[ -z $pool ]]; then log_note "No pool name given." return 1 fi if is_global_zone ; then if poolexists "$pool" ; then mtpt=$(get_prop mountpoint "$pool") # At times, syseventd activity can cause attempts to # destroy a pool to fail with EBUSY. We retry a few # times allowing failures before requiring the destroy # to succeed. typeset -i wait_time=10 ret=1 count=0 must="" while [[ $ret -ne 0 ]]; do $must $ZPOOL destroy -f $pool ret=$? [[ $ret -eq 0 ]] && break log_note "zpool destroy failed with $ret" [[ count++ -ge 7 ]] && must=log_must $SLEEP $wait_time done [[ -d $mtpt ]] && \ log_must $RM -rf $mtpt else log_note "Pool does not exist. ($pool)" return 1 fi fi return 0 } # # First, create a pool with 5 datasets. Then, create a single zone and # export the 5 datasets to it. In addition, we also add a ZFS filesystem # and a zvol device to the zone.
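#
# (For illustration, the zonecfg input generated below looks roughly like
# this, with one dataset block per container plus the zvol device entries:
#
#	add dataset
#	set name=$ZONE_POOL/${ZONE_CTR}0
#	end
#	add device
#	set match=$ZVOL_DEVDIR/$ZONE_POOL/zone_zvol
#	end
# )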
# # $1 zone name # $2 zone root directory prefix # $3 zone ip # function zfs_zones_setup #zone_name zone_root zone_ip { typeset zone_name=${1:-$(hostname)-z} typeset zone_root=${2:-"/zone_root"} typeset zone_ip=${3:-"10.1.1.10"} typeset prefix_ctr=$ZONE_CTR typeset pool_name=$ZONE_POOL typeset -i cntctr=5 typeset -i i=0 # Create a pool and 5 containers within it # [[ -d /$pool_name ]] && $RM -rf /$pool_name log_must $ZPOOL create -f $pool_name $DISKS while ((i < cntctr)); do log_must $ZFS create $pool_name/$prefix_ctr$i ((i += 1)) done # create a zvol log_must $ZFS create -V 1g $pool_name/zone_zvol block_device_wait # # If the current system supports slog, add a slog device for the pool # if verify_slog_support ; then typeset sdevs="/var/tmp/sdev1 /var/tmp/sdev2" - log_must $MKFILE 100M $sdevs + log_must $MKFILE $MINVDEVSIZE $sdevs log_must $ZPOOL add $pool_name log mirror $sdevs fi # this isn't supported just yet. # Create a filesystem. In order to add this to # the zone, it must have its mountpoint set to 'legacy' # log_must $ZFS create $pool_name/zfs_filesystem # log_must $ZFS set mountpoint=legacy $pool_name/zfs_filesystem [[ -d $zone_root ]] && \ log_must $RM -rf $zone_root/$zone_name [[ ! -d $zone_root ]] && \ log_must $MKDIR -p -m 0700 $zone_root/$zone_name # Create the zone config file and configure the zone # typeset zone_conf=/tmp/zone_conf.$$ $ECHO "create" > $zone_conf $ECHO "set zonepath=$zone_root/$zone_name" >> $zone_conf $ECHO "set autoboot=true" >> $zone_conf i=0 while ((i < cntctr)); do $ECHO "add dataset" >> $zone_conf $ECHO "set name=$pool_name/$prefix_ctr$i" >> \ $zone_conf $ECHO "end" >> $zone_conf ((i += 1)) done # add our zvol to the zone $ECHO "add device" >> $zone_conf $ECHO "set match=$ZVOL_DEVDIR/$pool_name/zone_zvol" >> $zone_conf $ECHO "end" >> $zone_conf # add a corresponding zvol rdsk to the zone $ECHO "add device" >> $zone_conf $ECHO "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf $ECHO "end" >> $zone_conf # once it's supported, we'll add our filesystem to the zone # $ECHO "add fs" >> $zone_conf # $ECHO "set type=zfs" >> $zone_conf # $ECHO "set special=$pool_name/zfs_filesystem" >> $zone_conf # $ECHO "set dir=/export/zfs_filesystem" >> $zone_conf # $ECHO "end" >> $zone_conf $ECHO "verify" >> $zone_conf $ECHO "commit" >> $zone_conf log_must $ZONECFG -z $zone_name -f $zone_conf log_must $RM -f $zone_conf # Install the zone $ZONEADM -z $zone_name install if (($? == 0)); then log_note "SUCCESS: $ZONEADM -z $zone_name install" else log_fail "FAIL: $ZONEADM -z $zone_name install" fi # Install sysidcfg file # typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg $ECHO "system_locale=C" > $sysidcfg $ECHO "terminal=dtterm" >> $sysidcfg $ECHO "network_interface=primary {" >> $sysidcfg $ECHO "hostname=$zone_name" >> $sysidcfg $ECHO "}" >> $sysidcfg $ECHO "name_service=NONE" >> $sysidcfg $ECHO "root_password=mo791xfZ/SFiw" >> $sysidcfg $ECHO "security_policy=NONE" >> $sysidcfg $ECHO "timezone=US/Eastern" >> $sysidcfg # Boot this zone log_must $ZONEADM -z $zone_name boot } # # Reexport TESTPOOL & TESTPOOL(1-4) # function reexport_pool { typeset -i cntctr=5 typeset -i i=0 while ((i < cntctr)); do if ((i == 0)); then TESTPOOL=$ZONE_POOL/$ZONE_CTR$i if ! ismounted $TESTPOOL; then log_must $ZFS mount $TESTPOOL fi else eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i if eval !
ismounted \$TESTPOOL$i; then log_must eval $ZFS mount \$TESTPOOL$i fi fi ((i += 1)) done } # # Verify a given disk is online or offline # # Return 0 if the pool/disk matches expected state, 1 otherwise # function check_state # pool disk state{online,offline} { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset state=$3 $ZPOOL status -v $pool | grep "$disk" \ | grep -i "$state" > /dev/null 2>&1 return $? } # # Get the mountpoint of snapshot # For the snapshot use <mp_filesystem>/.zfs/snapshot/<snap> # as its mountpoint # function snapshot_mountpoint { typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} if [[ $dataset != *@* ]]; then log_fail "Invalid snapshot name '$dataset'." fi typeset fs=${dataset%@*} typeset snap=${dataset#*@} if [[ -z $fs || -z $snap ]]; then log_fail "Invalid snapshot name '$dataset'." fi $ECHO $(get_prop mountpoint $fs)/.zfs/snapshot/$snap } # # Given a pool and file system, this function will verify the file system # using the zdb internal tool. Note that the pool is exported and imported # to ensure it has consistent state. # function verify_filesys # pool filesystem dir { typeset pool="$1" typeset filesys="$2" typeset zdbout="/tmp/zdbout.$$" shift shift typeset dirs=$@ typeset search_path="" log_note "Calling $ZDB to verify filesystem '$filesys'" $ZFS unmount -a > /dev/null 2>&1 log_must $ZPOOL export $pool if [[ -n $dirs ]] ; then for dir in $dirs ; do search_path="$search_path -d $dir" done fi log_must $ZPOOL import $search_path $pool $ZDB -cudi $filesys > $zdbout 2>&1 if [[ $? != 0 ]]; then log_note "Output: $ZDB -cudi $filesys" $CAT $zdbout log_fail "$ZDB detected errors with: '$filesys'" fi log_must $ZFS mount -a log_must $RM -rf $zdbout } # # Given a pool, this function lists all disks in the pool # function get_disklist # pool { typeset disklist="" disklist=$($ZPOOL iostat -v $1 | $NAWK '(NR >4) {print $1}' | \ $GREP -v "\-\-\-\-\-" | \ $EGREP -v -e "^(mirror|raidz1|raidz2|spare|log|cache)$") $ECHO $disklist } # # Given a pool, this function lists all disks in the pool with their full # path (like "/dev/sda" instead of "sda"). # function get_disklist_fullpath # pool { args="-P $1" get_disklist $args } # /** # This function kills a given list of processes after a time period. We use # this in the stress tests instead of STF_TIMEOUT so that we can have processes # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT # would be listed as FAIL, which we don't want: we're happy with stress tests # running for a certain amount of time, then finishing. # # @param $1 the time in seconds after which we should terminate these processes # @param $2..$n the processes we wish to terminate. # */ function stress_timeout { typeset -i TIMEOUT=$1 shift typeset cpids="$@" log_note "Waiting for child processes($cpids). " \ "It could last dozens of minutes, please be patient ..." log_must $SLEEP $TIMEOUT log_note "Killing child processes after ${TIMEOUT} stress timeout." typeset pid for pid in $cpids; do $PS -p $pid > /dev/null 2>&1 if (($?
== 0)); then log_must $KILL -USR1 $pid fi done } # # Verify a given hotspare disk is inuse or avail # # Return 0 if the pool/disk matches expected state, 1 otherwise # function check_hotspare_state # pool disk state{inuse,avail} { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset state=$3 cur_state=$(get_device_state $pool $disk "spares") if [[ $state != ${cur_state} ]]; then return 1 fi return 0 } # # Verify a given slog disk is inuse or avail # # Return 0 if the pool/disk matches expected state, 1 otherwise # function check_slog_state # pool disk state{online,offline,unavail} { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset state=$3 cur_state=$(get_device_state $pool $disk "logs") if [[ $state != ${cur_state} ]]; then return 1 fi return 0 } # # Verify a given vdev disk is inuse or avail # # Return 0 if the pool/disk matches expected state, 1 otherwise # function check_vdev_state # pool disk state{online,offline,unavail} { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset state=$3 cur_state=$(get_device_state $pool $disk) if [[ $state != ${cur_state} ]]; then return 1 fi return 0 } # # Check the output of 'zpool status -v <pool>', # and see if the content of <token> contains the specified <keyword>. # # Return 0 if it contains the keyword, 1 otherwise # function check_pool_status # pool token keyword { typeset pool=$1 typeset token=$2 typeset keyword=$3 $ZPOOL status -v "$pool" 2>/dev/null | $NAWK -v token="$token:" ' ($1==token) {print $0}' \ | $GREP -i "$keyword" > /dev/null 2>&1 return $? } # # The following 5 functions are instances of check_pool_status() # is_pool_resilvering - to check if the pool resilver is in progress # is_pool_resilvered - to check if the pool resilver is completed # is_pool_scrubbing - to check if the pool scrub is in progress # is_pool_scrubbed - to check if the pool scrub is completed # is_pool_scrub_stopped - to check if the pool scrub is stopped # function is_pool_resilvering #pool { check_pool_status "$1" "scan" "resilver in progress since " return $? } function is_pool_resilvered #pool { check_pool_status "$1" "scan" "resilvered " return $? } function is_pool_scrubbing #pool { check_pool_status "$1" "scan" "scrub in progress since " return $? } function is_pool_scrubbed #pool { check_pool_status "$1" "scan" "scrub repaired" return $? } function is_pool_scrub_stopped #pool { check_pool_status "$1" "scan" "scrub canceled" return $? } # # Use create_pool()/destroy_pool() to clean up the information in # the given disks to avoid slice overlapping. # function cleanup_devices #vdevs { typeset pool="foopool$$" if poolexists $pool ; then destroy_pool $pool fi create_pool $pool $@ destroy_pool $pool return 0 } # # Verify the rsh connectivity to each remote host in RHOSTS. # # Return 0 if remote host is accessible; otherwise 1. # $1 remote host name # $2 username # function verify_rsh_connect #rhost, username { typeset rhost=$1 typeset username=$2 typeset rsh_cmd="$RSH -n" typeset cur_user= $GETENT hosts $rhost >/dev/null 2>&1 if (($? != 0)); then log_note "$rhost cannot be found from" \ "administrative database." return 1 fi $PING $rhost 3 >/dev/null 2>&1 if (($? != 0)); then log_note "$rhost is not reachable." return 1 fi if ((${#username} != 0)); then rsh_cmd="$rsh_cmd -l $username" cur_user="given user \"$username\"" else cur_user="current user \"`$LOGNAME`\"" fi if ! $rsh_cmd $rhost $TRUE; then log_note "$RSH to $rhost is not accessible" \ "with $cur_user."
return 1 fi return 0 } # # Verify the remote host connection via rsh after rebooting # $1 remote host # function verify_remote { rhost=$1 # # The following loop waits for the remote system to reboot. # Each iteration will wait for 150 seconds. There are # 5 iterations in total, so the total timeout value will # be 12.5 minutes for the system rebooting. This number # is an approximate number. # typeset -i count=0 while ! verify_rsh_connect $rhost; do sleep 150 ((count = count + 1)) if ((count > 5)); then return 1 fi done return 0 } # # Replacement function for /usr/bin/rsh. This function wraps # /usr/bin/rsh and also returns the execution status of the # last command. # # $1 username passed down to the -l option of /usr/bin/rsh # $2 remote machine hostname # $3... command string # function rsh_status { typeset ruser=$1 typeset rhost=$2 typeset -i ret=0 typeset cmd_str="" typeset rsh_str="" shift; shift cmd_str="$@" err_file=/tmp/${rhost}.$$.err if ((${#ruser} == 0)); then rsh_str="$RSH -n" else rsh_str="$RSH -n -l $ruser" fi $rsh_str $rhost /bin/ksh -c "'$cmd_str; \ print -u 2 \"status=\$?\"'" \ >/dev/null 2>$err_file ret=$? if (($ret != 0)); then $CAT $err_file $RM -f $std_file $err_file log_fail "$RSH itself failed with exit code $ret..." fi ret=$($GREP -v 'print -u 2' $err_file | $GREP 'status=' | \ $CUT -d= -f2) (($ret != 0)) && $CAT $err_file >&2 $RM -f $err_file >/dev/null 2>&1 return $ret } # # Get the SUNWstc-fs-zfs package installation path in a remote host # $1 remote host name # function get_remote_pkgpath { typeset rhost=$1 typeset pkgpath="" pkgpath=$($RSH -n $rhost "$PKGINFO -l SUNWstc-fs-zfs | $GREP BASEDIR: |\ $CUT -d: -f2") $ECHO $pkgpath } #/** # A function to find and locate free disks on a system or from given # disks as the parameter. It works by locating disks that are in use # as swap devices and dump devices, and also disks listed in /etc/vfstab # # $@ given disks to find which are free, default is all disks in # the test system # # @return a string containing the list of available disks #*/ function find_disks { # Trust provided list, no attempt is made to locate unused devices. if is_linux; then $ECHO "$@" return fi sfi=/tmp/swaplist.$$ dmpi=/tmp/dumpdev.$$ max_finddisksnum=${MAX_FINDDISKSNUM:-6} $SWAP -l > $sfi $DUMPADM > $dmpi 2>/dev/null # write an awk script that can process the output of format # to produce a list of disks we know about. Note that we have # to escape "$2" so that the shell doesn't interpret it while # we're creating the awk script. # ------------------- $CAT > /tmp/find_disks.awk <<EOF #!/bin/nawk -f BEGIN { FS="."; } /^Specify disk/{ searchdisks=0; } { if (searchdisks && \$2 !~ "^$"){ split(\$2,arr," "); print arr[1]; } } /^AVAILABLE DISK SELECTIONS:/{ searchdisks=1; } EOF # ------------------- $CHMOD 755 /tmp/find_disks.awk disks=${@:-$($FORMAT </dev/null | /tmp/find_disks.awk)} $RM /tmp/find_disks.awk unused="" for disk in $disks; do # Check for mounted $GREP "${disk}[sp]" /etc/mnttab >/dev/null (($? == 0)) && continue # Check for swap $GREP "${disk}[sp]" $sfi >/dev/null (($? == 0)) && continue # check for dump device $GREP "${disk}[sp]" $dmpi >/dev/null (($? == 0)) && continue # check to see if this disk hasn't been explicitly excluded # by a user-set environment variable $ECHO "${ZFS_HOST_DEVICES_IGNORE}" | $GREP "${disk}" > /dev/null (($? == 0)) && continue unused_candidates="$unused_candidates $disk" done $RM $sfi $RM $dmpi # now just check to see if those disks do actually exist # by looking for a device pointing to the first slice in # each case.
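#	(e.g. a candidate c1t0d0 is kept only if the block device
#	$DEV_DSKDIR/c1t0d0s0 exists; the disk name here is hypothetical)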
limit the number to max_finddisksnum count=0 for disk in $unused_candidates; do if [ -b $DEV_DSKDIR/${disk}s0 ]; then if [ $count -lt $max_finddisksnum ]; then unused="$unused $disk" # do not impose limit if $@ is provided [[ -z $@ ]] && ((count = count + 1)) fi fi done # finally, return our disk list $ECHO $unused } # # Add specified user to specified group # # $1 group name # $2 user name # $3 base of the homedir (optional) # function add_user # <group_name> <user_name> <basedir> { typeset gname=$1 typeset uname=$2 typeset basedir=${3:-"/var/tmp"} if ((${#gname} == 0 || ${#uname} == 0)); then log_fail "group name or user name is not defined." fi log_must $USERADD -g $gname -d $basedir/$uname -m $uname # Add new users to the same group and the command line utils. # This allows them to be run out of the original user's home # directory as long as it is permissioned to be group readable. if is_linux; then cmd_group=$(stat --format="%G" $ZFS) log_must $USERMOD -a -G $cmd_group $uname fi return 0 } # # Delete the specified user. # # $1 login name # $2 base of the homedir (optional) # function del_user # <logname> <basedir> { typeset user=$1 typeset basedir=${2:-"/var/tmp"} if ((${#user} == 0)); then log_fail "login name is necessary." fi if $ID $user > /dev/null 2>&1; then log_must $USERDEL $user fi [[ -d $basedir/$user ]] && $RM -fr $basedir/$user return 0 } # # Select a valid gid and create the specified group. # # $1 group name # function add_group # <group_name> { typeset group=$1 if ((${#group} == 0)); then log_fail "group name is necessary." fi # Assign 100 as the base gid, a larger value is selected for # Linux because for many distributions 1000 and under are reserved. if is_linux; then while true; do $GROUPADD $group > /dev/null 2>&1 typeset -i ret=$? case $ret in 0) return 0 ;; *) return 1 ;; esac done else typeset -i gid=100 while true; do $GROUPADD -g $gid $group > /dev/null 2>&1 typeset -i ret=$? case $ret in 0) return 0 ;; # The gid is not unique 4) ((gid += 1)) ;; *) return 1 ;; esac done fi } # # Delete the specified group. # # $1 group name # function del_group # <group_name> { typeset grp=$1 if ((${#grp} == 0)); then log_fail "group name is necessary." fi if is_linux; then $GETENT group $grp > /dev/null 2>&1 typeset -i ret=$? case $ret in # Group does not exist. 2) return 0 ;; # Name already exists as a group name 0) log_must $GROUPDEL $grp ;; *) return 1 ;; esac else $GROUPMOD -n $grp $grp > /dev/null 2>&1 typeset -i ret=$? case $ret in # Group does not exist. 6) return 0 ;; # Name already exists as a group name 9) log_must $GROUPDEL $grp ;; *) return 1 ;; esac fi return 0 } # # This function will return true if it's safe to destroy the pool passed # as argument 1. It checks for pools based on zvols and files, and also # files contained in a pool that may have a different mountpoint. # function safe_to_destroy_pool { # $1 the pool name typeset pool="" typeset DONT_DESTROY="" # We check that by deleting the $1 pool, we're not # going to pull the rug out from other pools. Do this # by looking at all other pools, ensuring that they # aren't built from files or zvols contained in this pool. for pool in $($ZPOOL list -H -o name) do ALTMOUNTPOOL="" # this is a list of the top-level directories in each of the # files that make up the path to the files the pool is based on FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \ $AWK '{print $1}') # this is a list of the zvols that make up the pool ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \ | $AWK '{print $1}') # also want to determine if it's a file-based pool using an # alternate mountpoint...
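#	(each matching 'zpool status' line is reduced to the top-level
#	directory component of the file vdev's path, e.g. /var/tmp/file1
#	yields "var", which is then matched against the mountpoints of
#	the datasets in pool $1)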
#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
#
function safe_to_destroy_pool { # $1 the pool name

	typeset pool=""
	typeset DONT_DESTROY=""

	# We check that by deleting the $1 pool, we're not
	# going to pull the rug out from other pools. Do this
	# by looking at all other pools, ensuring that they
	# aren't built from files or zvols contained in this pool.
	for pool in $($ZPOOL list -H -o name)
	do
		ALTMOUNTPOOL=""

		# this is a list of the top-level directories in each of the
		# files that make up the path to the files the pool is based on
		FILEPOOL=$($ZPOOL status -v $pool | $GREP /$1/ | \
		    $AWK '{print $1}')

		# this is a list of the zvols that make up the pool
		ZVOLPOOL=$($ZPOOL status -v $pool | $GREP "$ZVOL_DEVDIR/$1$" \
		    | $AWK '{print $1}')

		# also want to determine if it's a file-based pool using an
		# alternate mountpoint...
		POOL_FILE_DIRS=$($ZPOOL status -v $pool | \
		    $GREP / | $AWK '{print $1}' | \
		    $AWK -F/ '{print $2}' | $GREP -v "dev")

		for pooldir in $POOL_FILE_DIRS
		do
			OUTPUT=$($ZFS list -H -r -o mountpoint $1 | \
			    $GREP "${pooldir}$" | $AWK '{print $1}')

			ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}"
		done

		if [ ! -z "$ZVOLPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ZVOLPOOL on $1"
		fi

		if [ ! -z "$FILEPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $FILEPOOL on $1"
		fi

		if [ ! -z "$ALTMOUNTPOOL" ]
		then
			DONT_DESTROY="true"
			log_note "Pool $pool is built from $ALTMOUNTPOOL on $1"
		fi
	done

	if [ -z "${DONT_DESTROY}" ]
	then
		return 0
	else
		log_note "Warning: it is not safe to destroy $1!"
		return 1
	fi
}

#
# Get the available ZFS compression options
# $1 option type zfs_set|zfs_compress
#
function get_compress_opts
{
	typeset COMPRESS_OPTS
	typeset GZIP_OPTS="gzip gzip-1 gzip-2 gzip-3 gzip-4 gzip-5 \
	    gzip-6 gzip-7 gzip-8 gzip-9"

	if [[ $1 == "zfs_compress" ]] ; then
		COMPRESS_OPTS="on lzjb"
	elif [[ $1 == "zfs_set" ]] ; then
		COMPRESS_OPTS="on off lzjb"
	fi
	typeset valid_opts="$COMPRESS_OPTS"
	$ZFS get 2>&1 | $GREP gzip >/dev/null 2>&1
	if [[ $? -eq 0 ]]; then
		valid_opts="$valid_opts $GZIP_OPTS"
	fi
	$ECHO "$valid_opts"
}

#
# Verify that zfs operations with the -p option work as expected
# $1 operation, value could be create, clone or rename
# $2 dataset type, value could be fs or vol
# $3 dataset name
# $4 new dataset name
#
function verify_opt_p_ops
{
	typeset ops=$1
	typeset datatype=$2
	typeset dataset=$3
	typeset newdataset=$4

	if [[ $datatype != "fs" && $datatype != "vol" ]]; then
		log_fail "$datatype is not supported."
	fi

	# check parameters accordingly
	case $ops in
		create)
			newdataset=$dataset
			dataset=""
			if [[ $datatype == "vol" ]]; then
				ops="create -V $VOLSIZE"
			fi
			;;
		clone)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
				    "when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_must snapexists $dataset
			;;
		rename)
			if [[ -z $newdataset ]]; then
				log_fail "newdataset should not be empty" \
				    "when ops is $ops."
			fi
			log_must datasetexists $dataset
			log_mustnot snapexists $dataset
			;;
		*)
			log_fail "$ops is not supported."
			;;
	esac

	# make sure the upper level filesystem does not exist
	if datasetexists ${newdataset%/*} ; then
		log_must $ZFS destroy -rRf ${newdataset%/*}
	fi

	# without -p option, operation will fail
	log_mustnot $ZFS $ops $dataset $newdataset
	log_mustnot datasetexists $newdataset ${newdataset%/*}

	# with -p option, operation should succeed
	log_must $ZFS $ops -p $dataset $newdataset
	block_device_wait

	if ! datasetexists $newdataset ; then
		log_fail "-p option does not work for $ops"
	fi

	# when $ops is create or clone, redoing the operation should
	# still return zero
	if [[ $ops != "rename" ]]; then
		log_must $ZFS $ops -p $dataset $newdataset
	fi

	return 0
}

#
# Get configuration of pool
# $1 pool name
# $2 config name
#
function get_config
{
	typeset pool=$1
	typeset config=$2
	typeset alt_root

	if ! poolexists "$pool" ; then
		return 1
	fi
	alt_root=$($ZPOOL list -H $pool | $AWK '{print $NF}')
	if [[ $alt_root == "-" ]]; then
		value=$($ZDB -C $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	else
		value=$($ZDB -e $pool | $GREP "$config:" | $AWK -F: \
		    '{print $2}')
	fi
	if [[ -n $value ]] ; then
		value=${value#"'"}
		value=${value%"'"}
	fi
	echo $value

	return 0
}
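
#
# Example for get_config (illustrative only; it assumes the requested
# name, such as pool_guid, appears as "name: value" in the zdb output):
#
#	guid=$(get_config $TESTPOOL pool_guid)
#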
#
# Private function. Randomly select one of the items from the arguments.
#
# $1 count
# $2-n string
#
function _random_get
{
	typeset cnt=$1
	shift

	typeset str="$@"
	typeset -i ind
	((ind = RANDOM % cnt + 1))

	typeset ret=$($ECHO "$str" | $CUT -f $ind -d ' ')
	$ECHO $ret
}

#
# Randomly select one item from the arguments, including the NONE
# (empty) choice.
#
function random_get_with_non
{
	typeset -i cnt=$#
	((cnt += 1))

	_random_get "$cnt" "$@"
}

#
# Randomly select one item from the arguments, which doesn't include
# the NONE string.
#
function random_get
{
	_random_get "$#" "$@"
}

#
# Detect if the current system supports slog
#
function verify_slog_support
{
	typeset dir=/tmp/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	$MKDIR -p $dir
-	$MKFILE 64M $vdev $sdev
+	$MKFILE $MINVDEVSIZE $vdev $sdev

	typeset -i ret=0
	if ! $ZPOOL create -n $pool $vdev log $sdev > /dev/null 2>&1; then
		ret=1
	fi
	$RM -r $dir

	return $ret
}

#
# The function will generate a dataset name with specific length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i iter=0
	typeset l_name=""

	if ((len % baselen == 0)); then
		((iter = len / baselen))
	else
		((iter = len / baselen + 1))
	fi
	while ((iter > 0)); do
		l_name="${l_name}$basestr"

		((iter -= 1))
	done

	$ECHO $l_name
}

#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	typeset cksum
	$SYNC
	cksum=$($ZDB -vvv $1 | $GREP "^Dataset $1 \[" | $GREP "cksum" \
	    | $AWK -F= '{print $7}')
	$ECHO $cksum
}

#
# Get cksum of file
# $1 file path
#
function checksum
{
	typeset cksum
	cksum=$($CKSUM $1 | $AWK '{print $1}')
	$ECHO $cksum
}

#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset field=${3:-$pool}

	state=$($ZPOOL status -v "$pool" 2>/dev/null | \
	    $NAWK -v device=$disk -v pool=$pool -v field=$field \
	    'BEGIN {startconfig=0; startfield=0; }
	    /config:/ {startconfig=1}
	    (startconfig==1) && ($1==field) {startfield=1; next;}
	    (startfield==1) && ($1==device) {print $2; exit;}
	    (startfield==1) &&
	    ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
	echo $state
}

#
# print the given directory filesystem type
#
# $1 directory name
#
function get_fstype
{
	typeset dir=$1

	if [[ -z $dir ]]; then
		log_fail "Usage: get_fstype <directory>"
	fi

	#
	#  $ df -n /
	#  /		  : ufs
	#
	$DF -n $dir | $AWK '{print $3}'
}
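
#
# Example combining the helpers above (illustrative only): pick a random
# compression setting and build a dataset name of a given length.
#
#	comp=$(random_get $(get_compress_opts zfs_set))
#	name=$(gen_dataset_name 200 abcd)	# 4 chars x 50 = 200 characters
#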
#
# Given a disk, label it to VTOC regardless of what label was on the disk
# $1 disk
#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$($UNAME -p)

	if is_linux; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	if [[ $arch == "i386" ]]; then
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
		$ECHO "q" >> $label_file

		$FDISK -B $disk >/dev/null 2>&1
		# wait a while for fdisk to finish
		$SLEEP 60
	elif [[ $arch == "sparc" ]]; then
		$ECHO "label" > $label_file
		$ECHO "0" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "" >> $label_file
		$ECHO "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	$FORMAT -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	$RM -f $label_file
	#
	# wait for the format command to finish
	#
	$SLEEP 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}

#
# check if the system was installed as zfsroot or not
# return: 0 if true, otherwise non-zero
#
function is_zfsroot
{
	$DF -n / | $GREP zfs > /dev/null 2>&1
	return $?
}

#
# get the root filesystem name if it's a zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""
	rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
	    /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Cannot get rootfs"
	fi
	$ZFS list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		$ECHO $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}

#
# get the rootfs's pool name
# return:
#	rootpool name
#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""
	rootfs=$($AWK '{if ($2 == "/" && $3 == "zfs") print $1}' \
	    /etc/mnttab)
	if [[ -z "$rootfs" ]]; then
		log_fail "Cannot get rootpool"
	fi
	$ZFS list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		rootpool=`$ECHO $rootfs | awk -F\/ '{print $1}'`
		$ECHO $rootpool
	else
		log_fail "This is not a zfsroot system."
	fi
}

#
# Get the sub string from specified source string
#
# $1 source string
# $2 start position. Count from 1
# $3 offset
#
function get_substr #src_str pos offset
{
	typeset pos offset

	$ECHO $1 | \
	    $NAWK -v pos=$2 -v offset=$3 '{print substr($0, pos, offset)}'
}

#
# Check if the given device is a physical device
#
function is_physical_device #device
{
	typeset device=${1#$DEV_DSKDIR}
	device=${device#$DEV_RDSKDIR}

	if is_linux; then
		[[ -b "$DEV_DSKDIR/$device" ]] && \
		    [[ -f /sys/module/loop/parameters/max_part ]]
		return $?
	else
		$ECHO $device | $EGREP "^c[0-F]+([td][0-F]+)+$" > /dev/null 2>&1
		return $?
	fi
}

#
# Check if the given device is a real device (ie SCSI device)
#
function is_real_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		$LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP disk > /dev/null 2>&1
		return $?
	fi
}

#
# Check if the given device is a loop device
#
function is_loop_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		$LSBLK $DEV_RDSKDIR/$disk -o TYPE | $EGREP loop > /dev/null 2>&1
		return $?
	fi
}

#
# Check if the given device is a multipath device and if there is a symbolic
# link to a device mapper and to a disk
# Currently no support for dm devices alone without multipath
#
function is_mpath_device #disk
{
	typeset disk=$1
	[[ -z $disk ]] && log_fail "No argument for disk given."

	if is_linux; then
		$LSBLK $DEV_MPATHDIR/$disk -o TYPE | $EGREP mpath > /dev/null 2>&1
		if (($? == 0)); then
			$READLINK $DEV_MPATHDIR/$disk > /dev/null 2>&1
			return $?
		else
			return $?
		fi
	fi
}
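
#
# Example device classification on Linux (illustrative only; sda and
# loop0 are hypothetical device names):
#
#	if is_real_device sda; then
#		log_note "sda is a whole disk"
#	elif is_loop_device loop0; then
#		log_note "loop0 is a loopback device"
#	fi
#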
#
# Set the slice prefix for disk partitioning depending
# on whether the device is a real, multipath, or loop device.
# Currently all disks have to be of the same type, so only
# checks first disk to determine slice prefix.
#
function set_slice_prefix
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
			if ( is_mpath_device $disk ) && [[ -z $($ECHO $disk \
			    | awk 'substr($1,18,1) ~ /^[[:digit:]]+$/') ]] \
			    || ( is_real_device $disk ); then
				export SLICE_PREFIX=""
				return 0
			elif ( is_mpath_device $disk || \
			    is_loop_device $disk ); then
				export SLICE_PREFIX="p"
				return 0
			else
				log_fail "$disk not supported for partitioning."
			fi
			(( i = i + 1))
		done
	fi
}

#
# Set the directory path of the listed devices in $DISK_ARRAY_NUM
# Currently all disks have to be of the same type, so only
# checks first disk to determine device directory
# default = /dev (linux)
# real disk = /dev (linux)
# multipath device = /dev/mapper (linux)
#
function set_device_dir
{
	typeset disk
	typeset -i i=0

	if is_linux; then
		while (( i < $DISK_ARRAY_NUM )); do
			disk="$($ECHO $DISKS | $NAWK '{print $(i + 1)}')"
			if is_mpath_device $disk; then
				export DEV_DSKDIR=$DEV_MPATHDIR
				return 0
			else
				export DEV_DSKDIR=$DEV_RDSKDIR
				return 0
			fi
			(( i = i + 1))
		done
	else
		export DEV_DSKDIR=$DEV_RDSKDIR
	fi
}

#
# Get the directory path of given device
#
function get_device_dir #device
{
	typeset device=$1

	if ! $(is_physical_device $device) ; then
		if [[ $device != "/" ]]; then
			device=${device%/*}
		fi
		if [[ -b "$DEV_DSKDIR/$device" ]]; then
			device="$DEV_DSKDIR"
		fi
		$ECHO $device
	else
		$ECHO "$DEV_DSKDIR"
	fi
}

#
# Get the package name
#
function get_package_name
{
	typeset dirpath=${1:-$STC_NAME}

	echo "SUNWstc-${dirpath}" | /usr/bin/sed -e "s/\//-/g"
}

#
# Get the number of words in a string separated by white space
#
function get_word_count
{
	$ECHO $1 | $WC -w
}

#
# To verify if the required number of disks is given
#
function verify_disk_count
{
	typeset -i min=${2:-1}

	typeset -i count=$(get_word_count "$1")

	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
		    " You specified $count disk(s)"
	fi
}

function ds_is_volume
{
	typeset type=$(get_prop type $1)
	[[ $type = "volume" ]] && return 0
	return 1
}

function ds_is_filesystem
{
	typeset type=$(get_prop type $1)
	[[ $type = "filesystem" ]] && return 0
	return 1
}

function ds_is_snapshot
{
	typeset type=$(get_prop type $1)
	[[ $type = "snapshot" ]] && return 0
	return 1
}

#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
	$SVCS -H -o state labeld 2>/dev/null | $GREP "enabled"
	if (($? != 0)); then
		return 1
	else
		return 0
	fi
}

# Utility function to determine if a system has multiple cpus.
function is_mp
{
	if is_linux; then
		(($($NPROC) > 1))
	else
		(($($PSRINFO | $WC -l) > 1))
	fi

	return $?
}

function get_cpu_freq
{
	if is_linux; then
		lscpu | $AWK '/CPU MHz/ { print $3 }'
	else
		$PSRINFO -v 0 | $AWK '/processor operates at/ {print $6}'
	fi
}

# Run the given command as the user provided.
function user_run
{
	typeset user=$1
	shift

	log_note "user:$user $@"
	eval \$SU \$user -c \"$@\" > /tmp/out 2>/tmp/err
	return $?
}

#
# Check if the pool contains the specified vdevs
#
# $1 pool
# $2..n <vdev> ...
#
# Return 0 if the vdevs are contained in the pool, 1 if any of the specified
# vdevs is not in the pool, and 2 if pool name is missing.
#
function vdevs_in_pool
{
	typeset pool=$1
	typeset vdev

	if [[ -z $pool ]]; then
		log_note "Missing pool name."
		return 2
	fi

	shift

	typeset tmpfile=$($MKTEMP)
	$ZPOOL list -Hv "$pool" >$tmpfile
	for vdev in $@; do
		$GREP -w ${vdev##*/} $tmpfile >/dev/null 2>&1
		[[ $? -ne 0 ]] && return 1
	done

	$RM -f $tmpfile

	return 0;
}
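
#
# Example check with vdevs_in_pool (illustrative only; DISK0 and DISK1
# are hypothetical vdev names):
#
#	log_must $ZPOOL add $TESTPOOL $DISK1
#	vdevs_in_pool $TESTPOOL $DISK0 $DISK1 || \
#	    log_fail "expected vdevs are missing from $TESTPOOL"
#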
function get_max
{
	typeset -l i max=$1
	shift

	for i in "$@"; do
		max=$(echo $((max > i ? max : i)))
	done

	echo $max
}

function get_min
{
	typeset -l i min=$1
	shift

	for i in "$@"; do
		min=$(echo $((min < i ? min : i)))
	done

	echo $min
}

#
# Wait for newly created block devices to have their minors created.
#
function block_device_wait
{
	if is_linux; then
		$UDEVADM trigger
		$UDEVADM settle
	fi
}

#
# Synchronize all the data in the pool
#
# $1 pool name
#
function sync_pool #pool
{
	typeset pool=${1:-$TESTPOOL}

	log_must $SYNC
	log_must $SLEEP 2
	# Flush all the pool data.
	typeset -i ret
	$ZPOOL scrub $pool >/dev/null 2>&1
	ret=$?
	(( $ret != 0 )) && \
	    log_fail "$ZPOOL scrub $pool failed."

	while ! is_pool_scrubbed $pool; do
		if is_pool_resilvered $pool ; then
			log_fail "$pool should not have completed resilvering."
		fi
		log_must $SLEEP 2
	done
}

#
# Wait for the zpool 'freeing' property to drop to zero.
#
# $1 pool name
#
function wait_freeing #pool
{
	typeset pool=${1:-$TESTPOOL}
	while true; do
		[[ "0" == "$($ZPOOL list -Ho freeing $pool)" ]] && break
		log_must $SLEEP 1
	done
}
diff --git a/tests/zfs-tests/tests/functional/bootfs/bootfs_001_pos.ksh b/tests/zfs-tests/tests/functional/bootfs/bootfs_001_pos.ksh
index ad416a5bcc18..2d67258efb94 100755
--- a/tests/zfs-tests/tests/functional/bootfs/bootfs_001_pos.ksh
+++ b/tests/zfs-tests/tests/functional/bootfs/bootfs_001_pos.ksh
@@ -1,78 +1,82 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# The contents of this file are subject to the terms of the
# Common Development and Distribution License (the "License").
# You may not use this file except in compliance with the License.
#
# You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
# or http://www.opensolaris.org/os/licensing.
# See the License for the specific language governing permissions
# and limitations under the License.
#
# When distributing Covered Code, include this CDDL HEADER in each
# file and include the License file at usr/src/OPENSOLARIS.LICENSE.
# If applicable, add the following below this CDDL HEADER, with the
# fields enclosed by brackets "[]" replaced with your own identifying
# information: Portions Copyright [yyyy] [name of copyright owner]
#
# CDDL HEADER END
#

#
# Copyright 2009 Sun Microsystems, Inc. All rights reserved.
# Use is subject to license terms.
#
# Copyright 2015 Nexenta Systems, Inc.
#
+#
+# Copyright (c) 2012, 2015 by Delphix. All rights reserved.
+#
+
. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
#
# Valid datasets and snapshots are accepted as bootfs property values
#
# STRATEGY:
# 1. Create a set of datasets and snapshots in a test pool
# 2. Try setting them as boot filesystems
#

verify_runnable "global"

function cleanup {
	if poolexists $TESTPOOL ; then
		log_must $ZPOOL destroy $TESTPOOL
	fi

	if [[ -f $VDEV ]]; then
		log_must $RM -f $VDEV
	fi
}

$ZPOOL set 2>&1 | $GREP bootfs > /dev/null
if [ $? -ne 0 ]
then
	log_unsupported "bootfs pool property not supported on this release."
fi log_assert "Valid datasets are accepted as bootfs property values" log_onexit cleanup typeset VDEV=$TESTDIR/bootfs_001_pos_a.$$.dat -log_must $MKFILE 400m $VDEV +log_must $MKFILE $MINVDEVSIZE $VDEV create_pool "$TESTPOOL" "$VDEV" log_must $ZFS create $TESTPOOL/$TESTFS log_must $ZFS snapshot $TESTPOOL/$TESTFS@snap log_must $ZFS clone $TESTPOOL/$TESTFS@snap $TESTPOOL/clone log_must $ZPOOL set bootfs=$TESTPOOL/$TESTFS $TESTPOOL log_must $ZPOOL set bootfs=$TESTPOOL/$TESTFS@snap $TESTPOOL log_must $ZPOOL set bootfs=$TESTPOOL/clone $TESTPOOL log_must $ZFS promote $TESTPOOL/clone log_must $ZPOOL set bootfs=$TESTPOOL/clone $TESTPOOL log_pass "Valid datasets are accepted as bootfs property values" diff --git a/tests/zfs-tests/tests/functional/bootfs/bootfs_002_neg.ksh b/tests/zfs-tests/tests/functional/bootfs/bootfs_002_neg.ksh index cd61b9288142..cb1a1c9b9c7d 100755 --- a/tests/zfs-tests/tests/functional/bootfs/bootfs_002_neg.ksh +++ b/tests/zfs-tests/tests/functional/bootfs/bootfs_002_neg.ksh @@ -1,78 +1,82 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # Copyright 2015 Nexenta Systems, Inc. # +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # # Invalid datasets are rejected as boot property values # # STRATEGY: # # 1. Create a zvol # 2. Verify that we can't set the bootfs to that dataset # verify_runnable "global" function cleanup { if datasetexists $TESTPOOL/vol then log_must $ZFS destroy $TESTPOOL/vol fi if poolexists $TESTPOOL then log_must $ZPOOL destroy $TESTPOOL fi if [[ -f $VDEV ]]; then log_must $RM -f $VDEV fi } $ZPOOL set 2>&1 | $GREP bootfs > /dev/null if [ $? -ne 0 ] then log_unsupported "bootfs pool property not supported on this release." fi log_assert "Invalid datasets are rejected as boot property values" log_onexit cleanup typeset VDEV=$TESTDIR/bootfs_002_neg_a.$$.dat log_must $MKFILE 400m $VDEV create_pool "$TESTPOOL" "$VDEV" log_must $ZFS create -V 10m $TESTPOOL/vol block_device_wait log_mustnot $ZPOOL set bootfs=$TESTPOOL/vol $TESTPOOL log_pass "Invalid datasets are rejected as boot property values" diff --git a/tests/zfs-tests/tests/functional/bootfs/bootfs_003_pos.ksh b/tests/zfs-tests/tests/functional/bootfs/bootfs_003_pos.ksh index f3cf1c943dfc..2ead6164022a 100755 --- a/tests/zfs-tests/tests/functional/bootfs/bootfs_003_pos.ksh +++ b/tests/zfs-tests/tests/functional/bootfs/bootfs_003_pos.ksh @@ -1,82 +1,86 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. 
# # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # # Valid pool names are accepted # # STRATEGY: # 1. Using a list of valid pool names # 2. Create a filesystem in that pool # 2. Verify we can set the bootfs to that filesystem # verify_runnable "global" set -A pools "pool.$$" "pool123" "mypool" function cleanup { if poolexists $POOL ; then log_must $ZPOOL destroy $POOL fi $RM /bootfs_003.$$.dat } $ZPOOL set 2>&1 | $GREP bootfs > /dev/null if [ $? -ne 0 ] then log_unsupported "bootfs pool property not supported on this release." fi log_onexit cleanup log_assert "Valid pool names are accepted by zpool set bootfs" -$MKFILE 64m $TESTDIR/bootfs_003.$$.dat +$MKFILE $MINVDEVSIZE $TESTDIR/bootfs_003.$$.dat typeset -i i=0; while [ $i -lt "${#pools[@]}" ] do POOL=${pools[$i]} log_must $ZPOOL create $POOL $TESTDIR/bootfs_003.$$.dat log_must $ZFS create $POOL/$TESTFS log_must $ZPOOL set bootfs=$POOL/$TESTFS $POOL RES=$($ZPOOL get bootfs $POOL | $TAIL -1 | $AWK '{print $3}' ) if [ $RES != "$POOL/$TESTFS" ] then log_fail "Expected $RES == $POOL/$TESTFS" fi log_must $ZPOOL destroy $POOL i=$(( $i + 1 )) done log_pass "Valid pool names are accepted by zpool set bootfs" diff --git a/tests/zfs-tests/tests/functional/bootfs/bootfs_004_neg.ksh b/tests/zfs-tests/tests/functional/bootfs/bootfs_004_neg.ksh index 62bbb3d73302..764147d0de7d 100755 --- a/tests/zfs-tests/tests/functional/bootfs/bootfs_004_neg.ksh +++ b/tests/zfs-tests/tests/functional/bootfs/bootfs_004_neg.ksh @@ -1,91 +1,95 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # # Invalid pool names are rejected by zpool set bootfs # # STRATEGY: # 1. 
Try to set bootfs on some non-existent pools # # # verify_runnable "global" set -A pools "pool//$$" "pool%d123" "mirror" "c0t0d0s0" "pool*23*" "*po!l" \ "%s££%^" function cleanup { if poolexists $POOL; then log_must $ZPOOL destroy $POOL fi $RM /bootfs_004.$$.dat } $ZPOOL set 2>&1 | $GREP bootfs > /dev/null if [ $? -ne 0 ] then log_unsupported "bootfs pool property not supported on this release." fi log_assert "Invalid pool names are rejected by zpool set bootfs" log_onexit cleanup # here, we build up a large string and add it to the list of pool names # a word to the ksh-wary, ${#array[@]} gives you the # total number of entries in an array, so array[${#array[@]}] # will index the last entry+1, ksh arrays start at index 0. COUNT=0 while [ $COUNT -le 1025 ] do bigname="${bigname}o" COUNT=$(( $COUNT + 1 )) done pools[${#pools[@]}]="$bigname" -$MKFILE 64m $TESTDIR/bootfs_004.$$.dat +$MKFILE $MINVDEVSIZE $TESTDIR/bootfs_004.$$.dat typeset -i i=0; while [ $i -lt "${#pools[@]}" ] do POOL=${pools[$i]}/$TESTFS log_mustnot $ZPOOL create $POOL $TESTDIR/bootfs_004.$$.dat log_mustnot $ZFS create $POOL/$TESTFS log_mustnot $ZPOOL set bootfs=$POOL/$TESTFS $POOL i=$(( $i + 1 )) done log_pass "Invalid pool names are rejected by zpool set bootfs" diff --git a/tests/zfs-tests/tests/functional/bootfs/bootfs_005_neg.ksh b/tests/zfs-tests/tests/functional/bootfs/bootfs_005_neg.ksh index 3928cdd72504..f09b32d2b0e9 100755 --- a/tests/zfs-tests/tests/functional/bootfs/bootfs_005_neg.ksh +++ b/tests/zfs-tests/tests/functional/bootfs/bootfs_005_neg.ksh @@ -1,79 +1,83 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cli_root/zpool_upgrade/zpool_upgrade.kshlib # # DESCRIPTION: # # Boot properties cannot be set on pools with older versions # # STRATEGY: # 1. Copy and import some pools of older versions # 2. Create a filesystem on each # 3. Verify that zpool set bootfs fails on each # verify_runnable "global" function cleanup { # # we need destroy pools that created on top of $TESTPOOL first # typeset pool_name for config in $CONFIGS; do pool_name=$(eval $ECHO \$ZPOOL_VERSION_${config}_NAME) if poolexists $pool_name; then log_must $ZPOOL destroy $pool_name fi done if poolexists $TESTPOOL ; then log_must $ZPOOL destroy $TESTPOOL fi } log_assert "Boot properties cannot be set on pools with older versions" # These are configs from zpool_upgrade.cfg - see that file for more info. 
CONFIGS="1 2 3" log_onexit cleanup log_must $ZPOOL create -f $TESTPOOL $DISKS for config in $CONFIGS do create_old_pool $config POOL_NAME=$(eval $ECHO \$ZPOOL_VERSION_${config}_NAME) log_must $ZFS create $POOL_NAME/$TESTFS log_mustnot $ZPOOL set bootfs=$POOL_NAME/$TESTFS $POOL_NAME log_must destroy_upgraded_pool $config done log_pass "Boot properties cannot be set on pools with older versions" diff --git a/tests/zfs-tests/tests/functional/bootfs/bootfs_006_pos.ksh b/tests/zfs-tests/tests/functional/bootfs/bootfs_006_pos.ksh index fc3ca142f8f0..ab6cae1ee347 100755 --- a/tests/zfs-tests/tests/functional/bootfs/bootfs_006_pos.ksh +++ b/tests/zfs-tests/tests/functional/bootfs/bootfs_006_pos.ksh @@ -1,142 +1,146 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # # Pools of correct vdev types accept boot property # # STRATEGY: # 1. create pools of each vdev type (raid, raidz, raidz2, mirror + hotspares) # 2. verify we can set bootfs on each pool type according to design # verify_runnable "global" $ZPOOL set 2>&1 | $GREP bootfs > /dev/null if [ $? -ne 0 ] then log_unsupported "bootfs pool property not supported on this release." 
fi VDEV1=$TESTDIR/bootfs_006_pos_a.$$.dat VDEV2=$TESTDIR/bootfs_006_pos_b.$$.dat VDEV3=$TESTDIR/bootfs_006_pos_c.$$.dat VDEV4=$TESTDIR/bootfs_006_pos_d.$$.dat function verify_bootfs { # $POOL POOL=$1 log_must $ZFS create $POOL/$TESTFS log_must $ZPOOL set bootfs=$POOL/$TESTFS $POOL VAL=$($ZPOOL get bootfs $POOL | $TAIL -1 | $AWK '{print $3}' ) if [ $VAL != "$POOL/$TESTFS" ] then log_must $ZPOOL status -v $POOL log_fail \ "set/get failed on $POOL - expected $VAL == $POOL/$TESTFS" fi log_must $ZPOOL destroy $POOL } function verify_no_bootfs { # $POOL POOL=$1 log_must $ZFS create $POOL/$TESTFS log_mustnot $ZPOOL set bootfs=$POOL/$TESTFS $POOL VAL=$($ZPOOL get bootfs $POOL | $TAIL -1 | $AWK '{print $3}' ) if [ $VAL == "$POOL/$TESTFS" ] then log_must $ZPOOL status -v $POOL log_fail "set/get unexpectedly failed $VAL != $POOL/$TESTFS" fi log_must $ZPOOL destroy $POOL } function cleanup { if poolexists $TESTPOOL then log_must $ZPOOL destroy $TESTPOOL fi log_must $RM $VDEV1 $VDEV2 $VDEV3 $VDEV4 } log_assert "Pools of correct vdev types accept boot property" log_onexit cleanup -log_must $MKFILE 64m $VDEV1 $VDEV2 $VDEV3 $VDEV4 +log_must $MKFILE $MINVDEVSIZE $VDEV1 $VDEV2 $VDEV3 $VDEV4 ## the following configurations are supported bootable pools # normal log_must $ZPOOL create $TESTPOOL $VDEV1 verify_bootfs $TESTPOOL # normal + hotspare log_must $ZPOOL create $TESTPOOL $VDEV1 spare $VDEV2 verify_bootfs $TESTPOOL # mirror log_must $ZPOOL create $TESTPOOL mirror $VDEV1 $VDEV2 verify_bootfs $TESTPOOL # mirror + hotspare log_must $ZPOOL create $TESTPOOL mirror $VDEV1 $VDEV2 spare $VDEV3 verify_bootfs $TESTPOOL ## the following configurations are not supported as bootable pools # stripe log_must $ZPOOL create $TESTPOOL $VDEV1 $VDEV2 verify_no_bootfs $TESTPOOL # stripe + hotspare log_must $ZPOOL create $TESTPOOL $VDEV1 $VDEV2 spare $VDEV3 verify_no_bootfs $TESTPOOL # raidz log_must $ZPOOL create $TESTPOOL raidz $VDEV1 $VDEV2 verify_no_bootfs $TESTPOOL # raidz + hotspare log_must $ZPOOL create $TESTPOOL raidz $VDEV1 $VDEV2 spare $VDEV3 verify_no_bootfs $TESTPOOL # raidz2 log_must $ZPOOL create $TESTPOOL raidz2 $VDEV1 $VDEV2 $VDEV3 verify_no_bootfs $TESTPOOL # raidz2 + hotspare log_must $ZPOOL create $TESTPOOL raidz2 $VDEV1 $VDEV2 $VDEV3 spare $VDEV4 verify_no_bootfs $TESTPOOL log_pass "Pools of correct vdev types accept boot property" diff --git a/tests/zfs-tests/tests/functional/bootfs/bootfs_008_neg.ksh b/tests/zfs-tests/tests/functional/bootfs/bootfs_008_neg.ksh index 1bbac9eed06b..57da5a24b3df 100755 --- a/tests/zfs-tests/tests/functional/bootfs/bootfs_008_neg.ksh +++ b/tests/zfs-tests/tests/functional/bootfs/bootfs_008_neg.ksh @@ -1,78 +1,82 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. 
# Use is subject to license terms. # +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # # setting bootfs on a dataset which has gzip compression enabled will fail # # STRATEGY: # 1. create pools based on a valid vdev # 2. create a filesystem on this pool and set the compression property to gzip1-9 # 3. set the pool's bootfs property to filesystem we just configured which # should fail # verify_runnable "global" function cleanup { if poolexists $TESTPOOL ; then destroy_pool "$TESTPOOL" fi if [[ -f $VDEV ]]; then log_must $RM -f $VDEV fi } typeset assert_msg="setting bootfs on a dataset which has gzip \ compression enabled will fail" typeset VDEV=$TESTDIR/bootfs_008_neg_a.$$.dat typeset COMP_FS=$TESTPOOL/COMP_FS log_onexit cleanup log_assert $assert_msg -log_must $MKFILE 300m $VDEV +log_must $MKFILE $MINVDEVSIZE $VDEV log_must $ZPOOL create $TESTPOOL $VDEV log_must $ZFS create $COMP_FS typeset -i i=0 set -A gtype "gzip" "gzip-1" "gzip-2" "gzip-3" "gzip-4" "gzip-5" \ "gzip-6" "gzip-7" "gzip-8" "gzip-9" while (( i < ${#gtype[@]} )); do log_must $ZFS set compression=${gtype[i]} $COMP_FS log_mustnot $ZPOOL set bootfs=$COMP_FS $TESTPOOL log_must $ZFS set compression=off $COMP_FS (( i += 1 )) done log_pass $assert_msg diff --git a/tests/zfs-tests/tests/functional/cache/cache.cfg b/tests/zfs-tests/tests/functional/cache/cache.cfg index f3155323d315..07e482d7df96 100644 --- a/tests/zfs-tests/tests/functional/cache/cache.cfg +++ b/tests/zfs-tests/tests/functional/cache/cache.cfg @@ -1,71 +1,71 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . 
$STF_SUITE/include/libtest.shlib export DISK_ARRAY_NUM=0 function set_disks { set -A disk_array $(find_disks $DISKS) if (( ${#disk_array[*]} <= 1 )); then export DISK=${DISKS%% *} else export DISK="" typeset -i i=0 while (( i < ${#disk_array[*]} )); do export DISK${i}="${disk_array[$i]}" DISKSARRAY="$DISKSARRAY ${disk_array[$i]}" (( i = i + 1 )) done export DISK_ARRAY_NUM=$i export DISKSARRAY fi if (( $DISK_ARRAY_NUM == 0 )); then export disk=$DISK else export disk=$DISK0 fi } set_disks set_device_dir -export SIZE=64M +export SIZE=$MINVDEVSIZE export VDIR=$TESTDIR/disk.cache export VDIR2=$TESTDIR/disk2.cache export VDEV="$VDIR/a $VDIR/b $VDIR/c" export LDEV="$DISK0" export VDEV2="$VDIR2/a $VDIR2/b $VDIR2/c" export LDEV2="$DISK1" diff --git a/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh b/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh index 98adfd08db4f..ea12cf2b6e86 100755 --- a/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/cachefile/cachefile_004_pos.ksh @@ -1,124 +1,124 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cachefile/cachefile.cfg . $STF_SUITE/tests/functional/cachefile/cachefile.kshlib # # DESCRIPTION: # Verify set, export and destroy when cachefile is set on pool. # # STRATEGY: # 1. Create two pools with one same cahcefile1. # 2. Set cachefile of the two pools to another same cachefile2. # 3. Verify cachefile1 not exist. # 4. Export the two pools. # 5. Verify cachefile2 not exist. # 6. Import the two pools and set cachefile to cachefile2. # 7. Destroy the two pools. # 8. Verify cachefile2 not exist. # verify_runnable "global" function cleanup { poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1 poolexists $TESTPOOL2 && destroy_pool $TESTPOOL2 mntpnt=$(get_prop mountpoint $TESTPOOL) typeset -i i=0 while ((i < 2)); do if [[ -e $mntpnt/vdev$i ]]; then log_must $RM -f $mntpnt/vdev$i fi ((i += 1)) done if poolexists $TESTPOOL ; then destroy_pool $TESTPOOL fi for file in $CPATH1 $CPATH2 ; do if [[ -f $file ]] ; then log_must $RM $file fi done } log_assert "Verify set, export and destroy when cachefile is set on pool." 
log_onexit cleanup log_must $ZPOOL create $TESTPOOL $DISKS mntpnt=$(get_prop mountpoint $TESTPOOL) typeset -i i=0 while ((i < 2)); do - log_must $MKFILE 64M $mntpnt/vdev$i + log_must $MKFILE $MINVDEVSIZE $mntpnt/vdev$i eval vdev$i=$mntpnt/vdev$i ((i += 1)) done log_must $ZPOOL create -o cachefile=$CPATH1 $TESTPOOL1 $vdev0 log_must pool_in_cache $TESTPOOL1 $CPATH1 log_must $ZPOOL create -o cachefile=$CPATH1 $TESTPOOL2 $vdev1 log_must pool_in_cache $TESTPOOL2 $CPATH1 log_must $ZPOOL set cachefile=$CPATH2 $TESTPOOL1 log_must pool_in_cache $TESTPOOL1 $CPATH2 log_must $ZPOOL set cachefile=$CPATH2 $TESTPOOL2 log_must pool_in_cache $TESTPOOL2 $CPATH2 if [[ -f $CPATH1 ]]; then log_fail "Verify set when cachefile is set on pool." fi log_must $ZPOOL export $TESTPOOL1 log_must $ZPOOL export $TESTPOOL2 if [[ -f $CPATH2 ]]; then log_fail "Verify export when cachefile is set on pool." fi log_must $ZPOOL import -d $mntpnt $TESTPOOL1 log_must $ZPOOL set cachefile=$CPATH2 $TESTPOOL1 log_must pool_in_cache $TESTPOOL1 $CPATH2 log_must $ZPOOL import -d $mntpnt $TESTPOOL2 log_must $ZPOOL set cachefile=$CPATH2 $TESTPOOL2 log_must pool_in_cache $TESTPOOL2 $CPATH2 log_must $ZPOOL destroy $TESTPOOL1 log_must $ZPOOL destroy $TESTPOOL2 if [[ -f $CPATH2 ]]; then log_fail "Verify destroy when cachefile is set on pool." fi log_pass "Verify set, export and destroy when cachefile is set on pool." diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh index 579e74784165..f2e5ce6cfd53 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_destroy/zfs_destroy_015_pos.ksh @@ -1,161 +1,161 @@ #!/bin/ksh # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # # DESCRIPTION # zfs destroy can destroy a list of multiple # snapshots from the same datasets # # STRATEGY # 1. Create multiple snapshots for the same datset # 2. Run zfs destroy for these snapshots for a mix of valid and # invalid snapshot names # 3. Run zfs destroy for snapshots from different datasets and # pools . $STF_SUITE/include/libtest.shlib . 
$STF_SUITE/tests/functional/cli_root/zfs_destroy/zfs_destroy.cfg function cleanup { datasetexists $TESTPOOL/$TESTFS1 && $ZFS destroy -R $TESTPOOL/$TESTFS1 datasetexists $TESTPOOL/$TESTFS2 && $ZFS destroy -R $TESTPOOL/$TESTFS2 poolexists $TESTPOOL2 && $ZPOOL destroy $TESTPOOL2 $RM -rf $VIRTUAL_DISK } log_assert "zfs destroy for multiple snapshot is handled correctly" log_onexit cleanup $ZFS create $TESTPOOL/$TESTFS1 typeset -i i=1 snaplist="" log_note "zfs destroy on valid snapshot names" for i in 1 2 3 4 5; do log_must $ZFS snapshot $TESTPOOL/$TESTFS1@snap$i snaplist=$snaplist,snap$i done snaplist=${snaplist#,} log_must $ZFS destroy $TESTPOOL/$TESTFS1@$snaplist for i in 1 2 3 4 5; do log_mustnot snapexists $TESTPOOL/$TESFS1@snap$i done log_note "zfs destroy with all bogus snapshot names" log_mustnot $ZFS destroy $TESTPOOL/$TESTFS1@snap12,snap21,sna@pple1@,s""nappy2 log_note "zfs destroy with some bogus snapshot names" for i in 1 2 3; do log_must $ZFS snapshot $TESTPOOL/$TESTFS1@snap$i done log_must $ZFS destroy $TESTPOOL/$TESTFS1@snap1,snap2,snapple1,snappy2,snap3 for i in 1 2 3; do log_mustnot snapexists $TESTPOOL/$TESTFS1@snap$i done log_note "zfs destroy with some snapshot names having special characters" for i in 1 2 3; do log_must $ZFS snapshot $TESTPOOL/$TESTFS1@snap$i done log_must $ZFS destroy $TESTPOOL/$TESTFS1@snap1,@,snap2,,,,snap3, for i in 1 2 3; do log_mustnot snapexists $TESTPOOL/$TESTFS1@snap$i done log_note "zfs destroy for too many snapshots" snaplist="" for i in {1..100}; do log_must $ZFS snapshot $TESTPOOL/$TESTFS1@snap$i snaplist=$snaplist,snap$i done snaplist=${snaplist#,} log_must $ZFS destroy $TESTPOOL/$TESTFS1@$snaplist for i in {1..100}; do log_mustnot snapexists $TESTPOOL/$TESTFS1@snap$i done log_note "zfs destroy multiple snapshots with hold" snaplist="" for i in 1 2 3 4 5; do log_must $ZFS snapshot $TESTPOOL/$TESTFS1@snap$i log_must $ZFS hold keep $TESTPOOL/$TESTFS1@snap$i snaplist=$snaplist,snap$i done snaplist=${snaplist#,} log_mustnot $ZFS destroy $TESTPOOL/$TESTFS1@$snaplist for i in 1 2 3 4 5; do log_must $ZFS release keep $TESTPOOL/$TESTFS1@snap$i done log_must $ZFS destroy $TESTPOOL/$TESTFS1@$snaplist log_note "zfs destroy for multiple snapshots having clones" for i in 1 2 3 4 5; do log_must $ZFS snapshot $TESTPOOL/$TESTFS1@snap$i done snaplist="" for i in 1 2 3 4 5; do log_must $ZFS clone $TESTPOOL/$TESTFS1@snap$i $TESTPOOL/$TESTFS1/clone$i snaplist=$snaplist,snap$i done snaplist=${snaplist#,} log_mustnot $ZFS destroy $TESTPOOL/$TESTFS1@$snaplist for i in 1 2 3 4 5; do log_must snapexists $TESTPOOL/$TESTFS1@snap$i log_must $ZFS destroy $TESTPOOL/$TESTFS1/clone$i done log_note "zfs destroy for snapshots for different datasets" log_must $ZFS create $TESTPOOL/$TESTFS2 log_must $ZFS snapshot $TESTPOOL/$TESTFS2@fs2snap log_must $ZFS create $TESTPOOL/$TESTFS1/$TESTFS2 log_must $ZFS snapshot $TESTPOOL/$TESTFS1/$TESTFS2@fs12snap long_arg=$TESTPOOL/$TESTFS1@snap1,$TESTPOOL/$TESTFS2@fs2snap, long_arg=$long_arg$TESTPOOL/$TESTFS1/$TESTFS2@fs12snap log_must $ZFS destroy $long_arg log_mustnot snapexists $TESTPOOL/$TESTFS1@snap1 log_must snapexists $TESTPOOL/$TESTFS2@fs2snap log_must snapexists $TESTPOOL/$TESTFS1/$TESTFS2@fs12snap log_must $ZFS destroy $TESTPOOL/$TESTFS1@fs2snap,fs12snap,snap2 log_must snapexists $TESTPOOL/$TESTFS2@fs2snap log_must snapexists $TESTPOOL/$TESTFS1/$TESTFS2@fs12snap log_mustnot snapexists $TESTPOOL/$TESTFS1@snap2 log_must $ZFS destroy $TESTPOOL/$TESTFS2@fs2snap,fs12snap,snap3 log_mustnot snapexists $TESTPOOL/$TESTFS2@fs2snap log_must 
snapexists $TESTPOOL/$TESTFS1/$TESTFS2@fs12snap log_must snapexists $TESTPOOL/$TESTFS1@snap3 log_note "zfs destroy for snapshots from different pools" VIRTUAL_DISK=/var/tmp/disk -log_must $DD if=/dev/urandom of=$VIRTUAL_DISK bs=1M count=64 +log_must $MKFILE $MINVDEVSIZE $VIRTUAL_DISK log_must $ZPOOL create $TESTPOOL2 $VIRTUAL_DISK log_must poolexists $TESTPOOL2 log_must $ZFS create $TESTPOOL2/$TESTFS1 log_must $ZFS snapshot $TESTPOOL2/$TESTFS1@snap long_arg=$TESTPOOL2/$TESTFS1@snap,$TESTPOOL/$TESTFS1@snap3, long_arg=$long_arg$TESTPOOL/$TESTFS1@snap5 log_must $ZFS destroy $long_arg log_mustnot snapexists $TESTPOOL2/$TESTFS1@snap log_must snapexists $TESTPOOL/$TESTFS1@snap3 log_must snapexists $TESTPOOL/$TESTFS1@snap5 log_must $ZFS snapshot $TESTPOOL2/$TESTFS1@snap log_must $ZFS destroy $TESTPOOL2/$TESTFS1@snap5,snap3,snap log_mustnot snapexists $TESTPOOL2/$TESTFS1@snap log_must snapexists $TESTPOOL/$TESTFS1@snap3 log_must snapexists $TESTPOOL/$TESTFS1@snap5 log_pass "zfs destroy for multiple snapshots passes" diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh index 1dce6e9cc144..566da2272a68 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_get/zfs_get_004_pos.ksh @@ -1,227 +1,227 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # Verify 'zfs get all' can get all properties for all datasets in the system # # STRATEGY: # 1. Create datasets for testing # 2. Issue 'zfs get all' command # 3. Verify the command gets all available properties of all datasets # verify_runnable "both" function cleanup { [[ -e $propfile ]] && $RM -f $propfile datasetexists $clone && \ log_must $ZFS destroy $clone for snap in $fssnap $volsnap ; do snapexists $snap && \ log_must $ZFS destroy $snap done if [[ -n $globalzone ]] ; then for pool in $TESTPOOL1 $TESTPOOL2 $TESTPOOL3; do poolexists $pool && \ log_must $ZPOOL destroy -f $pool done for file in `$LS $TESTDIR1/poolfile*`; do $RM -f $file done else for fs in $TESTPOOL/$TESTFS1 $TESTPOOL/$TESTFS2 $TESTPOOL/$TESTFS3; do datasetexists $fs && \ log_must $ZFS destroy -rf $fs done fi } log_assert "Verify the functions of 'zfs get all' work." 
log_onexit cleanup typeset globalzone="" if is_global_zone ; then globalzone="true" fi set -A opts "" "-r" "-H" "-p" "-rHp" "-o name" \ "-s local,default,temporary,inherited,none" \ "-o name -s local,default,temporary,inherited,none" \ "-rHp -o name -s local,default,temporary,inherited,none" set -A usrprops "a:b=c" "d_1:1_e=0f" "123:456=789" fs=$TESTPOOL/$TESTFS fssnap=$fs@$TESTSNAP clone=$TESTPOOL/$TESTCLONE volsnap=$TESTPOOL/$TESTVOL@$TESTSNAP #set user defined properties for $TESTPOOL for usrprop in ${usrprops[@]}; do log_must $ZFS set $usrprop $TESTPOOL done # create snapshot and clone in $TESTPOOL log_must $ZFS snapshot $fssnap log_must $ZFS clone $fssnap $clone log_must $ZFS snapshot $volsnap # collect datasets which can be set user defined properties usrpropds="$clone $fs" # collect all datasets which we are creating allds="$fs $clone $fssnap $volsnap" #create pool and datasets to guarantee testing under multiple pools and datasets. file=$TESTDIR1/poolfile -typeset -i FILESIZE=104857600 #100M -(( DFILESIZE = FILESIZE * 2 )) # double of FILESIZE -typeset -i VOLSIZE=10485760 #10M +typeset FILESIZE=$MINVDEVSIZE +(( DFILESIZE = $FILESIZE * 2 )) +typeset -i VOLSIZE=10485760 availspace=$(get_prop available $TESTPOOL) typeset -i i=0 # make sure 'availspace' is larger then twice of FILESIZE to create a new pool. # If any, we only totally create 3 pools for multple datasets testing to limit # testing time while (( availspace > DFILESIZE )) && (( i < 3 )) ; do (( i += 1 )) if [[ -n $globalzone ]] ; then log_must $MKFILE $FILESIZE ${file}$i eval pool=\$TESTPOOL$i log_must $ZPOOL create $pool ${file}$i else eval pool=$TESTPOOL/\$TESTFS$i log_must $ZFS create $pool fi #set user defined properties for testing for usrprop in ${usrprops[@]}; do log_must $ZFS set $usrprop $pool done #create datasets in pool log_must $ZFS create $pool/$TESTFS log_must $ZFS snapshot $pool/$TESTFS@$TESTSNAP log_must $ZFS clone $pool/$TESTFS@$TESTSNAP $pool/$TESTCLONE if [[ -n $globalzone ]] ; then log_must $ZFS create -V $VOLSIZE $pool/$TESTVOL else log_must $ZFS create $pool/$TESTVOL fi ds=`$ZFS list -H -r -o name -t filesystem,volume $pool` usrpropds="$usrpropds $pool/$TESTFS $pool/$TESTCLONE $pool/$TESTVOL" allds="$allds $pool/$TESTFS $pool/$TESTCLONE $pool/$TESTVOL \ $pool/$TESTFS@$TESTSNAP" availspace=$(get_prop available $TESTPOOL) done #the expected number of property for each type of dataset in this testing typeset -i fspropnum=27 typeset -i snappropnum=8 typeset -i volpropnum=15 propfile=/var/tmp/allpropfile.$$ typeset -i i=0 typeset -i propnum=0 typeset -i failflag=0 while (( i < ${#opts[*]} )); do [[ -e $propfile ]] && $RM -f $propfile log_must eval "$ZFS get ${opts[i]} all >$propfile" for ds in $allds; do $GREP $ds $propfile >/dev/null 2>&1 (( $? != 0 )) && \ log_fail "There is no property for" \ "dataset $ds in 'get all' output." propnum=`$CAT $propfile | $AWK '{print $1}' | \ $GREP "${ds}$" | $WC -l` ds_type=`$ZFS get -H -o value type $ds` case $ds_type in filesystem ) (( propnum < fspropnum )) && \ (( failflag += 1 )) ;; snapshot ) (( propnum < snappropnum )) && \ (( failflag += 1 )) ;; volume ) (( propnum < volpropnum )) && \ (( failflag += 1 )) ;; esac (( failflag != 0 )) && \ log_fail " 'zfs get all' fails to get out " \ "all properties for dataset $ds." (( propnum = 0 )) (( failflag = 0 )) done (( i += 1 )) done log_note "'zfs get' can get particular property for all datasets with that property." 
function do_particular_prop_test # { typeset props="$1" typeset ds="$2" for prop in ${commprops[*]}; do ds=`$ZFS get -H -o name $prop` [[ "$ds" != "$allds" ]] && \ log_fail "The result datasets are $ds, but all suitable" \ "datasets are $allds for the property $prop" done } # Here, we do a testing for user defined properties and the most common properties # for all datasets. commprop="type creation used referenced compressratio" usrprop="a:b d_1:1_e 123:456" do_particular_prop_test "$commprop" "$allds" do_particular_prop_test "$usrprop" "$usrpropds" log_pass "'zfs get all' works as expected." diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh index 6010af18ae4d..b3f1b39671d4 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_rename/zfs_rename_005_neg.ksh @@ -1,87 +1,91 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # + +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cli_root/zfs_rename/zfs_rename.kshlib # # DESCRIPTION: # 'zfs rename' should fail when the dataset are not within the same pool # # STRATEGY: # 1. Given a file system, snapshot and volume. # 2. Rename each dataset object to a different pool. # 3. Verify the operation fails, and only the original name # is displayed by zfs list. # verify_runnable "global" function my_cleanup { poolexists $TESTPOOL1 && \ destroy_pool $TESTPOOL1 [[ -e $TESTDIR/$TESTFILE1 ]] && \ log_must $RM -f $TESTDIR/$TESTFILE1 cleanup } set -A src_dataset \ "$TESTPOOL/$TESTFS1" "$TESTPOOL/$TESTCTR1" \ "$TESTPOOL/$TESTCTR/$TESTFS1" "$TESTPOOL/$TESTVOL" \ "$TESTPOOL/$TESTFS@snapshot" "$TESTPOOL/$TESTFS-clone" # # cleanup defined in zfs_rename.kshlib # log_onexit my_cleanup log_assert "'zfs rename' should fail while datasets are within different pool." additional_setup -typeset FILESIZE=64m -log_must $MKFILE $FILESIZE $TESTDIR/$TESTFILE1 +log_must $MKFILE $MINVDEVSIZE $TESTDIR/$TESTFILE1 create_pool $TESTPOOL1 $TESTDIR/$TESTFILE1 for src in ${src_dataset[@]} ; do dest=${src#$TESTPOOL/} if [[ $dest == *"@"* ]]; then dest=${dest#*@} dest=${TESTPOOL1}@$dest else dest=${TESTPOOL1}/$dest fi log_mustnot $ZFS rename $src $dest log_mustnot $ZFS rename -p $src $dest # # Verify original dataset name still in use # log_must datasetexists $src done log_pass "'zfs rename' fail while datasets are within different pool." 
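The change repeated across these tests swaps fixed vdev file sizes (64m, 300m, 400m) for $MINVDEVSIZE. A minimal sketch of the resulting setup pattern, assuming libtest.shlib provides $MKFILE, $MINVDEVSIZE, create_pool, and destroy_pool:

	typeset VDEV=$TESTDIR/example.$$.dat
	log_must $MKFILE $MINVDEVSIZE $VDEV	# smallest size accepted for a vdev file
	create_pool $TESTPOOL $VDEV
	# ... test body ...
	destroy_pool $TESTPOOL
	log_must $RM -f $VDEV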
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh index de67baa60cd8..935e277d14f7 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zfs_snapshot/zfs_snapshot_008_neg.ksh @@ -1,68 +1,68 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . $STF_SUITE/tests/functional/cli_root/zfs_snapshot/zfs_snapshot.cfg # # DESCRIPTION: # 'zfs snapshot pool1@snap pool2@snap' should fail since both snapshots # are not in the same pool. # # STRATEGY: # 1. Create 2 separate zpools, zpool name lengths must be the same. # 2. Attempt to simultaneously create a snapshot of each pool. # 3. Veriy the snapshot creation failed. # verify_runnable "both" function cleanup { for pool in $SNAPPOOL1 $SNAPPOOL2 ; do if poolexists $pool ; then log_must $ZPOOL destroy -f $pool fi done for dev in $SNAPDEV1 $SNAPDEV2 ; do if [[ -f $dev ]] ; then log_must rm -f $dev fi done } log_assert "'zfs snapshot pool1@snap1 pool2@snap2' should fail since snapshots are in different pools." log_onexit cleanup -log_must $MKFILE 64m $SNAPDEV1 -log_must $MKFILE 64m $SNAPDEV2 +log_must $MKFILE $MINVDEVSIZE $SNAPDEV1 +log_must $MKFILE $MINVDEVSIZE $SNAPDEV2 log_must $ZPOOL create $SNAPPOOL1 $SNAPDEV1 log_must $ZPOOL create $SNAPPOOL2 $SNAPDEV2 log_mustnot $ZFS snapshot $SNAPPOOL1@snap1 $SNAPPOOL2@snap2 log_pass "'zfs snapshot pool1@snap1 pool2@snap2' should fail since snapshots are in different pools." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool/zpool_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool/zpool_002_pos.ksh index d4265af041c6..a2dc206cd7cc 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool/zpool_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool/zpool_002_pos.ksh @@ -1,103 +1,108 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
# If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # + +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # With ZFS_ABORT set, all zpool commands should be able to abort and generate a core file. # # STRATEGY: # 1. Create an array of zpool commands # 2. Execute each command in the array # 3. Verify the command aborts and generates a core file # verify_runnable "global" function cleanup { unset ZFS_ABORT if [[ -d $corepath ]]; then $RM -rf $corepath fi if poolexists $pool; then log_must $ZPOOL destroy -f $pool fi } log_assert "With ZFS_ABORT set, all zpool commands can abort and generate a core file." log_onexit cleanup # Preparation work for testing corepath=$TESTDIR/core if [[ -d $corepath ]]; then $RM -rf $corepath fi $MKDIR $corepath pool=pool.$$ vdev1=$TESTDIR/file1 vdev2=$TESTDIR/file2 vdev3=$TESTDIR/file3 for vdev in $vdev1 $vdev2 $vdev3; do - $MKFILE 64m $vdev + $MKFILE $MINVDEVSIZE $vdev done set -A cmds "create $pool mirror $vdev1 $vdev2" "list $pool" "iostat $pool" \ "status $pool" "upgrade $pool" "get delegation $pool" "set delegation=off $pool" \ "export $pool" "import -d $TESTDIR $pool" "offline $pool $vdev1" \ "online $pool $vdev1" "clear $pool" "detach $pool $vdev2" \ "attach $pool $vdev1 $vdev2" "replace $pool $vdev2 $vdev3" \ "scrub $pool" "destroy -f $pool" set -A badparams "" "create" "destroy" "add" "remove" "list *" "iostat" "status" \ "online" "offline" "clear" "attach" "detach" "replace" "scrub" \ "import" "export" "upgrade" "history -?" "get" "set" if is_linux; then ulimit -c unlimited echo "$corepath/core.zpool" >/proc/sys/kernel/core_pattern echo 0 >/proc/sys/kernel/core_uses_pid else $COREADM -p ${corepath}/core.%f fi export ZFS_ABORT=yes for subcmd in "${cmds[@]}" "${badparams[@]}"; do corefile=${corepath}/core.zpool $ZPOOL $subcmd >/dev/null 2>&1 if [[ ! -e $corefile ]]; then log_fail "$ZPOOL $subcmd did not generate a core file with ZFS_ABORT set." fi $RM -f $corefile done log_pass "With ZFS_ABORT set, zpool commands can abort and generate a core file as expected." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.cfg b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.cfg index f12a883dc8c4..e4429b2a8343 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.cfg +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add.cfg @@ -1,94 +1,86 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
# If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012, 2014 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # export DISK_ARRAY_NUM=0 export DISK_ARRAY_LIMIT=4 export DISKSARRAY="" -# -# Variables for zpool_add_006 -# -export VDEVS_NUM=32 - function set_disks { set -A disk_array $(find_disks $DISKS) if (( ${#disk_array[*]} <= 1 )); then export DISK=${DISKS%% *} else export DISK="" typeset -i i=0 while (( i < ${#disk_array[*]} )); do export DISK${i}="${disk_array[$i]}" DISKSARRAY="$DISKSARRAY ${disk_array[$i]}" (( i = i + 1 )) (( i>$DISK_ARRAY_LIMIT )) && break done export DISK_ARRAY_NUM=$i export DISKSARRAY fi if (( $DISK_ARRAY_NUM == 0 )); then export disk=$DISK else export disk=$DISK0 fi } set_disks -export FILESIZE="100m" -export FILESIZE1="150m" -export SIZE="150m" -export SIZE1="250m" +export SIZE="$(((MINVDEVSIZE / (1024 * 1024)) * 2))m" if is_linux; then set_device_dir set_slice_prefix export SLICE0=1 export SLICE1=2 export SLICE3=4 export SLICE4=5 export SLICE5=6 export SLICE6=7 else export DEV_DSKDIR="/dev" export SLICE_PREFIX="s" export SLICE0=0 export SLICE1=1 export SLICE3=3 export SLICE4=4 export SLICE5=5 export SLICE6=6 fi -export VOLSIZE=84m +export VOLSIZE=$MINVDEVSIZE diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_006_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_006_pos.ksh index c60814d40d63..ad2aa685b5d2 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_006_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_add/zpool_add_006_pos.ksh @@ -1,78 +1,78 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2014 by Delphix. All rights reserved. +# Copyright (c) 2014, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cli_root/zpool_add/zpool_add.kshlib # # DESCRIPTION: # Adding a large number of file based vdevs to a zpool works. # # STRATEGY: # 1. Create a file based pool. -# 2. Add 32 file based vdevs to it. +# 2. Add 16 file based vdevs to it. # 3. Attempt to add a file based vdev that's too small; verify failure. 
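# A note on the $MINVDEVSIZE arithmetic used in zpool_add.cfg above:
# MINVDEVSIZE is exported by the test framework as a byte count, and the
# configuration files convert it to an "m"-suffixed megabyte string for
# helpers such as partition_disk. A minimal sketch, assuming a 256 MiB
# MINVDEVSIZE purely for illustration:
#
#	typeset -i MINVDEVSIZE=$((256 * 1024 * 1024))
#	SIZE="$(((MINVDEVSIZE / (1024 * 1024)) * 2))m"
#	print $SIZE	# prints "512m"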
# verify_runnable "global" function cleanup { poolexists $TESTPOOL1 && \ destroy_pool $TESTPOOL1 poolexists $TESTPOOL && \ destroy_pool $TESTPOOL [[ -d $TESTDIR ]] && log_must $RM -rf $TESTDIR partition_cleanup } log_assert "Adding a large number of file based vdevs to a zpool works." log_onexit cleanup create_pool $TESTPOOL ${DISKS%% *} log_must $ZFS create -o mountpoint=$TESTDIR $TESTPOOL/$TESTFS -log_must $MKFILE 64m $TESTDIR/file.00 +log_must $MKFILE $MINVDEVSIZE $TESTDIR/file.00 create_pool "$TESTPOOL1" "$TESTDIR/file.00" -vdevs_list=$($ECHO $TESTDIR/file.{01..32}) -log_must $MKFILE 64m $vdevs_list +vdevs_list=$($ECHO $TESTDIR/file.{01..16}) +log_must $MKFILE $MINVDEVSIZE $vdevs_list log_must $ZPOOL add -f "$TESTPOOL1" $vdevs_list log_must vdevs_in_pool "$TESTPOOL1" "$vdevs_list" # Attempt to add a file based vdev that's too small. log_must $MKFILE 32m $TESTDIR/broken_file log_mustnot $ZPOOL add -f "$TESTPOOL1" ${TESTDIR}/broken_file log_mustnot vdevs_in_pool "$TESTPOOL1" "${TESTDIR}/broken_file" log_pass "Adding a large number of file based vdevs to a zpool works." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_clear/zpool_clear.cfg b/tests/zfs-tests/tests/functional/cli_root/zpool_clear/zpool_clear.cfg index 097a43b0ed80..e6977350af90 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_clear/zpool_clear.cfg +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_clear/zpool_clear.cfg @@ -1,33 +1,33 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # -export FILESIZE=100m +export FILESIZE=$MINVDEVSIZE export BLOCKSZ=$(( 1024 * 1024 )) export NUM_WRITES=40 diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/setup.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_create/setup.ksh index a21813ca6585..28adaea2cd70 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/setup.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/setup.ksh @@ -1,64 +1,64 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
# If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib [[ -z $FORMAT ]] || \ [[ -z $MKDIR ]] || \ [[ -z $LSBLK ]] || \ [[ -z $READLINK ]] || \ [[ -z $TOUCH ]] && \ log_fail "Missing required commands" verify_runnable "global" if ! $(is_physical_device $DISKS) ; then log_unsupported "These tests cannot be run on raw files." fi if [[ -n $DISK ]]; then # # Use 'zpool create' to clean up the information on # the given disk to avoid slice overlapping. # cleanup_devices $DISK - partition_disk $SIZE $DISK 7 + partition_disk $((($MINVDEVSIZE / (1024 * 1024)) * 2))m $DISK 7 else for disk in `$ECHO $DISKSARRAY`; do cleanup_devices $disk - partition_disk $SIZE $disk 7 + partition_disk $((($MINVDEVSIZE / (1024 * 1024)) * 2))m $disk 7 done fi log_pass diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.cfg b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.cfg index 33dd8866e214..93e0c38d3e6e 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.cfg +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create.cfg @@ -1,115 +1,114 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012, 2014 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . 
$STF_SUITE/include/libtest.shlib export DISK_ARRAY_NUM=0 export DISK_ARRAY_LIMIT=4 export DISKSARRAY="" -export VDEVS_NUM=32 function set_disks { typeset -a disk_array=($(find_disks $DISKS)) if (( ${#disk_array[*]} <= 1 )); then export DISK=${DISKS%% *} export DISK_ARRAY_NUM=1 else export DISK="" typeset -i i=0 while (( i < ${#disk_array[*]} )); do export DISK${i}="${disk_array[$i]}" DISKSARRAY="$DISKSARRAY ${disk_array[$i]}" (( i = i + 1 )) (( i>$DISK_ARRAY_LIMIT )) && break done export DISK_ARRAY_NUM=$i export DISKSARRAY fi } set_disks -export FILESIZE="100m" -export FILESIZE1="150m" -export SIZE="200m" -export SIZE1="250m" +export FILESIZE="$MINVDEVSIZE" +export FILESIZE1="$(($MINVDEVSIZE * 2))" +export SIZE="$((MINVDEVSIZE / (1024 * 1024)))"m +export SIZE1="$(($MINVDEVSIZE * 2 / (1024 * 1024)))m" if is_linux; then set_device_dir set_slice_prefix export SLICE0=1 export SLICE1=2 export SLICE2=3 export SLICE3=4 export SLICE4=5 export SLICE5=6 export SLICE6=7 export SLICE7=8 disk1=${DISKS%% *} if is_mpath_device $disk1; then delete_partitions fi else export SLICE_PREFIX="s" export SLICE0=0 export SLICE1=1 export SLICE2=2 export SLICE3=3 export SLICE4=4 export SLICE5=5 export SLICE6=6 export SLICE7=7 fi export FILEDISK=filedisk_create export FILEDISK0=filedisk0_create export FILEDISK1=filedisk1_create export FILEDISK2=filedisk2_create export FILEDISK3=filedisk3_create export BYND_MAX_NAME="byondmaxnamelength\ 012345678901234567890123456789\ 012345678901234567890123456789\ 012345678901234567890123456789\ 012345678901234567890123456789\ 012345678901234567890123456789\ 012345678901234567890123456789\ 012345678901234567890123456789\ 012345678901234567890123456789" export TOOSMALL="toosmall" export TESTPOOL4=testpool4.create export TESTPOOL5=testpool5.create export TESTPOOL6=testpool6.create export CPATH="/var/tmp/cachefile.create" diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_001_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_001_pos.ksh index 824c6dc00401..2a975edc51a0 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_001_pos.ksh @@ -1,145 +1,146 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib # # DESCRIPTION: # 'zpool create ...' can successfully create a # new pool with a name in ZFS namespace. # # STRATEGY: # 1. 
Create storage pools with a name in ZFS namespace with different # vdev specs. # 2. Verify the pool created successfully # verify_runnable "global" function cleanup { poolexists $TESTPOOL && destroy_pool $TESTPOOL clean_blockfile "$TESTDIR0 $TESTDIR1" if [[ -n $DISK ]]; then - partition_disk $SIZE $DISK 7 + partition_disk $((($MINVDEVSIZE / (1024 * 1024)) * 2))m $DISK 7 else typeset disk="" for disk in $DISK0 $DISK1; do - partition_disk $SIZE $disk 7 + partition_disk \ + $((($MINVDEVSIZE / (1024 * 1024)) * 2))m $disk 7 done fi } log_assert "'zpool create ...' can successfully create" \ "a new pool with a name in ZFS namespace." log_onexit cleanup set -A keywords "" "mirror" "raidz" "raidz1" case $DISK_ARRAY_NUM in 0|1) typeset disk="" if (( $DISK_ARRAY_NUM == 0 )); then disk=$DISK else disk=$DISK0 fi create_blockfile $FILESIZE $TESTDIR0/$FILEDISK0 \ ${disk}${SLICE_PREFIX}${SLICE5} create_blockfile $FILESIZE $TESTDIR1/$FILEDISK1 \ ${disk}${SLICE_PREFIX}${SLICE6} pooldevs="${disk}${SLICE_PREFIX}${SLICE0} \ ${DEV_DSKDIR}/${disk}${SLICE_PREFIX}${SLICE0} \ \"${disk}${SLICE_PREFIX}${SLICE0} \ ${disk}${SLICE_PREFIX}${SLICE1}\" \ $TESTDIR0/$FILEDISK0" raidzdevs="\"${DEV_DSKDIR}/${disk}${SLICE_PREFIX}${SLICE0} \ ${disk}${SLICE_PREFIX}${SLICE1}\" \ \"${disk}${SLICE_PREFIX}${SLICE0} \ ${disk}${SLICE_PREFIX}${SLICE1} \ ${disk}${SLICE_PREFIX}${SLICE3}\" \ \"${disk}${SLICE_PREFIX}${SLICE0} \ ${disk}${SLICE_PREFIX}${SLICE1} \ ${disk}${SLICE_PREFIX}${SLICE3} \ ${disk}${SLICE_PREFIX}${SLICE4}\"\ \"$TESTDIR0/$FILEDISK0 $TESTDIR1/$FILEDISK1\"" mirrordevs=$raidzdevs ;; 2|*) create_blockfile $FILESIZE $TESTDIR0/$FILEDISK0 \ ${DISK0}${SLICE_PREFIX}${SLICE5} create_blockfile $FILESIZE $TESTDIR1/$FILEDISK1 \ ${DISK1}${SLICE_PREFIX}${SLICE5} pooldevs="${DISK0}${SLICE_PREFIX}${SLICE0} \ \"${DEV_DSKDIR}/${DISK0}${SLICE_PREFIX}${SLICE0} \ ${DISK1}${SLICE_PREFIX}${SLICE0}\" \ \"${DISK0}${SLICE_PREFIX}${SLICE0} \ ${DISK0}${SLICE_PREFIX}${SLICE1} \ ${DISK1}${SLICE_PREFIX}${SLICE1}\"\ \"${DISK0}${SLICE_PREFIX}${SLICE0} \ ${DISK1}${SLICE_PREFIX}${SLICE0} \ ${DISK0}${SLICE_PREFIX}${SLICE1}\ ${DISK1}${SLICE_PREFIX}${SLICE1}\" \ \"$TESTDIR0/$FILEDISK0 $TESTDIR1/$FILEDISK1\"" raidzdevs="\"${DEV_DSKDIR}/${DISK0}${SLICE_PREFIX}${SLICE0} \ ${DISK1}${SLICE_PREFIX}${SLICE0}\" \ \"${DISK0}${SLICE_PREFIX}${SLICE0} \ ${DISK0}${SLICE_PREFIX}${SLICE1} \ ${DISK1}${SLICE_PREFIX}${SLICE1}\" \ \"${DISK0}${SLICE_PREFIX}${SLICE0} \ ${DISK1}${SLICE_PREFIX}${SLICE0} \ ${DISK0}${SLICE_PREFIX}${SLICE1} \ ${DISK1}${SLICE_PREFIX}${SLICE1}\" \ \"$TESTDIR0/$FILEDISK0 $TESTDIR1/$FILEDISK1\"" mirrordevs=$raidzdevs ;; esac typeset -i i=0 while (( $i < ${#keywords[*]} )); do case ${keywords[i]} in "") create_pool_test "$TESTPOOL" "${keywords[i]}" "$pooldevs";; mirror) create_pool_test "$TESTPOOL" "${keywords[i]}" "$mirrordevs";; raidz|raidz1) create_pool_test "$TESTPOOL" "${keywords[i]}" "$raidzdevs" ;; esac (( i = i+1 )) done log_pass "'zpool create ...' success." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_004_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_004_pos.ksh index b5a37095b3bb..f9542d4bef86 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_004_pos.ksh @@ -1,80 +1,80 @@ #!/bin/ksh # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). 
# You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012, 2014 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib # # DESCRIPTION: # Create a storage pool with many file based vdevs. # # STRATEGY: # 1. Create assigned number of files in ZFS filesystem as vdevs. # 2. Creating a new pool based on the vdevs should work. # 3. Creating a pool with a file based vdev that is too small should fail. # verify_runnable "global" function cleanup { typeset pool="" poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1 poolexists $TESTPOOL && destroy_pool $TESTPOOL [[ -d $TESTDIR ]] && log_must $RM -rf $TESTDIR partition_disk $SIZE $disk 6 } -log_assert "Storage pools with $VDEVS_NUM file based vdevs can be created." +log_assert "Storage pools with 16 file based vdevs can be created." log_onexit cleanup disk=${DISKS%% *} create_pool $TESTPOOL $disk log_must $ZFS create -o mountpoint=$TESTDIR $TESTPOOL/$TESTFS -vdevs_list=$($ECHO $TESTDIR/file.{01..32}) -log_must $MKFILE 64m $vdevs_list +vdevs_list=$($ECHO $TESTDIR/file.{01..16}) +log_must $MKFILE $MINVDEVSIZE $vdevs_list create_pool "$TESTPOOL1" $vdevs_list log_must vdevs_in_pool "$TESTPOOL1" "$vdevs_list" if poolexists $TESTPOOL1; then destroy_pool $TESTPOOL1 else log_fail "Creating pool with large numbers of file-vdevs failed." fi log_must $MKFILE 32m $TESTDIR/broken_file -vdevs_list="$vdevs_list ${TESTDIR}/broken_file" +vdevs_list="$vdevs_list $TESTDIR/broken_file" log_mustnot $ZPOOL create -f $TESTPOOL1 $vdevs_list log_pass "Storage pools with many file based vdevs can be created." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_006_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_006_pos.ksh index 56c6e54eeb07..de6b1d4382a7 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_006_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_006_pos.ksh @@ -1,124 +1,128 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
# If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib # # DESCRIPTION: # Verify 'zpool create' succeeds with multiple keyword combinations. # # STRATEGY: # 1. Create base filesystem to hold virtual disk files. -# 2. Create several files >= 64M. +# 2. Create several files of size $MINVDEVSIZE. # 3. Verify 'zpool create' succeeds with valid keyword combinations. # verify_runnable "global" function cleanup { datasetexists $TESTPOOL1 && destroy_pool $TESTPOOL1 datasetexists $TESTPOOL && destroy_pool $TESTPOOL } log_assert "Verify 'zpool create' succeeds with keyword combinations." log_onexit cleanup create_pool $TESTPOOL $DISKS mntpnt=$(get_prop mountpoint $TESTPOOL) typeset -i i=0 while ((i < 10)); do - log_must $MKFILE 64M $mntpnt/vdev$i + log_must $MKFILE $MINVDEVSIZE $mntpnt/vdev$i eval vdev$i=$mntpnt/vdev$i ((i += 1)) done set -A valid_args \ "mirror $vdev0 $vdev1 $vdev2 mirror $vdev3 $vdev4 $vdev5" \ "mirror $vdev0 $vdev1 mirror $vdev2 $vdev3 mirror $vdev4 $vdev5" \ "mirror $vdev0 $vdev1 $vdev2 mirror $vdev3 $vdev4 $vdev5 \ spare $vdev6" \ "mirror $vdev0 $vdev1 mirror $vdev2 $vdev3 mirror $vdev4 $vdev5 \ spare $vdev6 $vdev7" \ "mirror $vdev0 $vdev1 spare $vdev2 mirror $vdev3 $vdev4" \ "raidz $vdev0 $vdev1 $vdev2 raidz1 $vdev3 $vdev4 $vdev5" \ "raidz $vdev0 $vdev1 raidz1 $vdev2 $vdev3 raidz $vdev4 $vdev5" \ "raidz $vdev0 $vdev1 $vdev2 raidz1 $vdev3 $vdev4 $vdev5 \ spare $vdev6" \ "raidz $vdev0 $vdev1 raidz1 $vdev2 $vdev3 raidz $vdev4 $vdev5 \ spare $vdev6 $vdev7" \ "raidz $vdev0 $vdev1 spare $vdev2 raidz $vdev3 $vdev4" \ "raidz2 $vdev0 $vdev1 $vdev2 raidz2 $vdev3 $vdev4 $vdev5" \ "raidz2 $vdev0 $vdev1 $vdev2 raidz2 $vdev3 $vdev4 $vdev5 \ raidz2 $vdev6 $vdev7 $vdev8" \ "raidz2 $vdev0 $vdev1 $vdev2 raidz2 $vdev3 $vdev4 $vdev5 \ spare $vdev6" \ "raidz2 $vdev0 $vdev1 $vdev2 raidz2 $vdev3 $vdev4 $vdev5 \ raidz2 $vdev6 $vdev7 $vdev8 spare $vdev9" \ "raidz2 $vdev0 $vdev1 $vdev2 spare $vdev3 raidz2 $vdev4 $vdev5 $vdev6" set -A forced_args \ "$vdev0 raidz $vdev1 $vdev2 raidz1 $vdev3 $vdev4 $vdev5" \ "$vdev0 raidz2 $vdev1 $vdev2 $vdev3 raidz2 $vdev4 $vdev5 $vdev6" \ "$vdev0 mirror $vdev1 $vdev2 mirror $vdev3 $vdev4" \ "$vdev0 mirror $vdev1 $vdev2 raidz $vdev3 $vdev4 \ raidz2 $vdev5 $vdev6 $vdev7 spare $vdev8" \ "$vdev0 mirror $vdev1 $vdev2 spare $vdev3 raidz $vdev4 $vdev5" \ "raidz $vdev0 $vdev1 raidz2 $vdev2 $vdev3 $vdev4" \ "raidz $vdev0 $vdev1 raidz2 $vdev2 $vdev3 $vdev4 spare $vdev5" \ "raidz $vdev0 $vdev1 spare $vdev2 raidz2 $vdev3 $vdev4 $vdev5" \ "mirror $vdev0 $vdev1 raidz $vdev2 $vdev3 raidz2 $vdev4 $vdev5 $vdev6" \ "mirror $vdev0 $vdev1 raidz $vdev2 $vdev3 \ raidz2 $vdev4 $vdev5 $vdev6 spare $vdev7" \ "mirror $vdev0 $vdev1 raidz $vdev2 $vdev3 \ spare $vdev4 raidz2 $vdev5 $vdev6 $vdev7" \ "spare $vdev0 $vdev1 $vdev2 mirror $vdev3 $vdev4 raidz $vdev5 $vdev6" i=0 while ((i < ${#valid_args[@]})); do log_must $ZPOOL create $TESTPOOL1 ${valid_args[$i]} $SYNC; $SYNC log_must $ZPOOL destroy -f $TESTPOOL1 ((i += 1)) done i=0 while ((i < ${#forced_args[@]})); do log_mustnot $ZPOOL create $TESTPOOL1 ${forced_args[$i]} log_must $ZPOOL create -f $TESTPOOL1 
${forced_args[$i]} $SYNC; $SYNC log_must $ZPOOL destroy -f $TESTPOOL1 ((i += 1)) done log_pass "'zpool create' succeeds with keyword combinations." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_010_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_010_neg.ksh index 7b555de9bd97..694397ff1410 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_010_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_create/zpool_create_010_neg.ksh @@ -1,88 +1,89 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cli_root/zpool_create/zpool_create.shlib # # DESCRIPTION: -# 'zpool create' should return an error with VDEVsof size <64mb +# 'zpool create' should return an error with VDEVs smaller than SPA_MINDEVSIZE # # STRATEGY: # 1. Create an array of parameters # 2. For each parameter in the array, execute 'zpool create' # 3. Verify an error is returned. # -log_assert "'zpool create' should return an error with VDEVs <64mb" +log_assert "'zpool create' should return an error with VDEVs smaller than SPA_MINDEVSIZE" verify_runnable "global" function cleanup { poolexists $TOOSMALL && destroy_pool $TOOSMALL poolexists $TESTPOOL1 && destroy_pool $TESTPOOL1 poolexists $TESTPOOL && destroy_pool $TESTPOOL [[ -d $TESTDIR ]] && $RM -rf $TESTDIR partition_disk $SIZE $disk 6 } log_onexit cleanup if [[ -n $DISK ]]; then disk=$DISK else disk=$DISK0 fi create_pool $TESTPOOL $disk log_must $ZFS create $TESTPOOL/$TESTFS log_must $ZFS set mountpoint=$TESTDIR $TESTPOOL/$TESTFS +typeset -l devsize=$(($SPA_MINDEVSIZE - 1024 * 1024)) for files in $TESTDIR/file1 $TESTDIR/file2 do - log_must $MKFILE 63m $files + log_must $MKFILE $devsize $files done set -A args \ "$TOOSMALL $TESTDIR/file1" "$TESTPOOL1 $TESTDIR/file1 $TESTDIR/file2" \ "$TOOSMALL mirror $TESTDIR/file1 $TESTDIR/file2" \ "$TOOSMALL raidz $TESTDIR/file1 $TESTDIR/file2" typeset -i i=0 while [[ $i -lt ${#args[*]} ]]; do log_mustnot $ZPOOL create ${args[i]} ((i = i + 1)) done log_pass "'zpool create' with badly formed parameters failed as expected." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh index d7ce33d99850..ce27e22320b8 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_expand/zpool_expand_002_pos.ksh @@ -1,148 +1,148 @@ #!
/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cli_root/zpool_expand/zpool_expand.cfg # # DESCRIPTION: # After zpool online -e poolname zvol vdevs, zpool can autoexpand by # Dynamic LUN Expansion # # # STRATEGY: # 1) Create a pool # 2) Create volume on top of the pool # 3) Create pool by using the zvols # 4) Expand the vol size by zfs set volsize # 5) Use zpool online -e to online the zvol vdevs # 6) Check that the pool size was expanded # verify_runnable "global" function cleanup { if poolexists $TESTPOOL1; then log_must $ZPOOL destroy $TESTPOOL1 fi for i in 1 2 3; do if datasetexists $VFS/vol$i; then log_must $ZFS destroy $VFS/vol$i fi done } log_onexit cleanup log_assert "zpool can expand after zpool online -e zvol vdevs on LUN expansion" for i in 1 2 3; do log_must $ZFS create -V $org_size $VFS/vol$i done for type in " " mirror raidz raidz2; do log_must $ZPOOL create $TESTPOOL1 $type ${ZVOL_DEVDIR}/$VFS/vol1 \ ${ZVOL_DEVDIR}/$VFS/vol2 ${ZVOL_DEVDIR}/$VFS/vol3 typeset autoexp=$(get_pool_prop autoexpand $TESTPOOL1) if [[ $autoexp != "off" ]]; then log_fail "zpool $TESTPOOL1 autoexpand should be off but is " \ "$autoexp" fi typeset prev_size=$(get_pool_prop size $TESTPOOL1) typeset zfs_prev_size=$($ZFS get -p avail $TESTPOOL1 | $TAIL -1 | \ $AWK '{print $3}') for i in 1 2 3; do log_must $ZFS set volsize=$exp_size $VFS/vol$i done for i in 1 2 3; do log_must $ZPOOL online -e $TESTPOOL1 ${ZVOL_DEVDIR}/$VFS/vol$i done $SYNC $SLEEP 10 $SYNC typeset expand_size=$(get_pool_prop size $TESTPOOL1) typeset zfs_expand_size=$($ZFS get -p avail $TESTPOOL1 | $TAIL -1 | \ $AWK '{print $3}') log_note "$TESTPOOL1 $type has previous size: $prev_size and " \ "expanded size: $expand_size" # compare available pool size from zfs - if [[ $zfs_expand_size > $zfs_prev_size ]]; then + if [[ $zfs_expand_size -gt $zfs_prev_size ]]; then # check for zpool history for the pool size expansion if [[ $type == " " ]]; then typeset size_addition=$($ZPOOL history -il $TESTPOOL1 \ | $GREP "pool '$TESTPOOL1' size:" | \ $GREP "vdev online" | \ $GREP "(+${EX_1GB}" | wc -l) if [[ $size_addition -ne $i ]]; then log_fail "pool $TESTPOOL1 did not autoexpand " \ "after LUN expansion" fi elif [[ $type == "mirror" ]]; then $ZPOOL history -il $TESTPOOL1 | \ $GREP "pool '$TESTPOOL1' size:" | \ $GREP "vdev online" | \ $GREP "(+${EX_1GB})" >/dev/null 2>&1 if [[ $?
-ne 0 ]]; then log_fail "pool $TESTPOOL1 did not autoexpand " \ "after LUN expansion" fi else $ZPOOL history -il $TESTPOOL1 | \ $GREP "pool '$TESTPOOL1' size:" | \ $GREP "vdev online" | \ $GREP "(+${EX_3GB})" >/dev/null 2>&1 if [[ $? -ne 0 ]] ; then log_fail "pool $TESTPOOL1 did not autoexpand " \ "after LUN expansion" fi fi else log_fail "pool $TESTPOOL1 did not autoexpand after LUN " \ "expansion" fi log_must $ZPOOL destroy $TESTPOOL1 for i in 1 2 3; do log_must $ZFS set volsize=$org_size $VFS/vol$i done done log_pass "zpool can expand after zpool online -e zvol vdevs on LUN expansion" diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_export/zpool_export_004_pos.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_export/zpool_export_004_pos.ksh index 91f2968817e8..14b9f219825c 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_export/zpool_export_004_pos.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_export/zpool_export_004_pos.ksh @@ -1,95 +1,99 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # Verify zpool export succeeds or fails with a spare. # # STRATEGY: # 1. Create two mirror pools with the same spare. # 2. Verify zpool export one pool succeeds. # 3. Import the pool. # 4. Replace one device with the spare and detach it in one pool. # 5. Verify zpool export the pool succeeds. # 6. Import the pool. # 7. Replace one device with the spare in one pool. # 8. Verify zpool export the pool fails. # 9. Verify zpool export the pool with "-f" succeeds. # 10. Import the pool. # verify_runnable "global" function cleanup { mntpnt=$(get_prop mountpoint $TESTPOOL) datasetexists $TESTPOOL1 || log_must $ZPOOL import -d $mntpnt $TESTPOOL1 datasetexists $TESTPOOL1 && destroy_pool $TESTPOOL1 datasetexists $TESTPOOL2 && destroy_pool $TESTPOOL2 typeset -i i=0 while ((i < 5)); do if [[ -e $mntpnt/vdev$i ]]; then log_must $RM -f $mntpnt/vdev$i fi ((i += 1)) done } log_assert "Verify zpool export succeeds or fails with a spare." 
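# A sketch of the spare semantics exercised below (the $vdev* names refer
# to the files created just after this comment): a hot spare shared by two
# pools only blocks export while it is actively replacing a device, so the
# expected outcomes are:
#
#	zpool replace $TESTPOOL1 $vdev0 $vdev4	# shared spare becomes INUSE
#	zpool export $TESTPOOL1			# fails while the spare is in use
#	zpool export -f $TESTPOOL1		# forced export succeeds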
log_onexit cleanup mntpnt=$(get_prop mountpoint $TESTPOOL) typeset -i i=0 while ((i < 5)); do - log_must $MKFILE 64M $mntpnt/vdev$i + log_must $MKFILE $MINVDEVSIZE $mntpnt/vdev$i eval vdev$i=$mntpnt/vdev$i ((i += 1)) done log_must $ZPOOL create $TESTPOOL1 mirror $vdev0 $vdev1 spare $vdev4 log_must $ZPOOL create $TESTPOOL2 mirror $vdev2 $vdev3 spare $vdev4 log_must $ZPOOL export $TESTPOOL1 log_must $ZPOOL import -d $mntpnt $TESTPOOL1 log_must $ZPOOL replace $TESTPOOL1 $vdev0 $vdev4 log_must $ZPOOL detach $TESTPOOL1 $vdev4 log_must $ZPOOL export $TESTPOOL1 log_must $ZPOOL import -d $mntpnt $TESTPOOL1 log_must $ZPOOL replace $TESTPOOL1 $vdev0 $vdev4 log_mustnot $ZPOOL export $TESTPOOL1 log_must $ZPOOL export -f $TESTPOOL1 log_must $ZPOOL import -d $mntpnt $TESTPOOL1 log_pass "Verify zpool export succeeds or fails with a spare." diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.cfg b/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.cfg index 017874ec0703..b49281f9268c 100644 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.cfg +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_import/zpool_import.cfg @@ -1,137 +1,137 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib export DISKSARRAY=$DISKS export DISK_ARRAY_NUM=$($ECHO ${DISKS} | $NAWK '{print NF}') typeset -a disk_array=($(find_disks $DISKS)) case "${#disk_array[*]}" in 0) # # on stf_configure, disk_freelist returns empty. # DISK_COUNT=0 ;; 1) # We need to repartition the single disk to two slices. if is_linux; then DISK_COUNT=1 ZFS_DISK1=${disk_array[0]} ZFS_DISK2=${disk_array[0]} if is_mpath_device $ZFS_DISK1; then export DEV_DSKDIR=$DEV_MPATHDIR else export DEV_DSKDIR=$DEV_RDSKDIR fi if ( is_mpath_device $ZFS_DISK1 ) && [[ -z $($ECHO $ZFS_DISK1 | awk 'substr($1,18,1)\ ~ /^[[:digit:]]+$/') ]] || ( is_real_device $ZFS_DISK1 ); then ZFSSIDE_DISK1=${ZFS_DISK1}1 ZFSSIDE_DISK2=${ZFS_DISK2}2 elif ( is_mpath_device $ZFS_DISK1 || is_loop_device $ZFS_DISK1 ); then ZFSSIDE_DISK1=${ZFS_DISK1}p1 ZFSSIDE_DISK2=${ZFS_DISK2}p2 else log_fail "$ZFS_DISK1 not supported for partitioning." fi else export DEV_DSKDIR="/dev" DISK_COUNT=1 ZFS_DISK1=${disk_array[0]} ZFSSIDE_DISK1=${ZFS_DISK1}s0 ZFS_DISK2=${disk_array[0]} ZFSSIDE_DISK2=${ZFS_DISK2}s1 fi ;; *) # With two or more disks, use a single slice on each of the first two disks. 
if is_linux; then DISK_COUNT=2 ZFS_DISK1=${disk_array[0]} if is_mpath_device $ZFS_DISK1; then export DEV_DSKDIR=$DEV_MPATHDIR else export DEV_DSKDIR=$DEV_RDSKDIR fi if ( is_mpath_device $ZFS_DISK1 ) && [[ -z $($ECHO $ZFS_DISK1 | awk 'substr($1,18,1)\ ~ /^[[:digit:]]+$/') ]] || ( is_real_device $ZFS_DISK1 ); then ZFSSIDE_DISK1=${ZFS_DISK1}1 elif ( is_mpath_device $ZFS_DISK1 || is_loop_device $ZFS_DISK1 ); then ZFSSIDE_DISK1=${ZFS_DISK1}p1 else log_fail "$ZFS_DISK1 not supported for partitioning." fi ZFS_DISK2=${disk_array[1]} if ( is_mpath_device $ZFS_DISK2 ) && [[ -z $($ECHO $ZFS_DISK2 | awk 'substr($1,18,1)\ ~ /^[[:digit:]]+$/') ]] || ( is_real_device $ZFS_DISK2 ); then ZFSSIDE_DISK2=${ZFS_DISK2}1 elif ( is_mpath_device $ZFS_DISK2 || is_loop_device $ZFS_DISK2 ); then ZFSSIDE_DISK2=${ZFS_DISK2}p1 else log_fail "$ZFS_DISK2 not supported for partitioning." fi else export DEV_DSKDIR="/dev" DISK_COUNT=2 ZFS_DISK1=${disk_array[0]} ZFSSIDE_DISK1=${ZFS_DISK1}s0 ZFS_DISK2=${disk_array[1]} ZFSSIDE_DISK2=${ZFS_DISK2}s0 fi ;; esac export DISK_COUNT ZFS_DISK1 ZFSSIDE_DISK1 ZFS_DISK2 ZFSSIDE_DISK2 -export FS_SIZE=1g -export FILE_SIZE=64m -export SLICE_SIZE=128m +export FS_SIZE="$((($MINVDEVSIZE / (1024 * 1024)) * 16))m" +export FILE_SIZE="$(($MINVDEVSIZE / 2))" +export SLICE_SIZE="$((($MINVDEVSIZE / (1024 * 1024)) * 2))m" export MAX_NUM=5 export GROUP_NUM=3 export DEVICE_DIR=$TEST_BASE_DIR/dev_import-test export BACKUP_DEVICE_DIR=$TEST_BASE_DIR/bakdev_import-test export DEVICE_FILE=disk export DEVICE_ARCHIVE=archive_import-test export MYTESTFILE=$STF_SUITE/include/libtest.shlib typeset -i num=0 while (( num < $GROUP_NUM )); do DEVICE_FILES="$DEVICE_FILES ${DEVICE_DIR}/${DEVICE_FILE}$num" (( num = num + 1 )) done export DEVICE_FILES export VDEV0=$DEVICE_DIR/${DEVICE_FILE}0 export VDEV1=$DEVICE_DIR/${DEVICE_FILE}1 export VDEV2=$DEVICE_DIR/${DEVICE_FILE}2 export VDEV3=$DEVICE_DIR/${DEVICE_FILE}3 export VDEV4=$DEVICE_DIR/${DEVICE_FILE}4 export ALTER_ROOT=/alter_import-test diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_002_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_002_neg.ksh index 4f8e548043a6..a4c8a28992d9 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_002_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_002_neg.ksh @@ -1,119 +1,123 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # +# +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. +# + . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # # Malformed zpool set commands are rejected # # STRATEGY: # 1. Create an array of many different malformed zfs set arguments # 2. 
Run zpool set for each arg checking each will exit with status code 1 # # verify_runnable "global" # note to self - need to make sure there isn't a pool called bootfs # before running this test... function cleanup { $ZPOOL destroy bootfs $RM /tmp/zpool_set_002.$$.dat } log_assert "Malformed zpool set commands are rejected" if poolexists bootfs then log_unsupported "Unable to run test on a machine with a pool called \ bootfs" fi log_onexit cleanup # build up an array of bad arguments. set -A arguments "rubbish " \ "foo@bar= " \ "@@@= +pool " \ "zpool bootfs " \ "bootfs " \ "bootfs +" \ "bootfs=bootfs/123 " \ "bootfs=bootfs@val " \ "Bootfs=bootfs " \ "- " \ "== " \ "set " \ "@@ " \ "12345 " \ "€にほんご " \ "/ " \ "bootfs=bootfs /" \ "bootfs=a%d%s " # here, we build up a large string. # a word to the ksh-wary, ${#array[@]} gives you the # total number of entries in an array, so array[${#array[@]}] # will index the last entry+1, ksh arrays start at index 0. COUNT=0 while [ $COUNT -le 1025 ] do bigname="${bigname}o" COUNT=$(( $COUNT + 1 )) done # add an argument of maximum length property name arguments[${#arguments[@]}]="$bigname=value" # add an argument of maximum length property value arguments[${#arguments[@]}]="bootfs=$bigname" # Create a pool called bootfs (so-called, so as to trip any clashes between # property name, and pool name) # Also create a filesystem in this pool -log_must $MKFILE 64m /tmp/zpool_set_002.$$.dat +log_must $MKFILE $MINVDEVSIZE /tmp/zpool_set_002.$$.dat log_must $ZPOOL create bootfs /tmp/zpool_set_002.$$.dat log_must $ZFS create bootfs/root typeset -i i=0; while [ $i -lt "${#arguments[@]}" ] do log_mustnot eval "$ZPOOL set ${arguments[$i]} > /dev/null 2>&1" # now also try with a valid pool in the argument list log_mustnot eval "$ZPOOL set ${arguments[$i]}bootfs > /dev/null 2>&1" # now also try with two valid pools in the argument list log_mustnot eval "$ZPOOL set ${arguments[$i]}bootfs bootfs > /dev/null" i=$(( $i + 1)) done log_pass "Malformed zpool set commands are rejected" diff --git a/tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_003_neg.ksh b/tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_003_neg.ksh index 614939b380ac..09c9ef195381 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_003_neg.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zpool_set/zpool_set_003_neg.ksh @@ -1,72 +1,72 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2012 by Delphix. All rights reserved. +# Copyright (c) 2012, 2015 by Delphix. All rights reserved. # . 
$STF_SUITE/include/libtest.shlib # # DESCRIPTION: # # zpool set cannot set a readonly property # # STRATEGY: # 1. Create a pool # 2. Verify that we can't set readonly properties on that pool # verify_runnable "global" function cleanup { $ZPOOL destroy $TESTPOOL $RM /tmp/zpool_set_003.$$.dat } set -A props "available" "capacity" "guid" "health" "size" "used" set -A vals "100" "10" "12345" "HEALTHY" "10" "10" log_onexit cleanup log_assert "zpool set cannot set a readonly property" -log_must $MKFILE 64m /tmp/zpool_set_003.$$.dat +log_must $MKFILE $MINVDEVSIZE /tmp/zpool_set_003.$$.dat log_must $ZPOOL create $TESTPOOL /tmp/zpool_set_003.$$.dat typeset -i i=0; while [ $i -lt "${#props[@]}" ] do # try to set each property in the prop list with its corresponding value log_mustnot eval "$ZPOOL set ${props[$i]}=${vals[$i]} $TESTPOOL \ > /dev/null 2>&1" i=$(( $i + 1)) done log_pass "zpool set cannot set a readonly property" diff --git a/tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh b/tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh index ac5b93472a0b..2deafcd01abf 100755 --- a/tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh +++ b/tests/zfs-tests/tests/functional/cli_user/misc/setup.ksh @@ -1,161 +1,161 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/cli_user/misc/misc.cfg # This setup script is moderately complex, as it creates scenarios for all # of the tests included in this directory. Usually we'd want each test case # to set up/tear down its own configuration, but this would be time consuming # given the nature of these tests. However, as a side-effect, one test # leaving the system in an unknown state could impact other test cases. DISK=${DISKS%% *} VOLSIZE=150m TESTVOL=testvol # Create a default setup that includes a volume default_setup_noexit "$DISK" "" "volume" # # The rest of this setup script creates a ZFS filesystem configuration # that is used to test the rest of the zfs subcommands in this directory. 
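# An aside on the first-disk idiom used above: DISK=${DISKS%% *} takes the
# first word of the space-separated $DISKS list via ksh suffix stripping,
# e.g. with hypothetical values:
#
#	DISKS="sdb sdc sdd"
#	DISK=${DISKS%% *}	# removes the longest " *" suffix -> "sdb"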
# # create a snapshot and a clone to test clone promote log_must $ZFS snapshot $TESTPOOL/$TESTFS@snap log_must $ZFS clone $TESTPOOL/$TESTFS@snap $TESTPOOL/$TESTFS/clone # create a file in the filesystem that isn't in the above snapshot $TOUCH /$TESTDIR/file.txt # create a non-default property and a child we can use to test inherit log_must $ZFS create $TESTPOOL/$TESTFS/$TESTFS2 log_must $ZFS set snapdir=hidden $TESTPOOL/$TESTFS # create an unmounted filesystem to test unmount log_must $ZFS create $TESTPOOL/$TESTFS/$TESTFS2.unmounted log_must $ZFS unmount $TESTPOOL/$TESTFS/$TESTFS2.unmounted # send our snapshot to a known file in /tmp $ZFS send $TESTPOOL/$TESTFS@snap > /tmp/zfstest_datastream.dat if [ ! -s /tmp/zfstest_datastream.dat ] then log_fail "ZFS send datafile was not created!" fi log_must $CHMOD 644 /tmp/zfstest_datastream.dat # create a filesystem that has particular properties to test set/get log_must $ZFS create -o version=1 $TESTPOOL/$TESTFS/prop set -A props $PROP_NAMES set -A prop_vals $PROP_VALS typeset -i i=0 while [[ $i -lt ${#props[*]} ]] do prop_name=${props[$i]} prop_val=${prop_vals[$i]} log_must $ZFS set $prop_name=$prop_val $TESTPOOL/$TESTFS/prop i=$(( $i + 1 )) done # create a filesystem we don't mind renaming log_must $ZFS create $TESTPOOL/$TESTFS/renameme if is_global_zone && ! is_linux then # create a filesystem we can share log_must $ZFS create $TESTPOOL/$TESTFS/unshared log_must $ZFS set sharenfs=off $TESTPOOL/$TESTFS/unshared # create a filesystem that we can unshare log_must $ZFS create $TESTPOOL/$TESTFS/shared log_must $ZFS set sharenfs=on $TESTPOOL/$TESTFS/shared fi log_must $ZFS create -o version=1 $TESTPOOL/$TESTFS/version1 log_must $ZFS create -o version=1 $TESTPOOL/$TESTFS/allowed log_must $ZFS allow everyone create $TESTPOOL/$TESTFS/allowed if is_global_zone then # Now create several virtual disks to test zpool with - $MKFILE 100m /$TESTDIR/disk1.dat - $MKFILE 100m /$TESTDIR/disk2.dat - $MKFILE 100m /$TESTDIR/disk3.dat - $MKFILE 100m /$TESTDIR/disk-additional.dat - $MKFILE 100m /$TESTDIR/disk-export.dat - $MKFILE 100m /$TESTDIR/disk-offline.dat - $MKFILE 100m /$TESTDIR/disk-spare1.dat - $MKFILE 100m /$TESTDIR/disk-spare2.dat + $MKFILE $MINVDEVSIZE /$TESTDIR/disk1.dat + $MKFILE $MINVDEVSIZE /$TESTDIR/disk2.dat + $MKFILE $MINVDEVSIZE /$TESTDIR/disk3.dat + $MKFILE $MINVDEVSIZE /$TESTDIR/disk-additional.dat + $MKFILE $MINVDEVSIZE /$TESTDIR/disk-export.dat + $MKFILE $MINVDEVSIZE /$TESTDIR/disk-offline.dat + $MKFILE $MINVDEVSIZE /$TESTDIR/disk-spare1.dat + $MKFILE $MINVDEVSIZE /$TESTDIR/disk-spare2.dat # and create a pool we can perform attach, remove, replace, # etc. 
operations with log_must $ZPOOL create $TESTPOOL.virt mirror /$TESTDIR/disk1.dat \ /$TESTDIR/disk2.dat /$TESTDIR/disk3.dat /$TESTDIR/disk-offline.dat \ spare /$TESTDIR/disk-spare1.dat # Offline one of the disks to test online log_must $ZPOOL offline $TESTPOOL.virt /$TESTDIR/disk-offline.dat # create an exported pool to test import log_must $ZPOOL create $TESTPOOL.exported /$TESTDIR/disk-export.dat log_must $ZPOOL export $TESTPOOL.exported set -A props $POOL_PROPS set -A prop_vals $POOL_VALS typeset -i i=0 while [[ $i -lt ${#props[*]} ]] do prop_name=${props[$i]} prop_val=${prop_vals[$i]} log_must $ZPOOL set $prop_name=$prop_val $TESTPOOL i=$(( $i + 1 )) done # copy a v1 pool from cli_root $CP $STF_SUITE/tests/functional/cli_root/zpool_upgrade/zfs-pool-v1.dat.bz2 \ /$TESTDIR log_must $BUNZIP2 /$TESTDIR/zfs-pool-v1.dat.bz2 log_must $ZPOOL import -d /$TESTDIR v1-pool fi log_pass diff --git a/tests/zfs-tests/tests/functional/history/history_001_pos.ksh b/tests/zfs-tests/tests/functional/history/history_001_pos.ksh index e63d5ac505cd..26170cb630af 100755 --- a/tests/zfs-tests/tests/functional/history/history_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/history/history_001_pos.ksh @@ -1,122 +1,122 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/tests/functional/history/history_common.kshlib # # DESCRIPTION: # Create a scenario to verify the following zpool subcommands are logged. # create, destroy, add, remove, offline, online, attach, detach, replace, # scrub, export, import, clear, upgrade. # # STRATEGY: # 1. Create three virtual disk files and create a mirror. # 2. Run and verify pool commands, with special casing for destroy/export. # 3. Import a pool and upgrade it, verifying 'upgrade' was logged. # verify_runnable "global" function cleanup { destroy_pool $MPOOL destroy_pool $upgrade_pool [[ -d $import_dir ]] && $RM -rf $import_dir for file in $VDEV1 $VDEV2 $VDEV3 $VDEV4; do [[ -f $file ]] && $RM -f $file done } log_assert "Verify zpool sub-commands which modify state are logged." log_onexit cleanup mntpnt=$(get_prop mountpoint $TESTPOOL) (( $? 
!= 0)) && log_fail "get_prop($TESTPOOL mountpoint)" VDEV1=$mntpnt/vdev1; VDEV2=$mntpnt/vdev2; VDEV3=$mntpnt/vdev3; VDEV4=$mntpnt/vdev4; -log_must $MKFILE 64m $VDEV1 $VDEV2 $VDEV3 -log_must $MKFILE 100m $VDEV4 +log_must $MKFILE $MINVDEVSIZE $VDEV1 $VDEV2 $VDEV3 +log_must $MKFILE $(($MINVDEVSIZE * 2)) $VDEV4 run_and_verify -p "$MPOOL" "$ZPOOL create $MPOOL mirror $VDEV1 $VDEV2" run_and_verify -p "$MPOOL" "$ZPOOL add -f $MPOOL spare $VDEV3" run_and_verify -p "$MPOOL" "$ZPOOL remove $MPOOL $VDEV3" run_and_verify -p "$MPOOL" "$ZPOOL offline $MPOOL $VDEV1" run_and_verify -p "$MPOOL" "$ZPOOL online $MPOOL $VDEV1" run_and_verify -p "$MPOOL" "$ZPOOL attach $MPOOL $VDEV1 $VDEV4" run_and_verify -p "$MPOOL" "$ZPOOL detach $MPOOL $VDEV4" run_and_verify -p "$MPOOL" "$ZPOOL replace -f $MPOOL $VDEV1 $VDEV4" run_and_verify -p "$MPOOL" "$ZPOOL scrub $MPOOL" run_and_verify -p "$MPOOL" "$ZPOOL clear $MPOOL" # For export and destroy, mimic the behavior of run_and_verify using two # commands since the history will be unavailable until the pool is imported # again. commands=("$ZPOOL export $MPOOL" "$ZPOOL import -d $mntpnt $MPOOL" "$ZPOOL destroy $MPOOL" "$ZPOOL import -D -f -d $mntpnt $MPOOL") for i in 0 2; do cmd1="${commands[$i]}" cmd2="${commands[(($i + 1 ))]}" $ZPOOL history $MPOOL > $OLD_HISTORY 2>/dev/null log_must $cmd1 log_must $cmd2 $ZPOOL history $MPOOL > $TMP_HISTORY 2>/dev/null $DIFF $OLD_HISTORY $TMP_HISTORY | $GREP "^> " | $SED 's/^> //g' > \ $NEW_HISTORY if is_linux; then $GREP "$($ECHO "$cmd1" | $SED 's/^.*\/\(zpool .*\).*$/\1/')" \ $NEW_HISTORY >/dev/null 2>&1 || \ log_fail "Didn't find \"$cmd1\" in pool history" $GREP "$($ECHO "$cmd2" | $SED 's/^.*\/\(zpool .*\).*$/\1/')" \ $NEW_HISTORY >/dev/null 2>&1 || \ log_fail "Didn't find \"$cmd2\" in pool history" else $GREP "$($ECHO "$cmd1" | $SED 's/\/usr\/sbin\///g')" \ $NEW_HISTORY >/dev/null 2>&1 || \ log_fail "Didn't find \"$cmd1\" in pool history" $GREP "$($ECHO "$cmd2" | $SED 's/\/usr\/sbin\///g')" \ $NEW_HISTORY >/dev/null 2>&1 || \ log_fail "Didn't find \"$cmd2\" in pool history" fi done run_and_verify -p "$MPOOL" "$ZPOOL split $MPOOL ${MPOOL}_split" import_dir=/var/tmp/import_dir.$$ log_must $MKDIR $import_dir log_must $CP $STF_SUITE/tests/functional/history/zfs-pool-v4.dat.Z $import_dir log_must $UNCOMPRESS $import_dir/zfs-pool-v4.dat.Z upgrade_pool=$($ZPOOL import -d $import_dir | $GREP "pool:" | $AWK '{print $2}') log_must $ZPOOL import -d $import_dir $upgrade_pool run_and_verify -p "$upgrade_pool" "$ZPOOL upgrade $upgrade_pool" log_pass "zpool sub-commands which modify state are logged passed. " diff --git a/tests/zfs-tests/tests/functional/history/history_003_pos.ksh b/tests/zfs-tests/tests/functional/history/history_003_pos.ksh index 224ee159e0ff..01bba0c96631 100755 --- a/tests/zfs-tests/tests/functional/history/history_003_pos.ksh +++ b/tests/zfs-tests/tests/functional/history/history_003_pos.ksh @@ -1,103 +1,101 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
# If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # zpool history will truncate on small pools, leaving pool creation intact # # STRATEGY: -# 1. Create two 100M virtual disk files. -# 2. Create test pool using the two virtual files. -# 3. Loop 100 times to set and remove compression to test dataset. -# 4. Make sure 'zpool history' output is truncated -# 5. Verify that the initial pool creation is preserved. +# 1. Create a test pool on a file. +# 2. Loop 300 times to set and remove compression on the test dataset. +# 3. Make sure 'zpool history' output is truncated +# 4. Verify that the initial pool creation is preserved. # verify_runnable "global" function cleanup { datasetexists $spool && log_must $ZPOOL destroy $spool [[ -f $VDEV0 ]] && log_must $RM -f $VDEV0 - [[ -f $VDEV1 ]] && log_must $RM -f $VDEV1 [[ -f $TMPFILE ]] && log_must $RM -f $TMPFILE } log_assert "zpool history limitation test." log_onexit cleanup mntpnt=$(get_prop mountpoint $TESTPOOL) (( $? != 0 )) && log_fail "get_prop mountpoint $TESTPOOL" -VDEV0=$mntpnt/vdev0; VDEV1=$mntpnt/vdev1 -log_must $MKFILE 100m $VDEV0 $VDEV1 +VDEV0=$mntpnt/vdev0 +log_must $MKFILE $MINVDEVSIZE $VDEV0 spool=smallpool.$$; sfs=smallfs.$$ -log_must $ZPOOL create $spool $VDEV0 $VDEV1 +log_must $ZPOOL create $spool $VDEV0 log_must $ZFS create $spool/$sfs typeset -i orig_count=$($ZPOOL history $spool | $WC -l) typeset orig_md5=$($ZPOOL history $spool | $HEAD -2 | $MD5SUM | \ $AWK '{print $1}') typeset -i i=0 -while ((i < 100)); do +while ((i < 300)); do $ZFS set compression=off $spool/$sfs $ZFS set compression=on $spool/$sfs $ZFS set compression=off $spool/$sfs $ZFS set compression=on $spool/$sfs $ZFS set compression=off $spool/$sfs ((i += 1)) done TMPFILE=/tmp/spool.$$ $ZPOOL history $spool >$TMPFILE typeset -i entry_count=$($WC -l $TMPFILE | $AWK '{print $1}') typeset final_md5=$($HEAD -2 $TMPFILE | $MD5SUM | $AWK '{print $1}') $GREP 'zpool create' $TMPFILE >/dev/null 2>&1 || log_fail "'zpool create' was not found in pool history" $GREP 'zfs create' $TMPFILE >/dev/null 2>&1 && log_fail "'zfs create' was found in pool history" $GREP 'zfs set compress' $TMPFILE >/dev/null 2>&1 || log_fail "'zfs set compress' was not found in pool history" # Verify that the creation of the pool was preserved in the history. if [[ $orig_md5 != $final_md5 ]]; then log_fail "zpool creation history was not preserved." fi log_pass "zpool history limitation test passed." diff --git a/tests/zfs-tests/tests/functional/online_offline/online_offline_003_neg.ksh b/tests/zfs-tests/tests/functional/online_offline/online_offline_003_neg.ksh index a9d0c337002f..398c19c22a9d 100755 --- a/tests/zfs-tests/tests/functional/online_offline/online_offline_003_neg.ksh +++ b/tests/zfs-tests/tests/functional/online_offline/online_offline_003_neg.ksh @@ -1,81 +1,81 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License.
# # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013, 2014 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib # # DESCRIPTION: # Offlining disks in a non-redundant pool should fail. # # STRATEGY: # 1. Create a multidisk stripe and start some random I/O # 2. zpool offline should fail on each disk. # verify_runnable "global" function cleanup { if poolexists $TESTPOOL1; then destroy_pool $TESTPOOL1 fi $KILL $killpid >/dev/null 2>&1 [[ -e $TESTDIR ]] && log_must $RM -rf $TESTDIR/* } log_assert "Offlining disks in a non-redundant pool should fail." log_onexit cleanup specials_list="" for i in 0 1 2; do - $MKFILE 64m $TESTDIR/$TESTFILE1.$i + $MKFILE $MINVDEVSIZE $TESTDIR/$TESTFILE1.$i specials_list="$specials_list $TESTDIR/$TESTFILE1.$i" done disk=($specials_list) create_pool $TESTPOOL1 $specials_list log_must $ZFS create $TESTPOOL1/$TESTFS1 log_must $ZFS set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1 $FILE_TRUNC -f $((64 * 1024 * 1024)) -b 8192 -c 0 -r $TESTDIR/$TESTFILE1 & typeset killpid="$! " for i in 0 1 2; do log_mustnot $ZPOOL offline $TESTPOOL1 ${disk[$i]} check_state $TESTPOOL1 ${disk[$i]} "online" done log_must $KILL $killpid $SYNC log_pass diff --git a/tests/zfs-tests/tests/functional/poolversion/setup.ksh b/tests/zfs-tests/tests/functional/poolversion/setup.ksh index cfe14a030a21..aadaa906063e 100755 --- a/tests/zfs-tests/tests/functional/poolversion/setup.ksh +++ b/tests/zfs-tests/tests/functional/poolversion/setup.ksh @@ -1,45 +1,45 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . 
$STF_SUITE/include/libtest.shlib verify_runnable "global" # create a version 1 pool -log_must $MKFILE 64m /tmp/zpool_version_1.dat +log_must $MKFILE $MINVDEVSIZE /tmp/zpool_version_1.dat log_must $ZPOOL create -o version=1 $TESTPOOL /tmp/zpool_version_1.dat # create another version 1 pool -log_must $MKFILE 64m /tmp/zpool2_version_1.dat +log_must $MKFILE $MINVDEVSIZE /tmp/zpool2_version_1.dat log_must $ZPOOL create -o version=1 $TESTPOOL2 /tmp/zpool2_version_1.dat log_pass diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy.cfg b/tests/zfs-tests/tests/functional/redundancy/redundancy.cfg index 079ff8b73052..f49b7e76aedd 100644 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy.cfg +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy.cfg @@ -1,40 +1,38 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # export BASEDIR=/var/tmp/basedir.$$ export TESTFILE=testfile.$$ export PRE_RECORD_FILE=$BASEDIR/pre-record-file.$$ export PST_RECORD_FILE=$BASEDIR/pst-record-file.$$ -export DEV_SIZE=64M - export BLOCKSZ=$(( 1024 * 1024 )) export NUM_WRITES=40 diff --git a/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib b/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib index 56e2bd19d876..64c3a04928e1 100644 --- a/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib +++ b/tests/zfs-tests/tests/functional/redundancy/redundancy.kshlib @@ -1,339 +1,342 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . 
$STF_SUITE/tests/functional/redundancy/redundancy.cfg function cleanup { if poolexists $TESTPOOL; then destroy_pool $TESTPOOL fi typeset dir for dir in $TESTDIR $BASEDIR; do if [[ -d $dir ]]; then log_must $RM -rf $dir fi done } # # Get a random number between the given minimum and maximum. # # $1 Minimum value # $2 Maximum value # function random { typeset -i min=$1 typeset -i max=$2 typeset -i value while true; do ((value = RANDOM % (max + 1))) if ((value >= min)); then break fi done $ECHO $value } # # Record the directory structure and checksum all the files which reside # within the specified pool # # $1 The specified pool # $2 The file in which to save the record. # function record_data { typeset pool=$1 typeset recordfile=$2 [[ -z $pool ]] && log_fail "No specified pool." [[ -f $recordfile ]] && log_must $RM -f $recordfile typeset mntpnt mntpnt=$(get_prop mountpoint $pool) log_must eval "$DU -a $mntpnt > $recordfile 2>&1" # # When the data is damaged, cksum fails and returns 1, # so log_must is not used here. # $FIND $mntpnt -type f -exec $CKSUM {} + >> $recordfile 2>&1 } # # Create a test pool and fill it with files and directories. # # $1 pool name # $2 pool type # $3 number of virtual devices # function setup_test_env { typeset pool=$1 typeset keyword=$2 typeset -i vdev_cnt=$3 typeset vdevs typeset -i i=0 while (( i < vdev_cnt )); do vdevs="$vdevs $BASEDIR/vdev$i" ((i += 1)) done if [[ ! -d $BASEDIR ]]; then log_must $MKDIR $BASEDIR fi if poolexists $pool ; then destroy_pool $pool fi - log_must $MKFILE $DEV_SIZE $vdevs + log_must $MKFILE $MINVDEVSIZE $vdevs log_must $ZPOOL create -m $TESTDIR $pool $keyword $vdevs log_note "Filling up the filesystem ..." typeset -i ret=0 typeset -i i=0 typeset file=$TESTDIR/file while $TRUE ; do $FILE_WRITE -o create -f $file.$i \ -b $BLOCKSZ -c $NUM_WRITES ret=$? (( $ret != 0 )) && break (( i = i + 1 )) done (($ret != 28 )) && log_note "$FILE_WRITE return value($ret) is unexpected." record_data $TESTPOOL $PRE_RECORD_FILE } # # Check that the pool status is healthy # # $1 pool # function is_healthy { typeset pool=$1 typeset healthy_output="pool '$pool' is healthy" typeset real_output=$($ZPOOL status -x $pool) if [[ "$real_output" == "$healthy_output" ]]; then return 0 else typeset -i ret $ZPOOL status -x $pool | $GREP "state:" | \ $GREP "FAULTED" >/dev/null 2>&1 ret=$? (( $ret == 0 )) && return 1 typeset l_scan typeset errnum l_scan=$($ZPOOL status -x $pool | $GREP "scan:") l_scan=${l_scan##*"with"} errnum=$($ECHO $l_scan | $AWK '{print $1}') return $errnum fi }
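For reference, a minimal sketch of the contract is_healthy() relies on, with the pool name purely illustrative: a healthy pool makes 'zpool status -x' print exactly one line, while anything else (a FAULTED state line, or a "scan: ... with N errors" trailer) yields a non-zero return.

# Healthy pools print exactly one line:
#   $ zpool status -x tank
#   pool 'tank' is healthy
if is_healthy $TESTPOOL; then
        log_note "$TESTPOOL reports healthy"
else
        log_note "$TESTPOOL is FAULTED or a scrub found errors"
fi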
# # Check pool data is valid # # $1 pool # function is_data_valid { typeset pool=$1 record_data $pool $PST_RECORD_FILE if ! $DIFF $PRE_RECORD_FILE $PST_RECORD_FILE > /dev/null 2>&1; then return 1 fi return 0 } # # Get the names of the specified number of devices # # $1 pool name # $2 device count # function get_vdevs #pool cnt { typeset pool=$1 typeset -i cnt=$2 typeset all_devs=$($ZPOOL iostat -v $pool | $AWK '{print $1}'| \ $EGREP -v "^pool$|^capacity$|^mirror$|^raidz1$|^raidz2$|---" | \ $EGREP -v "/old$|^$pool$") typeset -i i=0 typeset vdevs while ((i < cnt)); do typeset dev=$($ECHO $all_devs | $AWK '{print $1}') eval all_devs=\${all_devs##*$dev} vdevs="$dev $vdevs" ((i += 1)) done $ECHO "$vdevs" } # # Re-create virtual device files with the same names and replace them # in the pool # # $1 pool name # $2-n virtual device files # function replace_missing_devs { typeset pool=$1 shift typeset vdev for vdev in $@; do - log_must $MKFILE $DEV_SIZE $vdev + log_must $GNUDD if=/dev/zero of=$vdev \ + bs=1024k count=$(($MINVDEVSIZE / (1024 * 1024))) \ + oflag=fdatasync log_must $ZPOOL replace -f $pool $vdev $vdev while true; do if ! is_pool_resilvered $pool ; then log_must $SLEEP 2 else break fi done done } # # Damage the pool's virtual device files. # # $1 pool name # $2 number of devices to damage # $3 damage method; if not null, the vdev labels are # preserved # function damage_devs { typeset pool=$1 typeset -i cnt=$2 typeset label="$3" typeset vdevs - typeset -i bs_count + typeset -i bs_count=$((64 * 1024)) vdevs=$(get_vdevs $pool $cnt) + typeset dev if [[ -n $label ]]; then - typeset dev for dev in $vdevs; do - bs_count=$($LS -l $dev | $AWK '{print $5}') - (( bs_count = bs_count/1024 - 512 )) $DD if=/dev/zero of=$dev seek=512 bs=1024 \ - count=$bs_count conv=notrunc >/dev/null 2>&1 + count=$bs_count conv=notrunc >/dev/null 2>&1 done else - log_must $MKFILE $DEV_SIZE $vdevs + for dev in $vdevs; do + $DD if=/dev/zero of=$dev bs=1024 count=$bs_count \ + conv=notrunc >/dev/null 2>&1 + done fi sync_pool $pool } # # Clear errors in the pool caused by data corruption # # $1 pool name # function clear_errors { typeset pool=$1 log_must $ZPOOL clear $pool if ! is_healthy $pool ; then log_note "$pool should be healthy." return 1 fi if ! is_data_valid $pool ; then log_note "Data should be valid in $pool." return 1 fi return 0 } # # Remove the specified pool's virtual device files # # $1 Pool name # $2 number of devices to remove # function remove_devs { typeset pool=$1 typeset -i cnt=$2 typeset vdevs vdevs=$(get_vdevs $pool $cnt) log_must $RM -f $vdevs sync_pool $pool } # # Recover the bad or missing device files in the pool # # $1 Pool name # $2 number of devices to recover # function recover_bad_missing_devs { typeset pool=$1 typeset -i cnt=$2 typeset vdevs vdevs=$(get_vdevs $pool $cnt) replace_missing_devs $pool $vdevs if ! is_healthy $pool ; then log_note "$pool should be healthy." return 1 fi if ! is_data_valid $pool ; then log_note "Data should be valid in $pool." return 1 fi return 0 }
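Taken together, these helpers support a damage-and-recover cycle. A hypothetical walk-through, with the pool type and counts chosen for illustration rather than taken from any one test (any non-empty third argument to damage_devs preserves the labels):

setup_test_env $TESTPOOL raidz 3        # three file vdevs, filled until ENOSPC
damage_devs $TESTPOOL 1 "keep_label"    # zero one vdev, skipping its front labels
log_must clear_errors $TESTPOOL         # redundancy heals; data is re-verified
remove_devs $TESTPOOL 1                 # now delete a vdev file outright
log_must recover_bad_missing_devs $TESTPOOL 1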
diff --git a/tests/zfs-tests/tests/functional/replacement/replacement_001_pos.ksh b/tests/zfs-tests/tests/functional/replacement/replacement_001_pos.ksh index fcc575f832aa..9e8285f17b82 100755 --- a/tests/zfs-tests/tests/functional/replacement/replacement_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/replacement/replacement_001_pos.ksh @@ -1,157 +1,157 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/replacement/replacement.cfg # # DESCRIPTION: # Replacing disks during I/O should pass for supported pools. # # STRATEGY: # 1. Create multidisk pools (stripe/mirror/raidz) and # start some random I/O # 2. Replace a disk in the pool with another disk. # 3. Verify the integrity of the file system and the resilvering. # verify_runnable "global" function cleanup { if [[ -n "$child_pids" ]]; then for wait_pid in $child_pids do $KILL $wait_pid done fi if poolexists $TESTPOOL1; then destroy_pool $TESTPOOL1 fi [[ -e $TESTDIR ]] && log_must $RM -rf $TESTDIR/* } log_assert "Replacing a disk during I/O completes." options="" options_display="default options" log_onexit cleanup [[ -n "$HOLES_FILESIZE" ]] && options=" $options -f $HOLES_FILESIZE " [[ -n "$HOLES_BLKSIZE" ]] && options="$options -b $HOLES_BLKSIZE " [[ -n "$HOLES_COUNT" ]] && options="$options -c $HOLES_COUNT " [[ -n "$HOLES_SEED" ]] && options="$options -s $HOLES_SEED " [[ -n "$HOLES_FILEOFFSET" ]] && options="$options -o $HOLES_FILEOFFSET " options="$options -r " [[ -n "$options" ]] && options_display=$options child_pids="" function replace_test { typeset -i iters=2 typeset -i index=0 typeset opt=$1 typeset disk1=$2 typeset disk2=$3 typeset i=0 while [[ $i -lt $iters ]]; do log_note "Invoking $FILE_TRUNC with: $options_display" $FILE_TRUNC $options $TESTDIR/$TESTFILE.$i & typeset pid=$! $SLEEP 1 child_pids="$child_pids $pid" ((i = i + 1)) done log_must $ZPOOL replace $opt $TESTPOOL1 $disk1 $disk2 $SLEEP 10 for wait_pid in $child_pids do $KILL $wait_pid done child_pids="" log_must $ZPOOL export $TESTPOOL1 log_must $ZPOOL import -d $TESTDIR $TESTPOOL1 log_must $ZFS umount $TESTPOOL1/$TESTFS1 log_must $ZDB -cdui $TESTPOOL1/$TESTFS1 log_must $ZFS mount $TESTPOOL1/$TESTFS1 } specials_list="" i=0 while [[ $i != 2 ]]; do - $MKFILE 100m $TESTDIR/$TESTFILE1.$i + $MKFILE $MINVDEVSIZE $TESTDIR/$TESTFILE1.$i specials_list="$specials_list $TESTDIR/$TESTFILE1.$i" ((i = i + 1)) done # # Create a replacement disk special file. # -$MKFILE 100m $TESTDIR/$REPLACEFILE +$MKFILE $MINVDEVSIZE $TESTDIR/$REPLACEFILE for type in "" "raidz" "raidz1" "mirror"; do for op in "" "-f"; do create_pool $TESTPOOL1 $type $specials_list log_must $ZFS create $TESTPOOL1/$TESTFS1 log_must $ZFS set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1 replace_test "$op" $TESTDIR/$TESTFILE1.1 $TESTDIR/$REPLACEFILE $ZPOOL iostat -v $TESTPOOL1 | grep "$TESTDIR/$REPLACEFILE" if [[ $? -ne 0 ]]; then log_fail "$REPLACEFILE is not present."
fi destroy_pool $TESTPOOL1 log_must $RM -rf /$TESTPOOL1 done done log_pass diff --git a/tests/zfs-tests/tests/functional/replacement/replacement_002_pos.ksh b/tests/zfs-tests/tests/functional/replacement/replacement_002_pos.ksh index aae6d78a712a..713891d12f7c 100755 --- a/tests/zfs-tests/tests/functional/replacement/replacement_002_pos.ksh +++ b/tests/zfs-tests/tests/functional/replacement/replacement_002_pos.ksh @@ -1,174 +1,174 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/replacement/replacement.cfg # # DESCRIPTION: # Attaching disks during I/O should pass for supported pools. # # STRATEGY: # 1. Create multidisk pools (stripe/mirror/raidz) and # start some random I/O # 2. Attach a disk to the pool. # 3. Verify the integrity of the file system and the resilvering. # verify_runnable "global" function cleanup { if [[ -n "$child_pids" ]]; then for wait_pid in $child_pids do $KILL $wait_pid done fi if poolexists $TESTPOOL1; then destroy_pool $TESTPOOL1 fi [[ -e $TESTDIR ]] && log_must $RM -rf $TESTDIR/* } log_assert "Attaching a disk during I/O completes." options="" options_display="default options" log_onexit cleanup [[ -n "$HOLES_FILESIZE" ]] && options=" $options -f $HOLES_FILESIZE " [[ -n "$HOLES_BLKSIZE" ]] && options="$options -b $HOLES_BLKSIZE " [[ -n "$HOLES_COUNT" ]] && options="$options -c $HOLES_COUNT " [[ -n "$HOLES_SEED" ]] && options="$options -s $HOLES_SEED " [[ -n "$HOLES_FILEOFFSET" ]] && options="$options -o $HOLES_FILEOFFSET " options="$options -r " [[ -n "$options" ]] && options_display=$options child_pids="" function attach_test { typeset -i iters=2 typeset -i index=0 typeset opt=$1 typeset disk1=$2 typeset disk2=$3 typeset i=0 while [[ $i -lt $iters ]]; do log_note "Invoking $FILE_TRUNC with: $options_display" $FILE_TRUNC $options $TESTDIR/$TESTFILE.$i & typeset pid=$! $SLEEP 1 child_pids="$child_pids $pid" ((i = i + 1)) done log_must $ZPOOL attach $opt $TESTPOOL1 $disk1 $disk2 $SLEEP 10 for wait_pid in $child_pids do $KILL $wait_pid done child_pids="" log_must $ZPOOL export $TESTPOOL1 log_must $ZPOOL import -d $TESTDIR $TESTPOOL1 log_must $ZFS umount $TESTPOOL1/$TESTFS1 log_must $ZDB -cdui $TESTPOOL1/$TESTFS1 log_must $ZFS mount $TESTPOOL1/$TESTFS1 } specials_list="" i=0 while [[ $i != 2 ]]; do - $MKFILE 100m $TESTDIR/$TESTFILE1.$i + $MKFILE $MINVDEVSIZE $TESTDIR/$TESTFILE1.$i specials_list="$specials_list $TESTDIR/$TESTFILE1.$i" ((i = i + 1)) done # # Create a replacement disk special file.
# -$MKFILE 100m $TESTDIR/$REPLACEFILE +$MKFILE $MINVDEVSIZE $TESTDIR/$REPLACEFILE for op in "" "-f"; do create_pool $TESTPOOL1 mirror $specials_list log_must $ZFS create $TESTPOOL1/$TESTFS1 log_must $ZFS set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1 attach_test "$op" $TESTDIR/$TESTFILE1.1 $TESTDIR/$REPLACEFILE $ZPOOL iostat -v $TESTPOOL1 | grep "$TESTDIR/$REPLACEFILE" if [[ $? -ne 0 ]]; then log_fail "$REPLACEFILE is not present." fi destroy_pool $TESTPOOL1 done log_note "Verify 'zpool attach' fails with non-mirrors." for type in "" "raidz" "raidz1"; do for op in "" "-f"; do create_pool $TESTPOOL1 $type $specials_list log_must $ZFS create $TESTPOOL1/$TESTFS1 log_must $ZFS set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1 log_mustnot $ZPOOL attach "$op" $TESTPOOL1 $TESTDIR/$TESTFILE1.1 \ $TESTDIR/$REPLACEFILE $ZPOOL iostat -v $TESTPOOL1 | grep "$TESTDIR/$REPLACEFILE" if [[ $? -eq 0 ]]; then log_fail "$REPLACEFILE should not be present." fi destroy_pool $TESTPOOL1 done done log_pass
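As background for the attach test above and the detach test that follows, the three operations change pool geometry differently; an illustrative session, with the pool and device names as examples only:

zpool attach tank sda sdb     # sda becomes a two-way mirror of sda+sdb, then resilvers
zpool detach tank sdb         # drops one side of a mirror; invalid on raidz vdevs
zpool replace tank sda sdc    # resilvers sda's data onto sdc, then detaches sda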
options="" options_display="default options" log_onexit cleanup [[ -n "$HOLES_FILESIZE" ]] && options=" $options -f $HOLES_FILESIZE " [[ -n "$HOLES_BLKSIZE" ]] && options="$options -b $HOLES_BLKSIZE " [[ -n "$HOLES_COUNT" ]] && options="$options -c $HOLES_COUNT " [[ -n "$HOLES_SEED" ]] && options="$options -s $HOLES_SEED " [[ -n "$HOLES_FILEOFFSET" ]] && options="$options -o $HOLES_FILEOFFSET " ptions="$options -r " [[ -n "$options" ]] && options_display=$options child_pids="" function detach_test { typeset -i iters=2 typeset -i index=0 typeset disk1=$1 typeset i=0 while [[ $i -lt $iters ]]; do log_note "Invoking $FILE_TRUNC with: $options_display" $FILE_TRUNC $options $TESTDIR/$TESTFILE.$i & typeset pid=$! $SLEEP 1 child_pids="$child_pids $pid" ((i = i + 1)) done log_must $ZPOOL detach $TESTPOOL1 $disk1 $SLEEP 10 for wait_pid in $child_pids do $KILL $wait_pid done child_pids="" log_must $ZPOOL export $TESTPOOL1 log_must $ZPOOL import -d $TESTDIR $TESTPOOL1 log_must $ZFS umount $TESTPOOL1/$TESTFS1 log_must $ZDB -cdui $TESTPOOL1/$TESTFS1 log_must $ZFS mount $TESTPOOL1/$TESTFS1 } specials_list="" i=0 while [[ $i != 2 ]]; do - $MKFILE 100m $TESTDIR/$TESTFILE1.$i + $MKFILE $MINVDEVSIZE $TESTDIR/$TESTFILE1.$i specials_list="$specials_list $TESTDIR/$TESTFILE1.$i" ((i = i + 1)) done create_pool $TESTPOOL1 mirror $specials_list log_must $ZFS create $TESTPOOL1/$TESTFS1 log_must $ZFS set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1 detach_test $TESTDIR/$TESTFILE1.1 $ZPOOL iostat -v $TESTPOOL1 | grep "$TESTDIR/$TESTFILE1.1" if [[ $? -eq 0 ]]; then log_fail "$TESTFILE1.1 should no longer be present." fi destroy_pool $TESTPOOL1 log_note "Verify 'zpool detach' fails with non-mirrors." for type in "" "raidz" "raidz1" ; do create_pool $TESTPOOL1 $type $specials_list log_must $ZFS create $TESTPOOL1/$TESTFS1 log_must $ZFS set mountpoint=$TESTDIR1 $TESTPOOL1/$TESTFS1 log_mustnot $ZPOOL detach $TESTDIR/$TESTFILE1.1 $ZPOOL iostat -v $TESTPOOL1 | grep "$TESTDIR/$TESTFILE1.1" if [[ $? -ne 0 ]]; then log_fail "$TESTFILE1.1 is not present." fi destroy_pool $TESTPOOL1 done log_pass diff --git a/tests/zfs-tests/tests/functional/reservation/reservation.shlib b/tests/zfs-tests/tests/functional/reservation/reservation.shlib index 27e9a87e01be..157a41b6be00 100644 --- a/tests/zfs-tests/tests/functional/reservation/reservation.shlib +++ b/tests/zfs-tests/tests/functional/reservation/reservation.shlib @@ -1,201 +1,201 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2016 by Delphix. All rights reserved. # . 
$STF_SUITE/tests/functional/reservation/reservation.cfg # # Function to set the reservation property of a dataset to # 'none' and verify that it is correctly set using both the # "normal" 'zfs get reservation' and the '-p' option which # gives a numerical value. # function zero_reservation { typeset resv_val dataset=$1 log_must $ZFS set reservation=none $dataset resv_val=`$ZFS get -H reservation $dataset | awk '{print $3}'` if [[ $? -ne 0 ]]; then log_fail "Unable to get reservation prop on $dataset" elif [[ $resv_val != "none" ]]; then log_fail "Reservation not 'none' ($resv_val) as expected" fi resv_val=`$ZFS get -pH reservation $dataset | awk '{print $3}'` if [[ $? -ne 0 ]]; then log_fail "Unable to get reservation prop on $dataset" elif [[ $resv_val -ne 0 ]]; then log_fail "Reservation not 0 ($resv_val) as expected" fi return 0 } # # Utility function to see if two values are within a certain specified # limit of each other. Used primarily to check that a dataset's parent # is correctly accounting for space used/available. This function is # needed because currently there is some slop in the way space is # accounted (i.e. a direct comparison can't be done). # function within_limits { typeset valA=$1 typeset valB=$2 typeset delta=$3 if ((valA <= valB)); then if (((valB - valA) <= delta)); then return 0 fi elif ((valB <= valA)); then if (((valA - valB) <= delta)); then return 0 fi fi return 1 } # # Function to create and mount multiple filesystems. Each filesystem # is named by appending the loop-counter value to the specified base # name. # function create_multiple_fs # num_fs base_fs_name base_mnt_name { typeset -i iter=0 typeset -i count=$1 typeset FS_NAME=$2 typeset MNT_NAME=$3 while (($iter < $count)); do log_must $ZFS create ${FS_NAME}$iter log_must $ZFS set mountpoint=${MNT_NAME}$iter ${FS_NAME}$iter ((iter = iter + 1)) done } # # This function computes the largest volume size which is a multiple of the # volume block size (default 8K) and not greater than the largest expected # volsize. # # $1 The largest expected volume size. # $2 The volume block size # function floor_volsize # largest_volsize [volblksize] { typeset largest_volsize=$1 typeset volblksize=${2:-8192} if ((largest_volsize < volblksize)); then log_fail "The largest_volsize must be greater than volblksize." fi typeset real_volsize typeset n ((n = largest_volsize / volblksize)) ((largest_volsize = volblksize * n)) print $largest_volsize } # # This function is a copy of a function by the same name in libzfs_dataset.c # Its purpose is to reserve additional space for volume metadata so volumes # don't unexpectedly run out of room. # # Note: This function can be used to do an estimate for a volume that has not # yet been created. In this case, $vol is not a volume, but rather a pool in # which a volume is going to be created. In this case, use default properties.
# function volsize_to_reservation { typeset vol=$1 typeset volsize=$2 - typeset DN_MAX_INDBLKSHIFT=14 - typeset SPA_BLKPTRSHIFT=7 - typeset SPA_DVAS_PER_BP=3 + typeset -i DN_MAX_INDBLKSHIFT=17 + typeset -i SPA_BLKPTRSHIFT=7 + typeset -i SPA_DVAS_PER_BP=3 typeset DNODES_PER_LEVEL_SHIFT=$((DN_MAX_INDBLKSHIFT - \ SPA_BLKPTRSHIFT)) typeset DNODES_PER_LEVEL=$((1 << $DNODES_PER_LEVEL_SHIFT)) if ds_is_volume $vol; then typeset ncopies=$(get_prop copies $vol) typeset volblocksize=$(get_prop volblocksize $vol) else typeset ncopies=1 typeset volblocksize=8192 fi typeset nblocks=$((volsize / volblocksize)) typeset numdb=7 while ((nblocks > 1)); do ((nblocks += DNODES_PER_LEVEL - 1)) ((nblocks /= DNODES_PER_LEVEL)) ((numdb += nblocks)) done ((numdb *= SPA_DVAS_PER_BP < ncopies + 1 ? SPA_DVAS_PER_BP : \ ncopies + 1)) ((volsize *= ncopies)) ((numdb *= 1 << DN_MAX_INDBLKSHIFT)) ((volsize += numdb)) echo $volsize } # # This function takes a pool name as an argument, and returns the largest (give # or take some slop) -V value that can be used to create a volume in that pool. # This is necessary because during volume creation, a reservation is created # that will be larger than the value specified with -V, and potentially larger # than the available space in the pool. See volsize_to_reservation(). # function largest_volsize_from_pool { typeset pool=$1 typeset poolsize=$(get_prop available $pool) typeset volsize=$poolsize typeset nvolsize while :; do # knock 50M off the volsize each time through ((volsize -= 50 * 1024 * 1024)) nvolsize=$(volsize_to_reservation $pool $volsize) nvolsize=$(floor_volsize $nvolsize) ((nvolsize < poolsize)) && break done echo $volsize } diff --git a/tests/zfs-tests/tests/functional/rsend/rsend_009_pos.ksh b/tests/zfs-tests/tests/functional/rsend/rsend_009_pos.ksh index 0e6fcf1323d9..8f201fde7f4b 100755 --- a/tests/zfs-tests/tests/functional/rsend/rsend_009_pos.ksh +++ b/tests/zfs-tests/tests/functional/rsend/rsend_009_pos.ksh @@ -1,94 +1,94 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2008 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # # Copyright (c) 2013 by Delphix. All rights reserved. # . $STF_SUITE/tests/functional/rsend/rsend.kshlib # # DESCRIPTION: # zfs receive can handle out of space correctly. # # STRATEGY: # 1. Create two pools, one is big and another is small. # 2. Fill the big pool with data. # 3. Take snapshot and backup the whole pool. # 4. Receive this stream in small pool. # 5. Verify zfs receive can handle the out of space error correctly. 
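Stepping back to the reservation library for a moment: to make the arithmetic in volsize_to_reservation() concrete, here is a worked sketch under assumed defaults (8K volblocksize, copies=1, a 1G volume); the numbers are illustrative and the function itself remains authoritative.

typeset -i volsize=$((1 << 30))                 # 1G volume
typeset -i volblocksize=8192                    # default volblocksize
typeset -i epb=$((1 << (17 - 7)))               # 1024 blkptrs per 128K indirect block
typeset -i nblocks=$((volsize / volblocksize))  # 131072 L0 data blocks
typeset -i numdb=7
while ((nblocks > 1)); do
        ((nblocks += epb - 1))
        ((nblocks /= epb))                      # 131072 -> 128 -> 1
        ((numdb += nblocks))                    # numdb = 7 + 128 + 1 = 136
done
((numdb *= 2))                                  # min(3 DVAs, copies + 1) = 2
((numdb *= 1 << 17))                            # charge each indirect at 128K
echo $((volsize + numdb))                       # 1G of data plus ~34M of metadata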
# verify_runnable "global" function cleanup { if datasetexists bpool ; then log_must $ZPOOL destroy -f bpool fi if datasetexists spool ; then log_must $ZPOOL destroy -f spool fi } log_assert "Verify zfs receive can handle out of space correctly." log_onexit cleanup -log_must $MKFILE 100M $TESTDIR/bfile -log_must $MKFILE 64M $TESTDIR/sfile -log_must $ZPOOL create bpool $TESTDIR/bfile -log_must $ZPOOL create spool $TESTDIR/sfile +log_must $MKFILE $MINVDEVSIZE $TESTDIR/bfile +log_must $MKFILE $SPA_MINDEVSIZE $TESTDIR/sfile +log_must $ZPOOL create bpool $TESTDIR/bfile +log_must $ZPOOL create spool $TESTDIR/sfile # # Test out of space on sub-filesystem # log_must $ZFS create bpool/fs mntpnt=$(get_prop mountpoint bpool/fs) log_must $MKFILE 30M $mntpnt/file log_must $ZFS snapshot bpool/fs@snap log_must eval "$ZFS send -R bpool/fs@snap > $BACKDIR/fs-R" log_mustnot eval "$ZFS receive -d -F spool < $BACKDIR/fs-R" log_must datasetnonexists spool/fs log_must ismounted spool # # Test out of space on top filesystem # mntpnt2=$(get_prop mountpoint bpool) log_must $MV $mntpnt/file $mntpnt2 log_must $ZFS destroy -rf bpool/fs log_must $ZFS snapshot bpool@snap log_must eval "$ZFS send -R bpool@snap > $BACKDIR/bpool-R" log_mustnot eval "$ZFS receive -d -F spool < $BACKDIR/bpool-R" log_must datasetnonexists spool/fs log_must ismounted spool log_pass "zfs receive can handle out of space correctly." diff --git a/tests/zfs-tests/tests/functional/slog/setup.ksh b/tests/zfs-tests/tests/functional/slog/setup.ksh index 28c07c961409..bf205e48e1a3 100755 --- a/tests/zfs-tests/tests/functional/slog/setup.ksh +++ b/tests/zfs-tests/tests/functional/slog/setup.ksh @@ -1,50 +1,50 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/slog/slog.cfg verify_runnable "global" if !
verify_slog_support ; then log_unsupported "This system doesn't support separate intent logs" fi if [[ -d $VDIR ]]; then log_must $RM -rf $VDIR fi if [[ -d $VDIR2 ]]; then log_must $RM -rf $VDIR2 fi log_must $MKDIR -p $VDIR $VDIR2 -log_must $MKFILE $SIZE $VDEV $SDEV $LDEV $VDEV2 $SDEV2 $LDEV2 +log_must $MKFILE $MINVDEVSIZE $VDEV $SDEV $LDEV $VDEV2 $SDEV2 $LDEV2 log_pass diff --git a/tests/zfs-tests/tests/functional/slog/slog.cfg b/tests/zfs-tests/tests/functional/slog/slog.cfg index 5ac9a46c51c0..d0d25fde151e 100644 --- a/tests/zfs-tests/tests/functional/slog/slog.cfg +++ b/tests/zfs-tests/tests/functional/slog/slog.cfg @@ -1,41 +1,39 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # -export SIZE=64M - export VDIR=$TEST_BASE_DIR/disk-slog export VDIR2=$TEST_BASE_DIR/disk2-slog export VDEV="$VDIR/a $VDIR/b $VDIR/c" export SDEV="$VDIR/d" export LDEV="$VDIR/e $VDIR/f" export VDEV2="$VDIR2/a $VDIR2/b $VDIR2/c" export SDEV2="$VDIR2/d" export LDEV2="$VDIR2/e $VDIR2/f" diff --git a/tests/zfs-tests/tests/functional/slog/slog_012_neg.ksh b/tests/zfs-tests/tests/functional/slog/slog_012_neg.ksh index 1227f2b8a884..cfe8329211c5 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_012_neg.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_012_neg.ksh @@ -1,73 +1,73 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2007 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/tests/functional/slog/slog.kshlib # # DESCRIPTION: # A pool can survive when one of its mirrored log devices gets corrupted # # STRATEGY: # 1. Create pool with mirror slog devices # 2. Corrupt one of the mirrored log devices # 3. Verify the pool is fine #
verify_runnable "global" log_assert "A pool can survive when one of its mirrored log devices gets corrupted." log_onexit cleanup for type in "" "mirror" "raidz" "raidz2" do for spare in "" "spare" do log_must $ZPOOL create $TESTPOOL $type $VDEV $spare $SDEV \ log mirror $LDEV mntpnt=$(get_prop mountpoint $TESTPOOL) # # Create a file in the pool to trigger writes to the slog devices # log_must $DD if=/dev/urandom of=$mntpnt/testfile.$$ count=100 ldev=$(random_get $LDEV) - log_must $MKFILE $SIZE $ldev + log_must $MKFILE $MINVDEVSIZE $ldev log_must $ZPOOL scrub $TESTPOOL log_must display_status $TESTPOOL log_must verify_slog_device $TESTPOOL $ldev 'UNAVAIL' 'mirror' log_must $ZPOOL destroy -f $TESTPOOL done done log_pass "A pool can survive when one of its mirrored log devices gets corrupted." diff --git a/tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh b/tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh index b2a16b0af8ee..c2d14894440f 100755 --- a/tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh +++ b/tests/zfs-tests/tests/functional/slog/slog_013_pos.ksh @@ -1,94 +1,92 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2009 Sun Microsystems, Inc. All rights reserved. # Use is subject to license terms. # # -# Copyright (c) 2013 by Delphix. All rights reserved. +# Copyright (c) 2013, 2015 by Delphix. All rights reserved. # . $STF_SUITE/tests/functional/slog/slog.kshlib # # DESCRIPTION: # Verify slog device can be disk, file, lofi device or any device that # presents a block interface. # # STRATEGY: # 1. Create a pool # 2. Loop to add different objects as slog devices # 3. Verify it passes # verify_runnable "global" function cleanup_testenv { cleanup if datasetexists $TESTPOOL2 ; then log_must $ZPOOL destroy -f $TESTPOOL2 fi if [[ -n $lofidev ]]; then $LOFIADM -d $lofidev fi } log_assert "Verify slog device can be disk, file, lofi device or any device " \ "that presents a block interface." verify_disk_count "$DISKS" 2 log_onexit cleanup_testenv dsk1=${DISKS%% *} log_must $ZPOOL create $TESTPOOL ${DISKS#$dsk1} # Add normal disk log_must $ZPOOL add $TESTPOOL log $dsk1 log_must verify_slog_device $TESTPOOL $dsk1 'ONLINE' # Add normal file log_must $ZPOOL add $TESTPOOL log $LDEV ldev=$(random_get $LDEV) log_must verify_slog_device $TESTPOOL $ldev 'ONLINE' # Add lofi device lofidev=${LDEV2%% *} log_must $LOFIADM -a $lofidev lofidev=$($LOFIADM $lofidev) log_must $ZPOOL add $TESTPOOL log $lofidev log_must verify_slog_device $TESTPOOL $lofidev 'ONLINE' log_pass "Verify slog device can be disk, file, lofi device or any device " \ "that presents a block interface."
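For orientation, the shape of pool these slog tests build and tear down; an illustrative session, with device names as examples only (log mirrors, unlike top-level data vdevs, can also be removed):

zpool create tank raidz sda sdb sdc spare sdd
zpool add tank log mirror sde sdf   # mirrored slog, as exercised by slog_012
zpool status tank                   # the log devices appear under a "logs" section
zpool remove tank mirror-1          # slogs are removable by their vdev name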
-# Temp disable fore bug 6569095 # Add a file which resides in the pool itself mntpnt=$(get_prop mountpoint $TESTPOOL) -log_must $MKFILE 100M $mntpnt/vdev +log_must $MKFILE $MINVDEVSIZE $mntpnt/vdev log_must $ZPOOL add $TESTPOOL log $mntpnt/vdev -# Temp disable fore bug 6569072 # Add ZFS volume vol=$TESTPOOL/vol -log_must $ZFS create -V 64M $vol +log_must $ZFS create -V $MINVDEVSIZE $vol +log_must $ZPOOL add $TESTPOOL log ${ZVOL_DEVDIR}/$vol \ No newline at end of file
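A closing note on the substitution this diff makes throughout: hard-coded 64m/100m vdev sizes give way to $MINVDEVSIZE, with $SPA_MINDEVSIZE used where a deliberately small device is wanted. A sketch of the convention, with the values shown here assumed for illustration rather than quoted from the suite's config:

# Assumed to live in the suite's common config (values illustrative):
export MINVDEVSIZE=$((256 * 1024 * 1024))    # smallest file vdev the tests create
export SPA_MINDEVSIZE=$((64 * 1024 * 1024))  # minimum device size the kernel accepts

# Tests then size vdevs relative to these instead of literal "64m"/"100m":
log_must $MKFILE $MINVDEVSIZE /var/tmp/vdev0
log_must $MKFILE $(($MINVDEVSIZE * 2)) /var/tmp/vdev1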