diff --git a/tests/test-runner/bin/zts-report.py.in b/tests/test-runner/bin/zts-report.py.in index edfdd47ee6d7..ecc50f487152 100755 --- a/tests/test-runner/bin/zts-report.py.in +++ b/tests/test-runner/bin/zts-report.py.in @@ -1,505 +1,525 @@ #!/usr/bin/env @PYTHON_SHEBANG@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2017 by Delphix. All rights reserved. # Copyright (c) 2018 by Lawrence Livermore National Security, LLC. # # This script must remain compatible with Python 3.6+. # import os import re import sys import argparse # # This script parses the stdout of zfstest, which has this format: # # Test: /path/to/testa (run as root) [00:00] [PASS] # Test: /path/to/testb (run as jkennedy) [00:00] [PASS] # Test: /path/to/testc (run as root) [00:00] [FAIL] # [...many more results...] # # Results Summary # FAIL 22 # SKIP 32 # PASS 1156 # # Running Time: 02:50:31 # Percent passed: 95.5% # Log directory: /var/tmp/test_results/20180615T205926 # # # Common generic reasons for a test or test group to be skipped. # # Some test cases are known to fail in ways which are not harmful or dangerous. # In these cases simply mark the test as a known failure until it can be # updated and the issue resolved. Note that it's preferable to open a unique # issue on the GitHub issue tracker for each test case failure. # known_reason = 'Known issue' # # Some tests require that a test user be able to execute the zfs utilities. # This may not be possible when testing in-tree due to the default permissions # on the user's home directory. When testing this can be resolved by granting # group read access. # # chmod 0750 $HOME # exec_reason = 'Test user execute permissions required for utilities' # # Some tests require that the kernel supports renameat2 syscall. # renameat2_reason = 'Kernel renameat2 support required' # # Some tests require the O_TMPFILE flag which was first introduced in the # 3.11 kernel. # tmpfile_reason = 'Kernel O_TMPFILE support required' # # Some tests require the statx(2) system call on Linux which was first # introduced in the 4.11 kernel. # statx_reason = 'Kernel statx(2) system call required on Linux' # # Some tests require that the lsattr utility support the project id feature. # project_id_reason = 'lsattr with set/show project ID required' # # Some tests require that the kernel support user namespaces. # user_ns_reason = 'Kernel user namespace support required' # # Some rewind tests can fail since nothing guarantees that old MOS blocks # are not overwritten. Snapshots protect datasets and data files but not # the MOS. Reasonable efforts are made in the test case to increase the # odds that some txgs will have their MOS data left untouched, but it is # never a sure thing. # rewind_reason = 'Arbitrary pool rewind is not guaranteed' # # Some tests require a minimum version of the fio benchmark utility. # Older distributions such as CentOS 6.x only provide fio-2.0.13. # fio_reason = 'Fio v2.3 or newer required' # # Some tests require that the DISKS provided support the discard operation. # Normally this is not an issue because loop back devices are used for DISKS # and they support discard (TRIM/UNMAP). 
#
trim_reason = 'DISKS must support discard (TRIM/UNMAP)'

#
# Some tests on FreeBSD require the fspacectl(2) system call and the
# truncate(1) utility supporting the -d option. The system call was first
# introduced in FreeBSD version 1400032.
#
fspacectl_reason = 'fspacectl(2) and truncate -d support required'

#
# Some tests are not applicable to a platform or need to be updated to operate
# in the manner required by the platform. Any tests which are skipped for this
# reason will be suppressed in the final analysis output.
#
na_reason = "Not applicable"

#
# Some test cases don't have all requirements to run on GitHub Actions CI.
#
ci_reason = 'CI runner doesn\'t have all requirements'

#
# Idmapped mount is only supported in kernel version >= 5.12
#
idmap_reason = 'Idmapped mount needs kernel 5.12+'

#
# copy_file_range() is not supported by all kernels
#
cfr_reason = 'Kernel copy_file_range support required'

-cfr_cross_reason = 'copy_file_range(2) cross-filesystem needs kernel 5.3+'
+
+if sys.platform.startswith('freebsd'):
+    cfr_cross_reason = 'copy_file_range(2) cross-filesystem needs FreeBSD 14+'
+else:
+    cfr_cross_reason = 'copy_file_range(2) cross-filesystem needs kernel 5.3+'

#
# These tests are known to fail, thus we use this list to prevent these
# failures from failing the job as a whole; only unexpected failures
# bubble up to cause this script to exit with a non-zero exit status.
#
# Format: { 'test-name': ['expected result', 'issue-number | reason'] }
#
# For each known failure it is recommended to link to a GitHub issue by
# setting the reason to the issue number. Alternately, one of the generic
# reasons listed above can be used.
#
known = {
    'casenorm/mixed_none_lookup_ci': ['FAIL', 7633],
    'casenorm/mixed_formd_lookup_ci': ['FAIL', 7633],
    'cli_root/zpool_import/import_rewind_device_replaced': ['FAIL', rewind_reason],
    'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason],
    'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason],
    'pool_checkpoint/checkpoint_discard_busy': ['SKIP', 12053],
    'privilege/setup': ['SKIP', na_reason],
    'refreserv/refreserv_004_pos': ['FAIL', known_reason],
    'rootpool/setup': ['SKIP', na_reason],
    'rsend/rsend_008_pos': ['SKIP', 6066],
    'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason],
}

if sys.platform.startswith('freebsd'):
    known.update({
        'cli_root/zfs_receive/receive-o-x_props_override': ['FAIL', known_reason],
        'cli_root/zpool_resilver/zpool_resilver_concurrent': ['SKIP', na_reason],
        'cli_root/zpool_wait/zpool_wait_trim_basic': ['SKIP', trim_reason],
        'cli_root/zpool_wait/zpool_wait_trim_cancel': ['SKIP', trim_reason],
        'cli_root/zpool_wait/zpool_wait_trim_flag': ['SKIP', trim_reason],
        'cli_root/zfs_unshare/zfs_unshare_008_pos': ['SKIP', na_reason],
        'cp_files/cp_files_002_pos': ['SKIP', na_reason],
        'link_count/link_count_001': ['SKIP', na_reason],
        'casenorm/mixed_create_failure': ['FAIL', 13215],
        'mmap/mmap_sync_001_pos': ['SKIP', na_reason],
        'rsend/send_raw_ashift': ['SKIP', 14961],
    })
elif sys.platform.startswith('linux'):
    known.update({
        'casenorm/mixed_formd_lookup': ['FAIL', 7633],
        'casenorm/mixed_formd_delete': ['FAIL', 7633],
        'casenorm/sensitive_formd_lookup': ['FAIL', 7633],
        'casenorm/sensitive_formd_delete': ['FAIL', 7633],
        'removal/removal_with_zdb': ['SKIP', known_reason],
        'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason],
    })

#
# These tests may occasionally fail or be skipped. We want their failures
# to be reported but only unexpected failures should bubble up to cause
# this script to exit with a non-zero exit status.
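#
# As an illustration drawn from the entries below: a line such as
#
#   'zvol/zvol_misc/zvol_misc_fua': ['SKIP', 14872],
#
# means a SKIP result for that test is treated as expected rather than as a
# failure, and the numeric reason is reported as a link to GitHub issue 14872.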
# # Format: { 'test-name': ['expected result', 'issue-number | reason'] } # # For each known failure it is recommended to link to a GitHub issue by # setting the reason to the issue number. Alternately, one of the generic # reasons listed above can be used. # maybe = { 'append/threadsappend_001_pos': ['FAIL', 6136], 'chattr/setup': ['SKIP', exec_reason], 'crtime/crtime_001_pos': ['SKIP', statx_reason], 'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason], 'cli_root/zfs_destroy/zfs_destroy_dev_removal_condense': ['FAIL', known_reason], 'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason], 'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', 5479], 'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', known_reason], 'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', known_reason], 'cli_root/zfs_share/zfs_share_concurrent_shares': ['FAIL', known_reason], 'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason], 'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason], 'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason], 'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', 6145], 'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', 6839], 'cli_root/zpool_initialize/zpool_initialize_import_export': ['FAIL', 11948], 'cli_root/zpool_labelclear/zpool_labelclear_removed': ['FAIL', known_reason], 'cli_root/zpool_trim/setup': ['SKIP', trim_reason], 'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', 6141], 'delegate/setup': ['SKIP', exec_reason], 'fallocate/fallocate_punch-hole': ['SKIP', fspacectl_reason], 'history/history_004_pos': ['FAIL', 7026], 'history/history_005_neg': ['FAIL', 6680], 'history/history_006_neg': ['FAIL', 5657], 'history/history_008_pos': ['FAIL', known_reason], 'history/history_010_pos': ['SKIP', exec_reason], 'io/mmap': ['SKIP', fio_reason], 'largest_pool/largest_pool_001_pos': ['FAIL', known_reason], 'mmp/mmp_on_uberblocks': ['FAIL', known_reason], 'pam/setup': ['SKIP', "pamtester might be not available"], 'pool_checkpoint/checkpoint_discard_busy': ['FAIL', 11946], 'projectquota/setup': ['SKIP', exec_reason], 'removal/removal_condense_export': ['FAIL', known_reason], 'renameat2/setup': ['SKIP', renameat2_reason], 'reservation/reservation_008_pos': ['FAIL', 7741], 'reservation/reservation_018_pos': ['FAIL', 5642], 'snapshot/clone_001_pos': ['FAIL', known_reason], 'snapshot/snapshot_009_pos': ['FAIL', 7961], 'snapshot/snapshot_010_pos': ['FAIL', 7961], 'snapused/snapused_004_pos': ['FAIL', 5513], 'tmpfile/setup': ['SKIP', tmpfile_reason], 'trim/setup': ['SKIP', trim_reason], 'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason], 'user_namespace/setup': ['SKIP', user_ns_reason], 'userquota/setup': ['SKIP', exec_reason], 'vdev_zaps/vdev_zaps_004_pos': ['FAIL', known_reason], 'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', 5848], } if sys.platform.startswith('freebsd'): maybe.update({ 'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason], 'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason], 'cli_root/zpool_import/zpool_import_012_pos': ['FAIL', known_reason], 'delegate/zfs_allow_003_pos': ['FAIL', known_reason], 'inheritance/inherit_001_pos': ['FAIL', 11829], 'pool_checkpoint/checkpoint_big_rewind': ['FAIL', 12622], 'pool_checkpoint/checkpoint_indirect': ['FAIL', 12623], 'resilver/resilver_restart_001': ['FAIL', known_reason], 'snapshot/snapshot_002_pos': ['FAIL', '14831'], + 'bclone/bclone_crossfs_corner_cases': ['SKIP', cfr_cross_reason], + 'bclone/bclone_crossfs_corner_cases_limited': + 
['SKIP', cfr_cross_reason], + 'bclone/bclone_crossfs_data': ['SKIP', cfr_cross_reason], + 'bclone/bclone_crossfs_embedded': ['SKIP', cfr_cross_reason], + 'bclone/bclone_crossfs_hole': ['SKIP', cfr_cross_reason], + 'bclone/bclone_diffprops_all': ['SKIP', cfr_cross_reason], + 'bclone/bclone_diffprops_checksum': ['SKIP', cfr_cross_reason], + 'bclone/bclone_diffprops_compress': ['SKIP', cfr_cross_reason], + 'bclone/bclone_diffprops_copies': ['SKIP', cfr_cross_reason], + 'bclone/bclone_diffprops_recordsize': ['SKIP', cfr_cross_reason], + 'bclone/bclone_prop_sync': ['SKIP', cfr_cross_reason], + 'block_cloning/block_cloning_cross_enc_dataset': + ['SKIP', cfr_cross_reason], + 'block_cloning/block_cloning_copyfilerange_cross_dataset': + ['SKIP', cfr_cross_reason] }) elif sys.platform.startswith('linux'): maybe.update({ 'bclone/bclone_crossfs_corner_cases': ['SKIP', cfr_cross_reason], 'bclone/bclone_crossfs_corner_cases_limited': ['SKIP', cfr_cross_reason], 'bclone/bclone_crossfs_data': ['SKIP', cfr_cross_reason], 'bclone/bclone_crossfs_embedded': ['SKIP', cfr_cross_reason], 'bclone/bclone_crossfs_hole': ['SKIP', cfr_cross_reason], 'bclone/bclone_diffprops_all': ['SKIP', cfr_cross_reason], 'bclone/bclone_diffprops_checksum': ['SKIP', cfr_cross_reason], 'bclone/bclone_diffprops_compress': ['SKIP', cfr_cross_reason], 'bclone/bclone_diffprops_copies': ['SKIP', cfr_cross_reason], 'bclone/bclone_diffprops_recordsize': ['SKIP', cfr_cross_reason], 'bclone/bclone_prop_sync': ['SKIP', cfr_cross_reason], 'bclone/bclone_samefs_corner_cases': ['SKIP', cfr_reason], 'bclone/bclone_samefs_corner_cases_limited': ['SKIP', cfr_reason], 'bclone/bclone_samefs_data': ['SKIP', cfr_reason], 'bclone/bclone_samefs_embedded': ['SKIP', cfr_reason], 'bclone/bclone_samefs_hole': ['SKIP', cfr_reason], 'block_cloning/block_cloning_clone_mmap_cached': ['SKIP', cfr_reason], 'block_cloning/block_cloning_clone_mmap_write': ['SKIP', cfr_reason], 'block_cloning/block_cloning_copyfilerange': ['SKIP', cfr_reason], 'block_cloning/block_cloning_copyfilerange_cross_dataset': ['SKIP', cfr_cross_reason], 'block_cloning/block_cloning_copyfilerange_fallback': ['SKIP', cfr_reason], 'block_cloning/block_cloning_copyfilerange_fallback_same_txg': ['SKIP', cfr_cross_reason], 'block_cloning/block_cloning_copyfilerange_partial': ['SKIP', cfr_reason], 'block_cloning/block_cloning_cross_enc_dataset': ['SKIP', cfr_cross_reason], 'block_cloning/block_cloning_disabled_copyfilerange': ['SKIP', cfr_reason], 'block_cloning/block_cloning_lwb_buffer_overflow': ['SKIP', cfr_reason], 'block_cloning/block_cloning_replay': ['SKIP', cfr_reason], 'block_cloning/block_cloning_replay_encrypted': ['SKIP', cfr_reason], 'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason], 'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason], 'cp_files/cp_files_002_pos': ['SKIP', cfr_reason], 'fault/auto_online_002_pos': ['FAIL', 11889], 'fault/auto_replace_001_pos': ['FAIL', 14851], 'fault/auto_spare_002_pos': ['FAIL', 11889], 'fault/auto_spare_multiple': ['FAIL', 11889], 'fault/auto_spare_shared': ['FAIL', 11889], 'fault/decompress_fault': ['FAIL', 11889], 'idmap_mount/idmap_mount_001': ['SKIP', idmap_reason], 'idmap_mount/idmap_mount_002': ['SKIP', idmap_reason], 'idmap_mount/idmap_mount_003': ['SKIP', idmap_reason], 'idmap_mount/idmap_mount_004': ['SKIP', idmap_reason], 'idmap_mount/idmap_mount_005': ['SKIP', idmap_reason], 'io/io_uring': ['SKIP', 'io_uring support required'], 'limits/filesystem_limit': ['SKIP', known_reason], 'limits/snapshot_limit': 
['SKIP', known_reason], 'mmp/mmp_active_import': ['FAIL', known_reason], 'mmp/mmp_exported_import': ['FAIL', known_reason], 'mmp/mmp_inactive_import': ['FAIL', known_reason], 'zvol/zvol_misc/zvol_misc_fua': ['SKIP', 14872], 'zvol/zvol_misc/zvol_misc_snapdev': ['FAIL', 12621], 'zvol/zvol_misc/zvol_misc_trim': ['SKIP', 14872], 'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason], }) # Not all Github actions runners have scsi_debug module, so we may skip # some tests which use it. if os.environ.get('CI') == 'true': known.update({ 'cli_root/zpool_expand/zpool_expand_001_pos': ['SKIP', ci_reason], 'cli_root/zpool_expand/zpool_expand_003_neg': ['SKIP', ci_reason], 'cli_root/zpool_expand/zpool_expand_005_pos': ['SKIP', ci_reason], 'cli_root/zpool_reopen/setup': ['SKIP', ci_reason], 'cli_root/zpool_reopen/zpool_reopen_001_pos': ['SKIP', ci_reason], 'cli_root/zpool_reopen/zpool_reopen_002_pos': ['SKIP', ci_reason], 'cli_root/zpool_reopen/zpool_reopen_003_pos': ['SKIP', ci_reason], 'cli_root/zpool_reopen/zpool_reopen_004_pos': ['SKIP', ci_reason], 'cli_root/zpool_reopen/zpool_reopen_005_pos': ['SKIP', ci_reason], 'cli_root/zpool_reopen/zpool_reopen_006_neg': ['SKIP', ci_reason], 'cli_root/zpool_reopen/zpool_reopen_007_pos': ['SKIP', ci_reason], 'cli_root/zpool_split/zpool_split_wholedisk': ['SKIP', ci_reason], 'fault/auto_offline_001_pos': ['SKIP', ci_reason], 'fault/auto_online_001_pos': ['SKIP', ci_reason], 'fault/auto_online_002_pos': ['SKIP', ci_reason], 'fault/auto_replace_001_pos': ['SKIP', ci_reason], 'fault/auto_replace_002_pos': ['SKIP', ci_reason], 'fault/auto_spare_ashift': ['SKIP', ci_reason], 'fault/auto_spare_shared': ['SKIP', ci_reason], 'procfs/pool_state': ['SKIP', ci_reason], }) maybe.update({ 'events/events_002_pos': ['FAIL', 11546], }) def process_results(pathname): try: f = open(pathname) except IOError as e: print('Error opening file:', e) sys.exit(1) prefix = '/zfs-tests/tests/(?:functional|perf/regression)/' pattern = \ r'^Test(?:\s+\(\S+\))?:' + \ rf'\s*\S*{prefix}(\S+)' + \ r'\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]' pattern_log = r'^\s*Log directory:\s*(\S*)' d = {} logdir = 'Could not determine log directory.' 
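    # As an illustrative aside (the sample path below is hypothetical), a
    # result line such as
    #
    #   Test: /var/tmp/zfs-tests/tests/functional/cli_root/zdb/zdb_006_pos (run as root) [00:01] [PASS]
    #
    # is matched by the pattern above with groups
    # ('cli_root/zdb/zdb_006_pos', 'root', '00:01', 'PASS'); the loop below
    # keeps only the test name (group 1) and its result (group 4).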
for line in f.readlines(): m = re.match(pattern, line) if m and len(m.groups()) == 4: d[m.group(1)] = m.group(4) continue m = re.match(pattern_log, line) if m: logdir = m.group(1) return d, logdir class ListMaybesAction(argparse.Action): def __init__(self, option_strings, dest="SUPPRESS", default="SUPPRESS", help="list flaky tests and exit"): super(ListMaybesAction, self).__init__( option_strings=option_strings, dest=dest, default=default, nargs=0, help=help) def __call__(self, parser, namespace, values, option_string=None): for test in maybe: print(test) sys.exit(0) if __name__ == "__main__": parser = argparse.ArgumentParser(description='Analyze ZTS logs') parser.add_argument('logfile') parser.add_argument('--list-maybes', action=ListMaybesAction) parser.add_argument('--no-maybes', action='store_false', dest='maybes') args = parser.parse_args() results, logdir = process_results(args.logfile) if not results: print("\n\nNo test results were found.") print("Log directory:", logdir) sys.exit(0) expected = [] unexpected = [] all_maybes = True for test in list(results.keys()): if results[test] == "PASS": continue setup = test.replace(os.path.basename(test), "setup") if results[test] == "SKIP" and test != setup: if setup in known and known[setup][0] == "SKIP": continue if setup in maybe and maybe[setup][0] == "SKIP": continue if (test in known and results[test] in known[test][0]): expected.append(test) elif test in maybe and results[test] in maybe[test][0]: if results[test] == 'SKIP' or args.maybes: expected.append(test) elif not args.maybes: unexpected.append(test) else: unexpected.append(test) all_maybes = False print("\nTests with results other than PASS that are expected:") for test in sorted(expected): issue_url = 'https://github.com/openzfs/zfs/issues/' # Include the reason why the result is expected, given the following: # 1. Suppress test results which set the "Not applicable" reason. # 2. Numerical reasons are assumed to be GitHub issue numbers. # 3. When an entire test group is skipped only report the setup reason. if test in known: if known[test][1] == na_reason: continue elif isinstance(known[test][1], int): expect = f"{issue_url}{known[test][1]}" else: expect = known[test][1] elif test in maybe: if isinstance(maybe[test][1], int): expect = f"{issue_url}{maybe[test][1]}" else: expect = maybe[test][1] elif setup in known and known[setup][0] == "SKIP" and setup != test: continue elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test: continue else: expect = "UNKNOWN REASON" print(f" {results[test]} {test} ({expect})") print("\nTests with result of PASS that are unexpected:") for test in sorted(known.keys()): # We probably should not be silently ignoring the case # where "test" is not in "results". 
if test not in results or results[test] != "PASS": continue print(f" {results[test]} {test} (expected {known[test][0]})") print("\nTests with results other than PASS that are unexpected:") for test in sorted(unexpected): expect = "PASS" if test not in known else known[test][0] print(f" {results[test]} {test} (expected {expect})") if len(unexpected) == 0: sys.exit(0) elif not args.maybes and all_maybes: sys.exit(2) else: sys.exit(1) diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib index b4d2b91dd476..dfab48d2cdaf 100644 --- a/tests/zfs-tests/include/libtest.shlib +++ b/tests/zfs-tests/include/libtest.shlib @@ -1,3896 +1,3909 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2009, Sun Microsystems Inc. All rights reserved. # Copyright (c) 2012, 2020, Delphix. All rights reserved. # Copyright (c) 2017, Tim Chase. All rights reserved. # Copyright (c) 2017, Nexenta Systems Inc. All rights reserved. # Copyright (c) 2017, Lawrence Livermore National Security LLC. # Copyright (c) 2017, Datto Inc. All rights reserved. # Copyright (c) 2017, Open-E Inc. All rights reserved. # Copyright (c) 2021, The FreeBSD Foundation. # Use is subject to license terms. # . ${STF_SUITE}/include/tunables.cfg . ${STF_TOOLS}/include/logapi.shlib . ${STF_SUITE}/include/math.shlib . ${STF_SUITE}/include/blkdev.shlib # On AlmaLinux 9 we will see $PWD = '.' instead of the full path. This causes # some tests to fail. Fix it up here. if [ "$PWD" = "." ] ; then PWD="$(readlink -f $PWD)" fi # # Apply constrained path when available. This is required since the # PATH may have been modified by sudo's secure_path behavior. # if [ -n "$STF_PATH" ]; then export PATH="$STF_PATH" fi # # Generic dot version comparison function # # Returns success when version $1 is greater than or equal to $2. # function compare_version_gte { [ "$(printf "$1\n$2" | sort -V | tail -n1)" = "$1" ] } -# Linux kernel version comparison function -# -# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version -# -# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ] -# -function linux_version +# Helper function used by linux_version() and freebsd_version() +function kernel_version { typeset ver="$1" [ -z "$ver" ] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+") typeset version major minor _ IFS='.' 
	read -r version major minor _ <<<"$ver"

	[ -z "$version" ] && version=0
	[ -z "$major" ] && major=0
	[ -z "$minor" ] && minor=0

	echo $((version * 100000 + major * 1000 + minor))
}

+# Linux kernel version comparison function
+#
+# $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version
+#
+# Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ]
+function linux_version {
+	kernel_version "$1"
+}
+
+# FreeBSD version comparison function
+#
+# $1 FreeBSD version ("13.2", "14.0") or blank for installed FreeBSD version
+#
+# Used for comparison: if [ $(freebsd_version) -ge $(freebsd_version "13.2") ]
+function freebsd_version {
+	kernel_version "$1"
+}
+
# Determine if this is a Linux test system
#
# Return 0 if platform Linux, 1 if otherwise
function is_linux
{
	[ "$UNAME" = "Linux" ]
}

# Determine if this is an illumos test system
#
# Return 0 if platform illumos, 1 if otherwise
function is_illumos
{
	[ "$UNAME" = "illumos" ]
}

# Determine if this is a FreeBSD test system
#
# Return 0 if platform FreeBSD, 1 if otherwise
function is_freebsd
{
	[ "$UNAME" = "FreeBSD" ]
}

# Determine if this is a 32-bit system
#
# Return 0 if platform is 32-bit, 1 if otherwise
function is_32bit
{
	[ $(getconf LONG_BIT) = "32" ]
}

# Determine if kmemleak is enabled
#
# Return 0 if kmemleak is enabled, 1 if otherwise
function is_kmemleak
{
	is_linux && [ -e /sys/kernel/debug/kmemleak ]
}

# Determine whether a dataset is mounted
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
#
# Return 0 if dataset is mounted; 1 if unmounted; 2 on error
function ismounted
{
	typeset fstype=$2
	[[ -z $fstype ]] && fstype=zfs
	typeset out dir name

	case $fstype in
	zfs)
		if [[ "$1" == "/"* ]] ; then
			! zfs mount | awk -v fs="$1" '$2 == fs {exit 1}'
		else
			! zfs mount | awk -v ds="$1" '$1 == ds {exit 1}'
		fi
		;;
	ufs|nfs)
		if is_freebsd; then
			mount -pt $fstype | while read dev dir _t _flags; do
				[[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0
			done
		else
			out=$(df -F $fstype $1 2>/dev/null) || return

			dir=${out%%\(*}
			dir=${dir%% *}
			name=${out##*\(}
			name=${name%%\)*}
			name=${name%% *}

			[[ "$1" == "$dir" || "$1" == "$name" ]] && return 0
		fi
		;;
	ext*)
		df -t $fstype $1 > /dev/null 2>&1
		;;
	zvol)
		if [[ -L "$ZVOL_DEVDIR/$1" ]]; then
			link=$(readlink -f $ZVOL_DEVDIR/$1)
			[[ -n "$link" ]] && \
				mount | grep -q "^$link" && \
					return 0
		fi
		;;
	*)
		false
		;;
	esac
}

# Return 0 if a dataset is mounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function mounted
{
	ismounted $1 $2
}

# Return 0 if a dataset is unmounted; 1 otherwise
#
# $1 dataset name
# $2 filesystem type; optional - defaulted to zfs
function unmounted
{
	! ismounted $1 $2
}

function default_setup
{
	default_setup_noexit "$@"
	log_pass
}

function default_setup_no_mountpoint
{
	default_setup_noexit "$1" "$2" "$3" "yes"
	log_pass
}

#
# Given a list of disks, setup storage pools and datasets.
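# For illustration, the parameters (named in the function body below) are:
#   $1 disk list, $2 non-empty to also create a container dataset,
#   $3 non-empty to also create a volume, $4 non-empty to skip setting
#   mountpoints.  e.g. default_setup_noexit "$DISKS" "" "true" creates
#   $TESTPOOL/$TESTFS plus a $VOLSIZE volume (in the global zone).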
# function default_setup_noexit { typeset disklist=$1 typeset container=$2 typeset volume=$3 typeset no_mountpoint=$4 log_note begin default_setup_noexit if is_global_zone; then if poolexists $TESTPOOL ; then destroy_pool $TESTPOOL fi [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL log_must zpool create -f $TESTPOOL $disklist else reexport_pool fi rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR log_must zfs create $TESTPOOL/$TESTFS if [[ -z $no_mountpoint ]]; then log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS fi if [[ -n $container ]]; then rm -rf $TESTDIR1 || \ log_unresolved Could not remove $TESTDIR1 mkdir -p $TESTDIR1 || \ log_unresolved Could not create $TESTDIR1 log_must zfs create $TESTPOOL/$TESTCTR log_must zfs set canmount=off $TESTPOOL/$TESTCTR log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1 if [[ -z $no_mountpoint ]]; then log_must zfs set mountpoint=$TESTDIR1 \ $TESTPOOL/$TESTCTR/$TESTFS1 fi fi if [[ -n $volume ]]; then if is_global_zone ; then log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL block_device_wait else log_must zfs create $TESTPOOL/$TESTVOL fi fi } # # Given a list of disks, setup a storage pool, file system and # a container. # function default_container_setup { typeset disklist=$1 default_setup "$disklist" "true" } # # Given a list of disks, setup a storage pool,file system # and a volume. # function default_volume_setup { typeset disklist=$1 default_setup "$disklist" "" "true" } # # Given a list of disks, setup a storage pool,file system, # a container and a volume. # function default_container_volume_setup { typeset disklist=$1 default_setup "$disklist" "true" "true" } # # Create a snapshot on a filesystem or volume. Defaultly create a snapshot on # filesystem # # $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS # $2 snapshot name. Default, $TESTSNAP # function create_snapshot { typeset fs_vol=${1:-$TESTPOOL/$TESTFS} typeset snap=${2:-$TESTSNAP} [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined." [[ -z $snap ]] && log_fail "Snapshot's name is undefined." if snapexists $fs_vol@$snap; then log_fail "$fs_vol@$snap already exists." fi datasetexists $fs_vol || \ log_fail "$fs_vol must exist." log_must zfs snapshot $fs_vol@$snap } # # Create a clone from a snapshot, default clone name is $TESTCLONE. # # $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default. # $2 Clone name, $TESTPOOL/$TESTCLONE is default. # function create_clone # snapshot clone { typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} typeset clone=${2:-$TESTPOOL/$TESTCLONE} [[ -z $snap ]] && \ log_fail "Snapshot name is undefined." [[ -z $clone ]] && \ log_fail "Clone name is undefined." log_must zfs clone $snap $clone } # # Create a bookmark of the given snapshot. Defaultly create a bookmark on # filesystem. # # $1 Existing filesystem or volume name. Default, $TESTFS # $2 Existing snapshot name. Default, $TESTSNAP # $3 bookmark name. Default, $TESTBKMARK # function create_bookmark { typeset fs_vol=${1:-$TESTFS} typeset snap=${2:-$TESTSNAP} typeset bkmark=${3:-$TESTBKMARK} [[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined." [[ -z $snap ]] && log_fail "Snapshot's name is undefined." [[ -z $bkmark ]] && log_fail "Bookmark's name is undefined." if bkmarkexists $fs_vol#$bkmark; then log_fail "$fs_vol#$bkmark already exists." fi datasetexists $fs_vol || \ log_fail "$fs_vol must exist." snapexists $fs_vol@$snap || \ log_fail "$fs_vol@$snap must exist." 
log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark } # # Create a temporary clone result of an interrupted resumable 'zfs receive' # $1 Destination filesystem name. Must not exist, will be created as the result # of this function along with its %recv temporary clone # $2 Source filesystem name. Must not exist, will be created and destroyed # function create_recv_clone { typeset recvfs="$1" typeset sendfs="${2:-$TESTPOOL/create_recv_clone}" typeset snap="$sendfs@snap1" typeset incr="$sendfs@snap2" typeset mountpoint="$TESTDIR/create_recv_clone" typeset sendfile="$TESTDIR/create_recv_clone.zsnap" [[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined." datasetexists $recvfs && log_fail "Recv filesystem must not exist." datasetexists $sendfs && log_fail "Send filesystem must not exist." log_must zfs create -o compression=off -o mountpoint="$mountpoint" $sendfs log_must zfs snapshot $snap log_must eval "zfs send $snap | zfs recv -u $recvfs" log_must mkfile 1m "$mountpoint/data" log_must zfs snapshot $incr log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \ iflag=fullblock > $sendfile" log_mustnot eval "zfs recv -su $recvfs < $sendfile" destroy_dataset "$sendfs" "-r" log_must rm -f "$sendfile" if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then log_fail "Error creating temporary $recvfs/%recv clone" fi } function default_mirror_setup { default_mirror_setup_noexit $1 $2 $3 log_pass } # # Given a pair of disks, set up a storage pool and dataset for the mirror # @parameters: $1 the primary side of the mirror # $2 the secondary side of the mirror # @uses: ZPOOL ZFS TESTPOOL TESTFS function default_mirror_setup_noexit { readonly func="default_mirror_setup_noexit" typeset primary=$1 typeset secondary=$2 [[ -z $primary ]] && \ log_fail "$func: No parameters passed" [[ -z $secondary ]] && \ log_fail "$func: No secondary partition passed" [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL log_must zpool create -f $TESTPOOL mirror $@ log_must zfs create $TESTPOOL/$TESTFS log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS } # # Destroy the configured testpool mirrors. # the mirrors are of the form ${TESTPOOL}{number} # @uses: ZPOOL ZFS TESTPOOL function destroy_mirrors { default_cleanup_noexit log_pass } function default_raidz_setup { default_raidz_setup_noexit "$*" log_pass } # # Given a minimum of two disks, set up a storage pool and dataset for the raid-z # $1 the list of disks # function default_raidz_setup_noexit { typeset disklist="$*" disks=(${disklist[*]}) if [[ ${#disks[*]} -lt 2 ]]; then log_fail "A raid-z requires a minimum of two disks." fi [[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL log_must zpool create -f $TESTPOOL raidz $disklist log_must zfs create $TESTPOOL/$TESTFS log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS } # # Common function used to cleanup storage pools and datasets. # # Invoked at the start of the test suite to ensure the system # is in a known state, and also at the end of each set of # sub-tests to ensure errors from one set of tests doesn't # impact the execution of the next set. function default_cleanup { default_cleanup_noexit log_pass } # # Utility function used to list all available pool names. # # NOTE: $KEEP is a variable containing pool names, separated by a newline # character, that must be excluded from the returned list. # function get_all_pools { zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS" } function default_cleanup_noexit { typeset pool="" # # Destroying the pool will also destroy any # filesystems it contains. 
# if is_global_zone; then zfs unmount -a > /dev/null 2>&1 ALL_POOLS=$(get_all_pools) # Here, we loop through the pools we're allowed to # destroy, only destroying them if it's safe to do # so. while [ ! -z ${ALL_POOLS} ] do for pool in ${ALL_POOLS} do if safe_to_destroy_pool $pool ; then destroy_pool $pool fi done ALL_POOLS=$(get_all_pools) done zfs mount -a else typeset fs="" for fs in $(zfs list -H -o name \ | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do destroy_dataset "$fs" "-Rf" done # Need cleanup here to avoid garbage dir left. for fs in $(zfs list -H -o name); do [[ $fs == /$ZONE_POOL ]] && continue [[ -d $fs ]] && log_must rm -rf $fs/* done # # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to # the default value # for fs in $(zfs list -H -o name); do if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then log_must zfs set reservation=none $fs log_must zfs set recordsize=128K $fs log_must zfs set mountpoint=/$fs $fs typeset enc=$(get_prop encryption $fs) if [ -z "$enc" ] || [ "$enc" = "off" ]; then log_must zfs set checksum=on $fs fi log_must zfs set compression=off $fs log_must zfs set atime=on $fs log_must zfs set devices=off $fs log_must zfs set exec=on $fs log_must zfs set setuid=on $fs log_must zfs set readonly=off $fs log_must zfs set snapdir=hidden $fs log_must zfs set aclmode=groupmask $fs log_must zfs set aclinherit=secure $fs fi done fi [[ -d $TESTDIR ]] && \ log_must rm -rf $TESTDIR disk1=${DISKS%% *} if is_mpath_device $disk1; then delete_partitions fi rm -f $TEST_BASE_DIR/{err,out} } # # Common function used to cleanup storage pools, file systems # and containers. # function default_container_cleanup { if ! is_global_zone; then reexport_pool fi ismounted $TESTPOOL/$TESTCTR/$TESTFS1 && log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1 destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R" destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf" [[ -e $TESTDIR1 ]] && \ log_must rm -rf $TESTDIR1 default_cleanup } # # Common function used to cleanup snapshot of file system or volume. Default to # delete the file system's snapshot # # $1 snapshot name # function destroy_snapshot { typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} if ! snapexists $snap; then log_fail "'$snap' does not exist." fi # # For the sake of the value which come from 'get_prop' is not equal # to the really mountpoint when the snapshot is unmounted. So, firstly # check and make sure this snapshot's been mounted in current system. # typeset mtpt="" if ismounted $snap; then mtpt=$(get_prop mountpoint $snap) fi destroy_dataset "$snap" [[ $mtpt != "" && -d $mtpt ]] && \ log_must rm -rf $mtpt } # # Common function used to cleanup clone. # # $1 clone name # function destroy_clone { typeset clone=${1:-$TESTPOOL/$TESTCLONE} if ! datasetexists $clone; then log_fail "'$clone' does not existed." fi # With the same reason in destroy_snapshot typeset mtpt="" if ismounted $clone; then mtpt=$(get_prop mountpoint $clone) fi destroy_dataset "$clone" [[ $mtpt != "" && -d $mtpt ]] && \ log_must rm -rf $mtpt } # # Common function used to cleanup bookmark of file system or volume. Default # to delete the file system's bookmark. # # $1 bookmark name # function destroy_bookmark { typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK} if ! bkmarkexists $bkmark; then log_fail "'$bkmarkp' does not existed." fi destroy_dataset "$bkmark" } # Return 0 if a snapshot exists; $? otherwise # # $1 - snapshot name function snapexists { zfs list -H -t snapshot "$1" > /dev/null 2>&1 } # # Return 0 if a bookmark exists; $? 
otherwise # # $1 - bookmark name # function bkmarkexists { zfs list -H -t bookmark "$1" > /dev/null 2>&1 } # # Return 0 if a hold exists; $? otherwise # # $1 - hold tag # $2 - snapshot name # function holdexists { ! zfs holds "$2" | awk -v t="$1" '$2 ~ t { exit 1 }' } # # Set a property to a certain value on a dataset. # Sets a property of the dataset to the value as passed in. # @param: # $1 dataset who's property is being set # $2 property to set # $3 value to set property to # @return: # 0 if the property could be set. # non-zero otherwise. # @use: ZFS # function dataset_setprop { typeset fn=dataset_setprop if (($# < 3)); then log_note "$fn: Insufficient parameters (need 3, had $#)" return 1 fi typeset output= output=$(zfs set $2=$3 $1 2>&1) typeset rv=$? if ((rv != 0)); then log_note "Setting property on $1 failed." log_note "property $2=$3" log_note "Return Code: $rv" log_note "Output: $output" return $rv fi return 0 } # # Check a numeric assertion # @parameter: $@ the assertion to check # @output: big loud notice if assertion failed # @use: log_fail # function assert { (($@)) || log_fail "$@" } # # Function to format partition size of a disk # Given a disk cxtxdx reduces all partitions # to 0 size # function zero_partitions # { typeset diskname=$1 typeset i if is_freebsd; then gpart destroy -F $diskname elif is_linux; then DSK=$DEV_DSKDIR/$diskname DSK=$(echo $DSK | sed -e "s|//|/|g") log_must parted $DSK -s -- mklabel gpt blockdev --rereadpt $DSK 2>/dev/null block_device_wait else for i in 0 1 3 4 5 6 7 do log_must set_partition $i "" 0mb $diskname done fi return 0 } # # Given a slice, size and disk, this function # formats the slice to the specified size. # Size should be specified with units as per # the `format` command requirements eg. 100mb 3gb # # NOTE: This entire interface is problematic for the Linux parted utility # which requires the end of the partition to be specified. It would be # best to retire this interface and replace it with something more flexible. # At the moment a best effort is made. # # arguments: function set_partition { typeset -i slicenum=$1 typeset start=$2 typeset size=$3 typeset disk=${4#$DEV_DSKDIR/} disk=${disk#$DEV_RDSKDIR/} case "$UNAME" in Linux) if [[ -z $size || -z $disk ]]; then log_fail "The size or disk name is unspecified." fi disk=$DEV_DSKDIR/$disk typeset size_mb=${size%%[mMgG]} size_mb=${size_mb%%[mMgG][bB]} if [[ ${size:1:1} == 'g' ]]; then ((size_mb = size_mb * 1024)) fi # Create GPT partition table when setting slice 0 or # when the device doesn't already contain a GPT label. parted $disk -s -- print 1 >/dev/null typeset ret_val=$? if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then if ! parted $disk -s -- mklabel gpt; then log_note "Failed to create GPT partition table on $disk" return 1 fi fi # When no start is given align on the first cylinder. if [[ -z "$start" ]]; then start=1 fi # Determine the cylinder size for the device and using # that calculate the end offset in cylinders. typeset -i cly_size_kb=0 cly_size_kb=$(parted -m $disk -s -- unit cyl print | awk -F '[:k.]' 'NR == 3 {print $4}') ((end = (size_mb * 1024 / cly_size_kb) + start)) parted $disk -s -- \ mkpart part$slicenum ${start}cyl ${end}cyl typeset ret_val=$? if [[ $ret_val -ne 0 ]]; then log_note "Failed to create partition $slicenum on $disk" return 1 fi blockdev --rereadpt $disk 2>/dev/null block_device_wait $disk ;; FreeBSD) if [[ -z $size || -z $disk ]]; then log_fail "The size or disk name is unspecified." fi disk=$DEV_DSKDIR/$disk if [[ $slicenum -eq 0 ]] || ! 
gpart show $disk >/dev/null 2>&1; then gpart destroy -F $disk >/dev/null 2>&1 if ! gpart create -s GPT $disk; then log_note "Failed to create GPT partition table on $disk" return 1 fi fi typeset index=$((slicenum + 1)) if [[ -n $start ]]; then start="-b $start" fi gpart add -t freebsd-zfs $start -s $size -i $index $disk if [[ $ret_val -ne 0 ]]; then log_note "Failed to create partition $slicenum on $disk" return 1 fi block_device_wait $disk ;; *) if [[ -z $slicenum || -z $size || -z $disk ]]; then log_fail "The slice, size or disk name is unspecified." fi typeset format_file=/var/tmp/format_in.$$ echo "partition" >$format_file echo "$slicenum" >> $format_file echo "" >> $format_file echo "" >> $format_file echo "$start" >> $format_file echo "$size" >> $format_file echo "label" >> $format_file echo "" >> $format_file echo "q" >> $format_file echo "q" >> $format_file format -e -s -d $disk -f $format_file typeset ret_val=$? rm -f $format_file ;; esac if [[ $ret_val -ne 0 ]]; then log_note "Unable to format $disk slice $slicenum to $size" return 1 fi return 0 } # # Delete all partitions on all disks - this is specifically for the use of multipath # devices which currently can only be used in the test suite as raw/un-partitioned # devices (ie a zpool cannot be created on a whole mpath device that has partitions) # function delete_partitions { typeset disk if [[ -z $DISKSARRAY ]]; then DISKSARRAY=$DISKS fi if is_linux; then typeset -i part for disk in $DISKSARRAY; do for (( part = 1; part < MAX_PARTITIONS; part++ )); do typeset partition=${disk}${SLICE_PREFIX}${part} parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1 if lsblk | grep -qF ${partition}; then log_fail "Partition ${partition} not deleted" else log_note "Partition ${partition} deleted" fi done done elif is_freebsd; then for disk in $DISKSARRAY; do if gpart destroy -F $disk; then log_note "Partitions for ${disk} deleted" else log_fail "Partitions for ${disk} not deleted" fi done fi } # # Get the end cyl of the given slice # function get_endslice # { typeset disk=$1 typeset slice=$2 if [[ -z $disk || -z $slice ]] ; then log_fail "The disk name or slice number is unspecified." fi case "$UNAME" in Linux) endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \ awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}') ((endcyl = (endcyl + 1))) ;; FreeBSD) disk=${disk#/dev/zvol/} disk=${disk%p*} slice=$((slice + 1)) endcyl=$(gpart show $disk | \ awk -v slice=$slice '$3 == slice { print $1 + $2 }') ;; *) disk=${disk#/dev/dsk/} disk=${disk#/dev/rdsk/} disk=${disk%s*} typeset -i ratio=0 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \ awk '/sectors\/cylinder/ {print $2}') if ((ratio == 0)); then return fi typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 | awk -v token="$slice" '$1 == token {print $6}') ((endcyl = (endcyl + 1) / ratio)) ;; esac echo $endcyl } # # Given a size,disk and total slice number, this function formats the # disk slices from 0 to the total slice number with the same specified # size. # function partition_disk # { typeset -i i=0 typeset slice_size=$1 typeset disk_name=$2 typeset total_slices=$3 typeset cyl zero_partitions $disk_name while ((i < $total_slices)); do if ! 
is_linux; then if ((i == 2)); then ((i = i + 1)) continue fi fi log_must set_partition $i "$cyl" $slice_size $disk_name cyl=$(get_endslice $disk_name $i) ((i = i+1)) done } # # This function continues to write to a filenum number of files into dirnum # number of directories until either file_write returns an error or the # maximum number of files per directory have been written. # # Usage: # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data] # # Return value: 0 on success # non 0 on error # # Where : # destdir: is the directory where everything is to be created under # dirnum: the maximum number of subdirectories to use, -1 no limit # filenum: the maximum number of files per subdirectory # bytes: number of bytes to write # num_writes: number of types to write out bytes # data: the data that will be written # # E.g. # fill_fs /testdir 20 25 1024 256 0 # # Note: bytes * num_writes equals the size of the testfile # function fill_fs # destdir dirnum filenum bytes num_writes data { typeset destdir=${1:-$TESTDIR} typeset -i dirnum=${2:-50} typeset -i filenum=${3:-50} typeset -i bytes=${4:-8192} typeset -i num_writes=${5:-10240} typeset data=${6:-0} mkdir -p $destdir/{1..$dirnum} for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do file_write -o create -f $f -b $bytes -c $num_writes -d $data \ || return done } # Get the specified dataset property in parsable format or fail function get_prop # property dataset { typeset prop=$1 typeset dataset=$2 zfs get -Hpo value "$prop" "$dataset" || log_fail "zfs get $prop $dataset" } # Get the specified pool property in parsable format or fail function get_pool_prop # property pool { typeset prop=$1 typeset pool=$2 zpool get -Hpo value "$prop" "$pool" || log_fail "zpool get $prop $pool" } # Return 0 if a pool exists; $? otherwise # # $1 - pool name function poolexists { typeset pool=$1 if [[ -z $pool ]]; then log_note "No pool name given." return 1 fi zpool get name "$pool" > /dev/null 2>&1 } # Return 0 if all the specified datasets exist; $? otherwise # # $1-n dataset name function datasetexists { if (($# == 0)); then log_note "No dataset name given." return 1 fi zfs get name "$@" > /dev/null 2>&1 } # return 0 if none of the specified datasets exists, otherwise return 1. # # $1-n dataset name function datasetnonexists { if (($# == 0)); then log_note "No dataset name given." return 1 fi while (($# > 0)); do zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \ && return 1 shift done return 0 } # FreeBSD breaks exports(5) at whitespace and doesn't process escapes # Solaris just breaks # # cf. https://github.com/openzfs/zfs/pull/13165#issuecomment-1059845807 # # Linux can have spaces (which are \OOO-escaped), # but can't have backslashes because they're parsed recursively function shares_can_have_whitespace { is_linux } function is_shared_freebsd { typeset fs=$1 pgrep -q mountd && showmount -E | grep -qx "$fs" } function is_shared_illumos { typeset fs=$1 typeset mtpt for mtpt in `share | awk '{print $2}'` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done typeset stat=$(svcs -H -o STA nfs/server:default) if [[ $stat != "ON" ]]; then log_note "Current nfs/server status: $stat" fi return 1 } function is_shared_linux { typeset fs=$1 ! exportfs -s | awk -v fs="${fs//\\/\\\\}" '/^\// && $1 == fs {exit 1}' } # # Given a mountpoint, or a dataset name, determine if it is shared via NFS. # # Returns 0 if shared, 1 otherwise. 
# function is_shared { typeset fs=$1 typeset mtpt if [[ $fs != "/"* ]] ; then if datasetnonexists "$fs" ; then return 1 else mtpt=$(get_prop mountpoint "$fs") case "$mtpt" in none|legacy|-) return 1 ;; *) fs=$mtpt ;; esac fi fi case "$UNAME" in FreeBSD) is_shared_freebsd "$fs" ;; Linux) is_shared_linux "$fs" ;; *) is_shared_illumos "$fs" ;; esac } function is_exported_illumos { typeset fs=$1 typeset mtpt _ while read -r mtpt _; do [ "$mtpt" = "$fs" ] && return done < /etc/dfs/sharetab return 1 } function is_exported_freebsd { typeset fs=$1 typeset mtpt _ while read -r mtpt _; do [ "$mtpt" = "$fs" ] && return done < /etc/zfs/exports return 1 } function is_exported_linux { typeset fs=$1 typeset mtpt _ while read -r mtpt _; do [ "$(printf "$mtpt")" = "$fs" ] && return done < /etc/exports.d/zfs.exports return 1 } # # Given a mountpoint, or a dataset name, determine if it is exported via # the os-specific NFS exports file. # # Returns 0 if exported, 1 otherwise. # function is_exported { typeset fs=$1 typeset mtpt if [[ $fs != "/"* ]] ; then if datasetnonexists "$fs" ; then return 1 else mtpt=$(get_prop mountpoint "$fs") case $mtpt in none|legacy|-) return 1 ;; *) fs=$mtpt ;; esac fi fi case "$UNAME" in FreeBSD) is_exported_freebsd "$fs" ;; Linux) is_exported_linux "$fs" ;; *) is_exported_illumos "$fs" ;; esac } # # Given a dataset name determine if it is shared via SMB. # # Returns 0 if shared, 1 otherwise. # function is_shared_smb { typeset fs=$1 datasetexists "$fs" || return if is_linux; then net usershare list | grep -xFq "${fs//[-\/]/_}" else log_note "SMB on $UNAME currently unsupported by the test framework" return 1 fi } # # Given a mountpoint, determine if it is not shared via NFS. # # Returns 0 if not shared, 1 otherwise. # function not_shared { ! is_shared $1 } # # Given a dataset determine if it is not shared via SMB. # # Returns 0 if not shared, 1 otherwise. # function not_shared_smb { ! is_shared_smb $1 } # # Helper function to unshare a mountpoint. # function unshare_fs #fs { typeset fs=$1 if is_shared $fs || is_shared_smb $fs; then log_must zfs unshare $fs fi } # # Helper function to share a NFS mountpoint. # function share_nfs #fs { typeset fs=$1 is_shared "$fs" && return case "$UNAME" in Linux) log_must exportfs "*:$fs" ;; FreeBSD) typeset mountd read -r mountd < /var/run/mountd.pid log_must eval "printf '%s\t\n' \"$fs\" >> /etc/zfs/exports" log_must kill -s HUP "$mountd" ;; *) log_must share -F nfs "$fs" ;; esac return 0 } # # Helper function to unshare a NFS mountpoint. # function unshare_nfs #fs { typeset fs=$1 ! is_shared "$fs" && return case "$UNAME" in Linux) log_must exportfs -u "*:$fs" ;; FreeBSD) typeset mountd read -r mountd < /var/run/mountd.pid awk -v fs="${fs//\\/\\\\}" '$1 != fs' /etc/zfs/exports > /etc/zfs/exports.$$ log_must mv /etc/zfs/exports.$$ /etc/zfs/exports log_must kill -s HUP "$mountd" ;; *) log_must unshare -F nfs $fs ;; esac return 0 } # # Helper function to show NFS shares. # function showshares_nfs { case "$UNAME" in Linux) exportfs -v ;; FreeBSD) showmount ;; *) share -F nfs ;; esac } function check_nfs { case "$UNAME" in Linux) exportfs -s ;; FreeBSD) showmount -e ;; *) log_unsupported "Unknown platform" ;; esac || log_unsupported "The NFS utilities are not installed" } # # Check NFS server status and trigger it online. # function setup_nfs_server { # Cannot share directory in non-global zone. # if ! is_global_zone; then log_note "Cannot trigger NFS server by sharing in LZ." 
return fi if is_linux; then # # Re-synchronize /var/lib/nfs/etab with /etc/exports and # /etc/exports.d./* to provide a clean test environment. # log_must exportfs -r log_note "NFS server must be started prior to running ZTS." return elif is_freebsd; then log_must kill -s HUP $(/dev/null) [ $cur_zone = "global" ] fi } # # Verify whether test is permitted to run from # global zone, local zone, or both # # $1 zone limit, could be "global", "local", or "both"(no limit) # # Return 0 if permitted, otherwise exit with log_unsupported # function verify_runnable # zone limit { typeset limit=$1 [[ -z $limit ]] && return 0 if is_global_zone ; then case $limit in global|both) ;; local) log_unsupported "Test is unable to run from "\ "global zone." ;; *) log_note "Warning: unknown limit $limit - " \ "use both." ;; esac else case $limit in local|both) ;; global) log_unsupported "Test is unable to run from "\ "local zone." ;; *) log_note "Warning: unknown limit $limit - " \ "use both." ;; esac reexport_pool fi return 0 } # Return 0 if create successfully or the pool exists; $? otherwise # Note: In local zones, this function should return 0 silently. # # $1 - pool name # $2-n - [keyword] devs_list function create_pool #pool devs_list { typeset pool=${1%%/*} shift if [[ -z $pool ]]; then log_note "Missing pool name." return 1 fi if poolexists $pool ; then destroy_pool $pool fi if is_global_zone ; then [[ -d /$pool ]] && rm -rf /$pool log_must zpool create -f $pool $@ fi return 0 } # Return 0 if destroy successfully or the pool exists; $? otherwise # Note: In local zones, this function should return 0 silently. # # $1 - pool name # Destroy pool with the given parameters. function destroy_pool #pool { typeset pool=${1%%/*} typeset mtpt if [[ -z $pool ]]; then log_note "No pool name given." return 1 fi if is_global_zone ; then if poolexists "$pool" ; then mtpt=$(get_prop mountpoint "$pool") # At times, syseventd/udev activity can cause attempts # to destroy a pool to fail with EBUSY. We retry a few # times allowing failures before requiring the destroy # to succeed. log_must_busy zpool destroy -f $pool [[ -d $mtpt ]] && \ log_must rm -rf $mtpt else log_note "Pool does not exist. ($pool)" return 1 fi fi return 0 } # Return 0 if created successfully; $? otherwise # # $1 - dataset name # $2-n - dataset options function create_dataset #dataset dataset_options { typeset dataset=$1 shift if [[ -z $dataset ]]; then log_note "Missing dataset name." return 1 fi if datasetexists $dataset ; then destroy_dataset $dataset fi log_must zfs create $@ $dataset return 0 } # Return 0 if destroy successfully or the dataset exists; $? otherwise # Note: In local zones, this function should return 0 silently. # # $1 - dataset name # $2 - custom arguments for zfs destroy # Destroy dataset with the given parameters. function destroy_dataset # dataset [args] { typeset dataset=$1 typeset mtpt typeset args=${2:-""} if [[ -z $dataset ]]; then log_note "No dataset name given." return 1 fi if is_global_zone ; then if datasetexists "$dataset" ; then mtpt=$(get_prop mountpoint "$dataset") log_must_busy zfs destroy $args $dataset [ -d $mtpt ] && log_must rm -rf $mtpt else log_note "Dataset does not exist. ($dataset)" return 1 fi fi return 0 } # # Reexport TESTPOOL & TESTPOOL(1-4) # function reexport_pool { typeset -i cntctr=5 typeset -i i=0 while ((i < cntctr)); do if ((i == 0)); then TESTPOOL=$ZONE_POOL/$ZONE_CTR$i if ! ismounted $TESTPOOL; then log_must zfs mount $TESTPOOL fi else eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i if eval ! 
ismounted \$TESTPOOL$i; then log_must eval zfs mount \$TESTPOOL$i fi fi ((i += 1)) done } # # Verify a given disk or pool state # # Return 0 is pool/disk matches expected state, 1 otherwise # function check_state # pool disk state{online,offline,degraded} { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset state=$3 [[ -z $pool ]] || [[ -z $state ]] \ && log_fail "Arguments invalid or missing" if [[ -z $disk ]]; then #check pool state only zpool get -H -o value health $pool | grep -qi "$state" else zpool status -v $pool | grep "$disk" | grep -qi "$state" fi } # # Get the mountpoint of snapshot # For the snapshot use /.zfs/snapshot/ # as its mountpoint # function snapshot_mountpoint { typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} if [[ $dataset != *@* ]]; then log_fail "Error name of snapshot '$dataset'." fi typeset fs=${dataset%@*} typeset snap=${dataset#*@} if [[ -z $fs || -z $snap ]]; then log_fail "Error name of snapshot '$dataset'." fi echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap } # # Given a device and 'ashift' value verify it's correctly set on every label # function verify_ashift # device ashift { typeset device="$1" typeset ashift="$2" zdb -e -lll $device | awk -v ashift=$ashift ' /ashift: / { if (ashift != $2) exit 1; else count++; } END { exit (count != 4); }' } # # Given a pool and file system, this function will verify the file system # using the zdb internal tool. Note that the pool is exported and imported # to ensure it has consistent state. # function verify_filesys # pool filesystem dir { typeset pool="$1" typeset filesys="$2" typeset zdbout="/tmp/zdbout.$$" shift shift typeset dirs=$@ typeset search_path="" log_note "Calling zdb to verify filesystem '$filesys'" zfs unmount -a > /dev/null 2>&1 log_must zpool export $pool if [[ -n $dirs ]] ; then for dir in $dirs ; do search_path="$search_path -d $dir" done fi log_must zpool import $search_path $pool if ! zdb -cudi $filesys > $zdbout 2>&1; then log_note "Output: zdb -cudi $filesys" cat $zdbout rm -f $zdbout log_fail "zdb detected errors with: '$filesys'" fi log_must zfs mount -a log_must rm -rf $zdbout } # # Given a pool issue a scrub and verify that no checksum errors are reported. # function verify_pool { typeset pool=${1:-$TESTPOOL} log_must zpool scrub $pool log_must wait_scrubbed $pool typeset -i cksum=$(zpool status $pool | awk ' !NF { isvdev = 0 } isvdev { errors += $NF } /CKSUM$/ { isvdev = 1 } END { print errors } ') if [[ $cksum != 0 ]]; then log_must zpool status -v log_fail "Unexpected CKSUM errors found on $pool ($cksum)" fi } # # Given a pool, and this function list all disks in the pool # function get_disklist # pool { echo $(zpool iostat -v $1 | awk '(NR > 4) {print $1}' | \ grep -vEe '^-----' -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$") } # # Given a pool, and this function list all disks in the pool with their full # path (like "/dev/sda" instead of "sda"). # function get_disklist_fullpath # pool { get_disklist "-P $1" } # /** # This function kills a given list of processes after a time period. We use # this in the stress tests instead of STF_TIMEOUT so that we can have processes # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT # would be listed as FAIL, which we don't want : we're happy with stress tests # running for a certain amount of time, then finishing. # # @param $1 the time in seconds after which we should terminate these processes # @param $2..$n the processes we wish to terminate. 
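#
# For example (illustrative): stress_timeout 300 $pid1 $pid2 lets the two
# workers run for five minutes, then sends SIGUSR1 to each one still running.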
# */ function stress_timeout { typeset -i TIMEOUT=$1 shift typeset cpids="$@" log_note "Waiting for child processes($cpids). " \ "It could last dozens of minutes, please be patient ..." log_must sleep $TIMEOUT log_note "Killing child processes after ${TIMEOUT} stress timeout." typeset pid for pid in $cpids; do ps -p $pid > /dev/null 2>&1 && log_must kill -USR1 $pid done } # # Verify a given hotspare disk is inuse or avail # # Return 0 is pool/disk matches expected state, 1 otherwise # function check_hotspare_state # pool disk state{inuse,avail} { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset state=$3 cur_state=$(get_device_state $pool $disk "spares") [ $state = $cur_state ] } # # Wait until a hotspare transitions to a given state or times out. # # Return 0 when pool/disk matches expected state, 1 on timeout. # function wait_hotspare_state # pool disk state timeout { typeset pool=$1 typeset disk=${2#*$DEV_DSKDIR/} typeset state=$3 typeset timeout=${4:-60} typeset -i i=0 while [[ $i -lt $timeout ]]; do if check_hotspare_state $pool $disk $state; then return 0 fi i=$((i+1)) sleep 1 done return 1 } # # Verify a given vdev disk is inuse or avail # # Return 0 is pool/disk matches expected state, 1 otherwise # function check_vdev_state # pool disk state{online,offline,unavail,removed} { typeset pool=$1 typeset disk=${2#*$DEV_DSKDIR/} typeset state=$3 cur_state=$(get_device_state $pool $disk) [ $state = $cur_state ] } # # Wait until a vdev transitions to a given state or times out. # # Return 0 when pool/disk matches expected state, 1 on timeout. # function wait_vdev_state # pool disk state timeout { typeset pool=$1 typeset disk=${2#*$DEV_DSKDIR/} typeset state=$3 typeset timeout=${4:-60} typeset -i i=0 while [[ $i -lt $timeout ]]; do if check_vdev_state $pool $disk $state; then return 0 fi i=$((i+1)) sleep 1 done return 1 } # # Check the output of 'zpool status -v ', # and to see if the content of contain the specified. # # Return 0 is contain, 1 otherwise # function check_pool_status # pool token keyword { typeset pool=$1 typeset token=$2 typeset keyword=$3 typeset verbose=${4:-false} scan=$(zpool status -v "$pool" 2>/dev/null | awk -v token="$token:" '$1==token') if [[ $verbose == true ]]; then log_note $scan fi echo $scan | grep -qi "$keyword" } # # The following functions are instance of check_pool_status() # is_pool_resilvering - to check if the pool resilver is in progress # is_pool_resilvered - to check if the pool resilver is completed # is_pool_scrubbing - to check if the pool scrub is in progress # is_pool_scrubbed - to check if the pool scrub is completed # is_pool_scrub_stopped - to check if the pool scrub is stopped # is_pool_scrub_paused - to check if the pool scrub has paused # is_pool_removing - to check if the pool removing is a vdev # is_pool_removed - to check if the pool remove is completed # is_pool_discarding - to check if the pool checkpoint is being discarded # is_pool_replacing - to check if the pool is performing a replacement # function is_pool_resilvering #pool { check_pool_status "$1" "scan" \ "resilver[ ()0-9A-Za-z:_-]* in progress since" $2 } function is_pool_resilvered #pool { check_pool_status "$1" "scan" "resilvered " $2 } function is_pool_scrubbing #pool { check_pool_status "$1" "scan" "scrub in progress since " $2 } function is_pool_error_scrubbing #pool { check_pool_status "$1" "scrub" "error scrub in progress since " $2 return $? 
} function is_pool_scrubbed #pool { check_pool_status "$1" "scan" "scrub repaired" $2 } function is_pool_scrub_stopped #pool { check_pool_status "$1" "scan" "scrub canceled" $2 } function is_pool_error_scrub_stopped #pool { check_pool_status "$1" "scrub" "error scrub canceled on " $2 return $? } function is_pool_scrub_paused #pool { check_pool_status "$1" "scan" "scrub paused since " $2 } function is_pool_error_scrub_paused #pool { check_pool_status "$1" "scrub" "error scrub paused since " $2 return $? } function is_pool_removing #pool { check_pool_status "$1" "remove" "in progress since " } function is_pool_removed #pool { check_pool_status "$1" "remove" "completed on" } function is_pool_discarding #pool { check_pool_status "$1" "checkpoint" "discarding" } function is_pool_replacing #pool { zpool status "$1" | grep -qE 'replacing-[0-9]+' } function wait_for_degraded { typeset pool=$1 typeset timeout=${2:-30} typeset t0=$SECONDS while :; do [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break log_note "$pool is not yet degraded." sleep 1 if ((SECONDS - t0 > $timeout)); then log_note "$pool not degraded after $timeout seconds." return 1 fi done return 0 } # # Use create_pool()/destroy_pool() to clean up the information in # in the given disk to avoid slice overlapping. # function cleanup_devices #vdevs { typeset pool="foopool$$" for vdev in $@; do zero_partitions $vdev done poolexists $pool && destroy_pool $pool create_pool $pool $@ destroy_pool $pool return 0 } #/** # A function to find and locate free disks on a system or from given # disks as the parameter. It works by locating disks that are in use # as swap devices and dump devices, and also disks listed in /etc/vfstab # # $@ given disks to find which are free, default is all disks in # the test system # # @return a string containing the list of available disks #*/ function find_disks { # Trust provided list, no attempt is made to locate unused devices. if is_linux || is_freebsd; then echo "$@" return fi sfi=/tmp/swaplist.$$ dmpi=/tmp/dumpdev.$$ max_finddisksnum=${MAX_FINDDISKSNUM:-6} swap -l > $sfi dumpadm > $dmpi 2>/dev/null disks=${@:-$(echo "" | format -e 2>/dev/null | awk ' BEGIN { FS="."; } /^Specify disk/{ searchdisks=0; } { if (searchdisks && $2 !~ "^$"){ split($2,arr," "); print arr[1]; } } /^AVAILABLE DISK SELECTIONS:/{ searchdisks=1; } ')} unused="" for disk in $disks; do # Check for mounted grep -q "${disk}[sp]" /etc/mnttab && continue # Check for swap grep -q "${disk}[sp]" $sfi && continue # check for dump device grep -q "${disk}[sp]" $dmpi && continue # check to see if this disk hasn't been explicitly excluded # by a user-set environment variable echo "${ZFS_HOST_DEVICES_IGNORE}" | grep -q "${disk}" && continue unused_candidates="$unused_candidates $disk" done rm $sfi $dmpi # now just check to see if those disks do actually exist # by looking for a device pointing to the first slice in # each case. limit the number to max_finddisksnum count=0 for disk in $unused_candidates; do if is_disk_device $DEV_DSKDIR/${disk}s0 && \ [ $count -lt $max_finddisksnum ]; then unused="$unused $disk" # do not impose limit if $@ is provided [[ -z $@ ]] && ((count = count + 1)) fi done # finally, return our disk list echo $unused } function add_user_freebsd # { typeset group=$1 typeset user=$2 typeset basedir=$3 # Check to see if the user exists. if id $user > /dev/null 2>&1; then return 0 fi # Assign 1000 as the base uid typeset -i uid=1000 while true; do pw useradd -u $uid -g $group -d $basedir/$user -m -n $user case $? 
in 0) break ;; # The uid is not unique 65) ((uid += 1)) ;; *) return 1 ;; esac if [[ $uid == 65000 ]]; then log_fail "No user id available under 65000 for $user" fi done # Silence MOTD touch $basedir/$user/.hushlogin return 0 } # # Delete the specified user. # # $1 login name # function del_user_freebsd # { typeset user=$1 if id $user > /dev/null 2>&1; then log_must pw userdel $user fi return 0 } # # Select valid gid and create specified group. # # $1 group name # function add_group_freebsd # { typeset group=$1 # See if the group already exists. if pw groupshow $group >/dev/null 2>&1; then return 0 fi # Assign 1000 as the base gid typeset -i gid=1000 while true; do pw groupadd -g $gid -n $group > /dev/null 2>&1 case $? in 0) return 0 ;; # The gid is not unique 65) ((gid += 1)) ;; *) return 1 ;; esac if [[ $gid == 65000 ]]; then log_fail "No user id available under 65000 for $group" fi done } # # Delete the specified group. # # $1 group name # function del_group_freebsd # { typeset group=$1 pw groupdel -n $group > /dev/null 2>&1 case $? in # Group does not exist, or was deleted successfully. 0|6|65) return 0 ;; # Name already exists as a group name 9) log_must pw groupdel $group ;; *) return 1 ;; esac return 0 } function add_user_illumos # { typeset group=$1 typeset user=$2 typeset basedir=$3 log_must useradd -g $group -d $basedir/$user -m $user return 0 } function del_user_illumos # { typeset user=$1 if id $user > /dev/null 2>&1; then log_must_retry "currently used" 6 userdel $user fi return 0 } function add_group_illumos # { typeset group=$1 typeset -i gid=100 while true; do groupadd -g $gid $group > /dev/null 2>&1 case $? in 0) return 0 ;; # The gid is not unique 4) ((gid += 1)) ;; *) return 1 ;; esac done } function del_group_illumos # { typeset group=$1 groupmod -n $grp $grp > /dev/null 2>&1 case $? in # Group does not exist. 6) return 0 ;; # Name already exists as a group name 9) log_must groupdel $grp ;; *) return 1 ;; esac } function add_user_linux # { typeset group=$1 typeset user=$2 typeset basedir=$3 log_must useradd -g $group -d $basedir/$user -m $user # Add new users to the same group and the command line utils. # This allows them to be run out of the original users home # directory as long as it permissioned to be group readable. cmd_group=$(stat --format="%G" $(command -v zfs)) log_must usermod -a -G $cmd_group $user return 0 } function del_user_linux # { typeset user=$1 if id $user > /dev/null 2>&1; then log_must_retry "currently used" 6 userdel $user fi } function add_group_linux # { typeset group=$1 # Assign 100 as the base gid, a larger value is selected for # Linux because for many distributions 1000 and under are reserved. while true; do groupadd $group > /dev/null 2>&1 case $? in 0) return 0 ;; *) return 1 ;; esac done } function del_group_linux # { typeset group=$1 getent group $group > /dev/null 2>&1 case $? in # Group does not exist. 2) return 0 ;; # Name already exists as a group name 0) log_must groupdel $group ;; *) return 1 ;; esac return 0 } # # Add specified user to specified group # # $1 group name # $2 user name # $3 base of the homedir (optional) # function add_user # { typeset group=$1 typeset user=$2 typeset basedir=${3:-"/var/tmp"} if ((${#group} == 0 || ${#user} == 0)); then log_fail "group name or user name are not defined." fi case "$UNAME" in FreeBSD) add_user_freebsd "$group" "$user" "$basedir" ;; Linux) add_user_linux "$group" "$user" "$basedir" ;; *) add_user_illumos "$group" "$user" "$basedir" ;; esac return 0 } # # Delete the specified user. 
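#
# Usage sketch, pairing del_user with add_group()/add_user() above (the
# group and user names are hypothetical):
#
#   log_must add_group zgrp
#   log_must add_user zgrp zusr
#   user_run zusr zfs list
#   log_must del_user zusr
#   log_must del_group zgrp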
# # $1 login name # $2 base of the homedir (optional) # function del_user # { typeset user=$1 typeset basedir=${2:-"/var/tmp"} if ((${#user} == 0)); then log_fail "login name is necessary." fi case "$UNAME" in FreeBSD) del_user_freebsd "$user" ;; Linux) del_user_linux "$user" ;; *) del_user_illumos "$user" ;; esac [[ -d $basedir/$user ]] && rm -fr $basedir/$user return 0 } # # Select valid gid and create specified group. # # $1 group name # function add_group # { typeset group=$1 if ((${#group} == 0)); then log_fail "group name is necessary." fi case "$UNAME" in FreeBSD) add_group_freebsd "$group" ;; Linux) add_group_linux "$group" ;; *) add_group_illumos "$group" ;; esac return 0 } # # Delete the specified group. # # $1 group name # function del_group # { typeset group=$1 if ((${#group} == 0)); then log_fail "group name is necessary." fi case "$UNAME" in FreeBSD) del_group_freebsd "$group" ;; Linux) del_group_linux "$group" ;; *) del_group_illumos "$group" ;; esac return 0 } # # This function will return true if it's safe to destroy the pool passed # as argument 1. It checks for pools based on zvols and files, and also # files contained in a pool that may have a different mountpoint. # function safe_to_destroy_pool { # $1 the pool name typeset pool="" typeset DONT_DESTROY="" # We check that by deleting the $1 pool, we're not # going to pull the rug out from other pools. Do this # by looking at all other pools, ensuring that they # aren't built from files or zvols contained in this pool. for pool in $(zpool list -H -o name) do ALTMOUNTPOOL="" # this is a list of the top-level directories in each of the # files that make up the path to the files the pool is based on FILEPOOL=$(zpool status -v $pool | awk -v pool="/$1/" '$0 ~ pool {print $1}') # this is a list of the zvols that make up the pool ZVOLPOOL=$(zpool status -v $pool | awk -v zvols="$ZVOL_DEVDIR/$1$" '$0 ~ zvols {print $1}') # also want to determine if it's a file-based pool using an # alternate mountpoint... POOL_FILE_DIRS=$(zpool status -v $pool | \ awk '/\// {print $1}' | \ awk -F/ '!/dev/ {print $2}') for pooldir in $POOL_FILE_DIRS do OUTPUT=$(zfs list -H -r -o mountpoint $1 | \ awk -v pd="${pooldir}$" '$0 ~ pd {print $1}') ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}" done if [ ! -z "$ZVOLPOOL" ] then DONT_DESTROY="true" log_note "Pool $pool is built from $ZVOLPOOL on $1" fi if [ ! -z "$FILEPOOL" ] then DONT_DESTROY="true" log_note "Pool $pool is built from $FILEPOOL on $1" fi if [ ! -z "$ALTMOUNTPOOL" ] then DONT_DESTROY="true" log_note "Pool $pool is built from $ALTMOUNTPOOL on $1" fi done if [ -z "${DONT_DESTROY}" ] then return 0 else log_note "Warning: it is not safe to destroy $1!" return 1 fi } # # Verify zfs operation with -p option work as expected # $1 operation, value could be create, clone or rename # $2 dataset type, value could be fs or vol # $3 dataset name # $4 new dataset name # function verify_opt_p_ops { typeset ops=$1 typeset datatype=$2 typeset dataset=$3 typeset newdataset=$4 if [[ $datatype != "fs" && $datatype != "vol" ]]; then log_fail "$datatype is not supported." fi # check parameters accordingly case $ops in create) newdataset=$dataset dataset="" if [[ $datatype == "vol" ]]; then ops="create -V $VOLSIZE" fi ;; clone) if [[ -z $newdataset ]]; then log_fail "newdataset should not be empty" \ "when ops is $ops." fi log_must datasetexists $dataset log_must snapexists $dataset ;; rename) if [[ -z $newdataset ]]; then log_fail "newdataset should not be empty" \ "when ops is $ops." 
fi log_must datasetexists $dataset ;; *) log_fail "$ops is not supported." ;; esac # make sure the upper level filesystem does not exist destroy_dataset "${newdataset%/*}" "-rRf" # without -p option, operation will fail log_mustnot zfs $ops $dataset $newdataset log_mustnot datasetexists $newdataset ${newdataset%/*} # with -p option, operation should succeed log_must zfs $ops -p $dataset $newdataset block_device_wait if ! datasetexists $newdataset ; then log_fail "-p option does not work for $ops" fi # when $ops is create or clone, redo the operation still return zero if [[ $ops != "rename" ]]; then log_must zfs $ops -p $dataset $newdataset fi return 0 } # # Get configuration of pool # $1 pool name # $2 config name # function get_config { typeset pool=$1 typeset config=$2 if ! poolexists "$pool" ; then return 1 fi if [ "$(get_pool_prop cachefile "$pool")" = "none" ]; then zdb -e $pool else zdb -C $pool fi | awk -F: -v cfg="$config:" '$0 ~ cfg {sub(/^'\''/, $2); sub(/'\''$/, $2); print $2}' } # # Privated function. Random select one of items from arguments. # # $1 count # $2-n string # function _random_get { typeset cnt=$1 shift typeset str="$@" typeset -i ind ((ind = RANDOM % cnt + 1)) echo "$str" | cut -f $ind -d ' ' } # # Random select one of item from arguments which include NONE string # function random_get_with_non { typeset -i cnt=$# ((cnt =+ 1)) _random_get "$cnt" "$@" } # # Random select one of item from arguments which doesn't include NONE string # function random_get { _random_get "$#" "$@" } # # The function will generate a dataset name with specific length # $1, the length of the name # $2, the base string to construct the name # function gen_dataset_name { typeset -i len=$1 typeset basestr="$2" typeset -i baselen=${#basestr} typeset -i iter=0 typeset l_name="" if ((len % baselen == 0)); then ((iter = len / baselen)) else ((iter = len / baselen + 1)) fi while ((iter > 0)); do l_name="${l_name}$basestr" ((iter -= 1)) done echo $l_name } # # Get cksum tuple of dataset # $1 dataset name # # sample zdb output: # Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp # DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4 # lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P # fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744 function datasetcksum { typeset cksum sync sync_all_pools zdb -vvv $1 | awk -F= -v ds="^Dataset $1 "'\\[' '$0 ~ ds && /cksum/ {print $7}' } # # Get the given disk/slice state from the specific field of the pool # function get_device_state #pool disk field("", "spares","logs") { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset field=${3:-$pool} zpool status -v "$pool" 2>/dev/null | \ awk -v device=$disk -v pool=$pool -v field=$field \ 'BEGIN {startconfig=0; startfield=0; } /config:/ {startconfig=1} (startconfig==1) && ($1==field) {startfield=1; next;} (startfield==1) && ($1==device) {print $2; exit;} (startfield==1) && ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}' } # # get the root filesystem name if it's zfsroot system. # # return: root filesystem name function get_rootfs { typeset rootfs="" if is_freebsd; then rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}') elif ! is_linux; then rootfs=$(awk '$2 == "/" && $3 == "zfs" {print $1}' \ /etc/mnttab) fi if [[ -z "$rootfs" ]]; then log_fail "Can not get rootfs" fi if datasetexists $rootfs; then echo $rootfs else log_fail "This is not a zfsroot system." 
fi } # # get the rootfs's pool name # return: # rootpool name # function get_rootpool { typeset rootfs=$(get_rootfs) echo ${rootfs%%/*} } # # To verify if the require numbers of disks is given # function verify_disk_count { typeset -i min=${2:-1} typeset -i count=$(echo "$1" | wc -w) if ((count < min)); then log_untested "A minimum of $min disks is required to run." \ " You specified $count disk(s)" fi } function ds_is_volume { typeset type=$(get_prop type $1) [ $type = "volume" ] } function ds_is_filesystem { typeset type=$(get_prop type $1) [ $type = "filesystem" ] } # # Check if Trusted Extensions are installed and enabled # function is_te_enabled { svcs -H -o state labeld 2>/dev/null | grep -q "enabled" } # Return the number of CPUs (cross-platform) function get_num_cpus { if is_linux ; then grep -c '^processor' /proc/cpuinfo elif is_freebsd; then sysctl -n kern.smp.cpus else psrinfo | wc -l fi } # Utility function to determine if a system has multiple cpus. function is_mp { [[ $(get_num_cpus) -gt 1 ]] } function get_cpu_freq { if is_linux; then lscpu | awk '/CPU MHz/ { print $3 }' elif is_freebsd; then sysctl -n hw.clockrate else psrinfo -v 0 | awk '/processor operates at/ {print $6}' fi } # Run the given command as the user provided. function user_run { typeset user=$1 shift log_note "user: $user" log_note "cmd: $*" typeset out=$TEST_BASE_DIR/out typeset err=$TEST_BASE_DIR/err sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err typeset res=$? log_note "out: $(<$out)" log_note "err: $(<$err)" return $res } # # Check if the pool contains the specified vdevs # # $1 pool # $2..n ... # # Return 0 if the vdevs are contained in the pool, 1 if any of the specified # vdevs is not in the pool, and 2 if pool name is missing. # function vdevs_in_pool { typeset pool=$1 typeset vdev if [[ -z $pool ]]; then log_note "Missing pool name." return 2 fi shift # We could use 'zpool list' to only get the vdevs of the pool but we # can't reference a mirror/raidz vdev using its ID (i.e mirror-0), # therefore we use the 'zpool status' output. typeset tmpfile=$(mktemp) zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile for vdev in "$@"; do grep -wq ${vdev##*/} $tmpfile || return 1 done rm -f $tmpfile return 0 } function get_max { typeset -l i max=$1 shift for i in "$@"; do max=$((max > i ? max : i)) done echo $max } # Write data that can be compressed into a directory function write_compressible { typeset dir=$1 typeset megs=$2 typeset nfiles=${3:-1} typeset bs=${4:-1024k} typeset fname=${5:-file} [[ -d $dir ]] || log_fail "No directory: $dir" # Under Linux fio is not currently used since its behavior can # differ significantly across versions. This includes missing # command line options and cases where the --buffer_compress_* # options fail to behave as expected. if is_linux; then typeset file_bytes=$(to_bytes $megs) typeset bs_bytes=4096 typeset blocks=$(($file_bytes / $bs_bytes)) for (( i = 0; i < $nfiles; i++ )); do truncate -s $file_bytes $dir/$fname.$i # Write every third block to get 66% compression. 
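			# (The file was created sparse by truncate above, so
			# only one block in three holds urandom data; the
			# remaining blocks stay zero-filled holes, which is
			# what makes roughly two thirds of each file
			# compressible.)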
for (( j = 0; j < $blocks; j += 3 )); do dd if=/dev/urandom of=$dir/$fname.$i \ seek=$j bs=$bs_bytes count=1 \ conv=notrunc >/dev/null 2>&1 done done else command -v fio > /dev/null || log_unsupported "fio missing" log_must eval fio \ --name=job \ --fallocate=0 \ --minimal \ --randrepeat=0 \ --buffer_compress_percentage=66 \ --buffer_compress_chunk=4096 \ --directory="$dir" \ --numjobs="$nfiles" \ --nrfiles="$nfiles" \ --rw=write \ --bs="$bs" \ --filesize="$megs" \ "--filename_format='$fname.\$jobnum' >/dev/null" fi } function get_objnum { typeset pathname=$1 typeset objnum [[ -e $pathname ]] || log_fail "No such file or directory: $pathname" if is_freebsd; then objnum=$(stat -f "%i" $pathname) else objnum=$(stat -c %i $pathname) fi echo $objnum } # # Sync data to the pool # # $1 pool name # $2 boolean to force uberblock (and config including zpool cache file) update # function sync_pool #pool { typeset pool=${1:-$TESTPOOL} typeset force=${2:-false} if [[ $force == true ]]; then log_must zpool sync -f $pool else log_must zpool sync $pool fi return 0 } # # Sync all pools # # $1 boolean to force uberblock (and config including zpool cache file) update # function sync_all_pools # { typeset force=${1:-false} if [[ $force == true ]]; then log_must zpool sync -f else log_must zpool sync fi return 0 } # # Wait for zpool 'freeing' property drops to zero. # # $1 pool name # function wait_freeing #pool { typeset pool=${1:-$TESTPOOL} while true; do [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break log_must sleep 1 done } # # Wait for every device replace operation to complete # # $1 pool name # $2 timeout # function wait_replacing #pool timeout { typeset timeout=${2:-300} typeset pool=${1:-$TESTPOOL} for (( timer = 0; timer < $timeout; timer++ )); do is_pool_replacing $pool || break; sleep 1; done } # Wait for a pool to be scrubbed # # $1 pool name # $2 timeout # function wait_scrubbed #pool timeout { typeset timeout=${2:-300} typeset pool=${1:-$TESTPOOL} for (( timer = 0; timer < $timeout; timer++ )); do is_pool_scrubbed $pool && break; sleep 1; done } # Backup the zed.rc in our test directory so that we can edit it for our test. # # Returns: Backup file name. You will need to pass this to zed_rc_restore(). function zed_rc_backup { zedrc_backup="$(mktemp)" cp $ZEDLET_DIR/zed.rc $zedrc_backup echo $zedrc_backup } function zed_rc_restore { mv $1 $ZEDLET_DIR/zed.rc } # # Setup custom environment for the ZED. # # $@ Optional list of zedlets to run under zed. function zed_setup { if ! is_linux; then log_unsupported "No zed on $UNAME" fi if [[ ! -d $ZEDLET_DIR ]]; then log_must mkdir $ZEDLET_DIR fi if [[ ! -e $VDEVID_CONF ]]; then log_must touch $VDEVID_CONF fi if [[ -e $VDEVID_CONF_ETC ]]; then log_fail "Must not have $VDEVID_CONF_ETC file present on system" fi EXTRA_ZEDLETS=$@ # Create a symlink for /etc/zfs/vdev_id.conf file. log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC # Setup minimal ZED configuration. Individual test cases should # add additional ZEDLETs as needed for their specific test. log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR # Scripts must only be user writable. if [[ -n "$EXTRA_ZEDLETS" ]] ; then saved_umask=$(umask) log_must umask 0022 for i in $EXTRA_ZEDLETS ; do log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR done log_must umask $saved_umask fi # Customize the zed.rc file to enable the full debug log. 
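	# (The sed below drops the commented-out "#ZED_DEBUG_LOG=..." default
	# and the echo appends an explicit override, so the ZED writes its
	# verbose log to the per-test $ZED_DEBUG_LOG location.)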
log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc } # # Cleanup custom ZED environment. # # $@ Optional list of zedlets to remove from our test zed.d directory. function zed_cleanup { if ! is_linux; then return fi for extra_zedlet; do log_must rm -f ${ZEDLET_DIR}/$extra_zedlet done log_must rm -fd ${ZEDLET_DIR}/zed.rc ${ZEDLET_DIR}/zed-functions.sh ${ZEDLET_DIR}/all-syslog.sh ${ZEDLET_DIR}/all-debug.sh ${ZEDLET_DIR}/state \ $ZED_LOG $ZED_DEBUG_LOG $VDEVID_CONF_ETC $VDEVID_CONF \ $ZEDLET_DIR } # # Check if ZED is currently running; if so, returns PIDs # function zed_check { if ! is_linux; then return fi zedpids="$(pgrep -x zed)" zedpids2="$(pgrep -x lt-zed)" echo ${zedpids} ${zedpids2} } # # Check if ZED is currently running, if not start ZED. # function zed_start { if ! is_linux; then return fi # ZEDLET_DIR=/var/tmp/zed if [[ ! -d $ZEDLET_DIR ]]; then log_must mkdir $ZEDLET_DIR fi # Verify the ZED is not already running. zedpids=$(zed_check) if [ -n "$zedpids" ]; then # We never, ever, really want it to just keep going if zed # is already running - usually this implies our test cases # will break very strangely because whatever we wanted to # configure zed for won't be listening to our changes in the # tmpdir log_fail "ZED already running - ${zedpids}" else log_note "Starting ZED" # run ZED in the background and redirect foreground logging # output to $ZED_LOG. log_must truncate -s 0 $ZED_DEBUG_LOG log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \ "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &" fi return 0 } # # Kill ZED process # function zed_stop { if ! is_linux; then return "" fi log_note "Stopping ZED" while true; do zedpids=$(zed_check) [ ! -n "$zedpids" ] && break log_must kill $zedpids sleep 1 done return 0 } # # Drain all zevents # function zed_events_drain { while [ $(zpool events -H | wc -l) -ne 0 ]; do sleep 1 zpool events -c >/dev/null done } # Set a variable in zed.rc to something, un-commenting it in the process. # # $1 variable # $2 value function zed_rc_set { var="$1" val="$2" # Remove the line cmd="'/$var/d'" eval sed -i $cmd $ZEDLET_DIR/zed.rc # Add it at the end echo "$var=$val" >> $ZEDLET_DIR/zed.rc } # # Check is provided device is being active used as a swap device. # function is_swap_inuse { typeset device=$1 if [[ -z $device ]] ; then log_note "No device specified." return 1 fi case "$UNAME" in Linux) swapon -s | grep -wq $(readlink -f $device) ;; FreeBSD) swapctl -l | grep -wq $device ;; *) swap -l | grep -wq $device ;; esac } # # Setup a swap device using the provided device. # function swap_setup { typeset swapdev=$1 case "$UNAME" in Linux) log_must eval "mkswap $swapdev > /dev/null 2>&1" log_must swapon $swapdev ;; FreeBSD) log_must swapctl -a $swapdev ;; *) log_must swap -a $swapdev ;; esac return 0 } # # Cleanup a swap device on the provided device. 
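#
# Usage sketch (the swap device is illustrative; a zvol device node such as
# $ZVOL_DEVDIR/$TESTPOOL/$TESTVOL is a common choice):
#
#   log_must swap_setup $swapdev
#   ... exercise swap ...
#   log_must swap_cleanup $swapdev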
# function swap_cleanup { typeset swapdev=$1 if is_swap_inuse $swapdev; then if is_linux; then log_must swapoff $swapdev elif is_freebsd; then log_must swapoff $swapdev else log_must swap -d $swapdev fi fi return 0 } # # Set a global system tunable (64-bit value) # # $1 tunable name (use a NAME defined in tunables.cfg) # $2 tunable values # function set_tunable64 { set_tunable_impl "$1" "$2" Z } # # Set a global system tunable (32-bit value) # # $1 tunable name (use a NAME defined in tunables.cfg) # $2 tunable values # function set_tunable32 { set_tunable_impl "$1" "$2" W } function set_tunable_impl { typeset name="$1" typeset value="$2" typeset mdb_cmd="$3" eval "typeset tunable=\$$name" case "$tunable" in UNSUPPORTED) log_unsupported "Tunable '$name' is unsupported on $UNAME" ;; "") log_fail "Tunable '$name' must be added to tunables.cfg" ;; *) ;; esac [[ -z "$value" ]] && return 1 [[ -z "$mdb_cmd" ]] && return 1 case "$UNAME" in Linux) typeset zfs_tunables="/sys/module/zfs/parameters" echo "$value" >"$zfs_tunables/$tunable" ;; FreeBSD) sysctl vfs.zfs.$tunable=$value ;; SunOS) echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw ;; esac } function save_tunable { [[ ! -d $TEST_BASE_DIR ]] && return 1 [[ -e $TEST_BASE_DIR/tunable-$1 ]] && return 2 echo "$(get_tunable """$1""")" > "$TEST_BASE_DIR"/tunable-"$1" } function restore_tunable { [[ ! -e $TEST_BASE_DIR/tunable-$1 ]] && return 1 val="$(cat $TEST_BASE_DIR/tunable-"""$1""")" set_tunable64 "$1" "$val" rm $TEST_BASE_DIR/tunable-$1 } # # Get a global system tunable # # $1 tunable name (use a NAME defined in tunables.cfg) # function get_tunable { get_tunable_impl "$1" } function get_tunable_impl { typeset name="$1" typeset module="${2:-zfs}" typeset check_only="$3" eval "typeset tunable=\$$name" case "$tunable" in UNSUPPORTED) if [ -z "$check_only" ] ; then log_unsupported "Tunable '$name' is unsupported on $UNAME" else return 1 fi ;; "") if [ -z "$check_only" ] ; then log_fail "Tunable '$name' must be added to tunables.cfg" else return 1 fi ;; *) ;; esac case "$UNAME" in Linux) typeset zfs_tunables="/sys/module/$module/parameters" cat $zfs_tunables/$tunable ;; FreeBSD) sysctl -n vfs.zfs.$tunable ;; SunOS) [[ "$module" -eq "zfs" ]] || return 1 ;; esac } # Does a tunable exist? # # $1: Tunable name function tunable_exists { get_tunable_impl $1 "zfs" 1 } # # Compute MD5 digest for given file or stdin if no file given. # Note: file path must not contain spaces # function md5digest { typeset file=$1 case "$UNAME" in FreeBSD) md5 -q $file ;; *) typeset sum _ read -r sum _ < <(md5sum -b $file) echo $sum ;; esac } # # Compute SHA256 digest for given file or stdin if no file given. 
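#
# Usage sketch (the path is illustrative):
#
#   typeset sum_before=$(sha256digest /$TESTPOOL/$TESTFS/file)
#   ... perform the operation under test ...
#   typeset sum_after=$(sha256digest /$TESTPOOL/$TESTFS/file)
#   log_must [ "$sum_before" = "$sum_after" ]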
# Note: file path must not contain spaces # function sha256digest { typeset file=$1 case "$UNAME" in FreeBSD) sha256 -q $file ;; *) typeset sum _ read -r sum _ < <(sha256sum -b $file) echo $sum ;; esac } function new_fs # { case "$UNAME" in FreeBSD) newfs "$@" ;; *) echo y | newfs -v "$@" ;; esac } function stat_size # { typeset path=$1 case "$UNAME" in FreeBSD) stat -f %z "$path" ;; *) stat -c %s "$path" ;; esac } function stat_mtime # { typeset path=$1 case "$UNAME" in FreeBSD) stat -f %m "$path" ;; *) stat -c %Y "$path" ;; esac } function stat_ctime # { typeset path=$1 case "$UNAME" in FreeBSD) stat -f %c "$path" ;; *) stat -c %Z "$path" ;; esac } function stat_crtime # { typeset path=$1 case "$UNAME" in FreeBSD) stat -f %B "$path" ;; *) stat -c %W "$path" ;; esac } function stat_generation # { typeset path=$1 case "$UNAME" in Linux) getversion "${path}" ;; *) stat -f %v "${path}" ;; esac } # Run a command as if it was being run in a TTY. # # Usage: # # faketty command # function faketty { if is_freebsd; then script -q /dev/null env "$@" else script --return --quiet -c "$*" /dev/null fi } # # Produce a random permutation of the integers in a given range (inclusive). # function range_shuffle # begin end { typeset -i begin=$1 typeset -i end=$2 seq ${begin} ${end} | sort -R } # # Cross-platform xattr helpers # function get_xattr # name path { typeset name=$1 typeset path=$2 case "$UNAME" in FreeBSD) getextattr -qq user "${name}" "${path}" ;; *) attr -qg "${name}" "${path}" ;; esac } function set_xattr # name value path { typeset name=$1 typeset value=$2 typeset path=$3 case "$UNAME" in FreeBSD) setextattr user "${name}" "${value}" "${path}" ;; *) attr -qs "${name}" -V "${value}" "${path}" ;; esac } function set_xattr_stdin # name value { typeset name=$1 typeset path=$2 case "$UNAME" in FreeBSD) setextattr -i user "${name}" "${path}" ;; *) attr -qs "${name}" "${path}" ;; esac } function rm_xattr # name path { typeset name=$1 typeset path=$2 case "$UNAME" in FreeBSD) rmextattr -q user "${name}" "${path}" ;; *) attr -qr "${name}" "${path}" ;; esac } function ls_xattr # path { typeset path=$1 case "$UNAME" in FreeBSD) lsextattr -qq user "${path}" ;; *) attr -ql "${path}" ;; esac } function kstat # stat flags? { typeset stat=$1 typeset flags=${2-"-n"} case "$UNAME" in FreeBSD) sysctl $flags kstat.zfs.misc.$stat ;; Linux) cat "/proc/spl/kstat/zfs/$stat" 2>/dev/null ;; *) false ;; esac } function get_arcstat # stat { typeset stat=$1 case "$UNAME" in FreeBSD) kstat arcstats.$stat ;; Linux) kstat arcstats | awk "/$stat/"' { print $3 }' ;; *) false ;; esac } function punch_hole # offset length file { typeset offset=$1 typeset length=$2 typeset file=$3 case "$UNAME" in FreeBSD) truncate -d -o $offset -l $length "$file" ;; Linux) fallocate --punch-hole --offset $offset --length $length "$file" ;; *) false ;; esac } function zero_range # offset length file { typeset offset=$1 typeset length=$2 typeset file=$3 case "$UNAME" in Linux) fallocate --zero-range --offset $offset --length $length "$file" ;; *) false ;; esac } # # Wait for the specified arcstat to reach non-zero quiescence. # If echo is 1 echo the value after reaching quiescence, otherwise # if echo is 0 print the arcstat we are waiting on. # function arcstat_quiescence # stat echo { typeset stat=$1 typeset echo=$2 typeset do_once=true if [[ $echo -eq 0 ]]; then echo "Waiting for arcstat $1 quiescence." 
fi while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do typeset stat1=$(get_arcstat $stat) sleep 0.5 typeset stat2=$(get_arcstat $stat) do_once=false done if [[ $echo -eq 1 ]]; then echo $stat2 fi } function arcstat_quiescence_noecho # stat { typeset stat=$1 arcstat_quiescence $stat 0 } function arcstat_quiescence_echo # stat { typeset stat=$1 arcstat_quiescence $stat 1 } # # Given an array of pids, wait until all processes # have completed and check their return status. # function wait_for_children #children { rv=0 children=("$@") for child in "${children[@]}" do child_exit=0 wait ${child} || child_exit=$? if [ $child_exit -ne 0 ]; then echo "child ${child} failed with ${child_exit}" rv=1 fi done return $rv } # # Compare two directory trees recursively in a manner similar to diff(1), but # using rsync. If there are any discrepancies, a summary of the differences are # output and a non-zero error is returned. # # If you're comparing a directory after a ZIL replay, you should set # LIBTEST_DIFF_ZIL_REPLAY=1 or use replay_directory_diff which will cause # directory_diff to ignore mtime changes (the ZIL replay won't fix up mtime # information). # function directory_diff # dir_a dir_b { dir_a="$1" dir_b="$2" zil_replay="${LIBTEST_DIFF_ZIL_REPLAY:-0}" # If one of the directories doesn't exist, return 2. This is to match the # semantics of diff. if ! [ -d "$dir_a" -a -d "$dir_b" ]; then return 2 fi # Run rsync with --dry-run --itemize-changes to get something akin to diff # output, but rsync is far more thorough in detecting differences (diff # doesn't compare file metadata, and cannot handle special files). # # Also make sure to filter out non-user.* xattrs when comparing. On # SELinux-enabled systems the copied tree will probably have different # SELinux labels. args=("-nicaAHX" '--filter=-x! user.*' "--delete") # NOTE: Quite a few rsync builds do not support --crtimes which would be # necessary to verify that creation times are being maintained properly. # Unfortunately because of this we cannot use it unconditionally but we can # check if this rsync build supports it and use it then. This check is # based on the same check in the rsync test suite (testsuite/crtimes.test). # # We check ctimes even with zil_replay=1 because the ZIL does store # creation times and we should make sure they match (if the creation times # do not match there is a "c" entry in one of the columns). if rsync --version | grep -q "[, ] crtimes"; then args+=("--crtimes") else log_note "This rsync package does not support --crtimes (-N)." fi # If we are testing a ZIL replay, we need to ignore timestamp changes. # Unfortunately --no-times doesn't do what we want -- it will still tell # you if the timestamps don't match but rsync will set the timestamps to # the current time (leading to an itemised change entry). It's simpler to # just filter out those lines. if [ "$zil_replay" -eq 0 ]; then filter=("cat") else # Different rsync versions have different numbers of columns. So just # require that aside from the first two, all other columns must be # blank (literal ".") or a timestamp field ("[tT]"). filter=("grep" "-v" '^\..[.Tt]\+ ') fi diff="$(rsync "${args[@]}" "$dir_a/" "$dir_b/" | "${filter[@]}")" rv=0 if [ -n "$diff" ]; then echo "$diff" rv=1 fi return $rv } # # Compare two directory trees recursively, without checking whether the mtimes # match (creation times will be checked if the available rsync binary supports # it). 
This is necessary for ZIL replay checks (because the ZIL does not # contain mtimes and thus after a ZIL replay, mtimes won't match). # # This is shorthand for LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff <...>. # function replay_directory_diff # dir_a dir_b { LIBTEST_DIFF_ZIL_REPLAY=1 directory_diff "$@" } # # Put coredumps into $1/core.{basename} # # Output must be saved and passed to pop_coredump_pattern on cleanup # function push_coredump_pattern # dir { ulimit -c unlimited case "$UNAME" in Linux) cat /proc/sys/kernel/core_pattern /proc/sys/kernel/core_uses_pid echo "$1/core.%e" >/proc/sys/kernel/core_pattern && echo 0 >/proc/sys/kernel/core_uses_pid ;; FreeBSD) sysctl -n kern.corefile sysctl kern.corefile="$1/core.%N" >/dev/null ;; *) # Nothing to output – set only for this shell coreadm -p "$1/core.%f" ;; esac } # # Put coredumps back into the default location # function pop_coredump_pattern { [ -s "$1" ] || return 0 case "$UNAME" in Linux) typeset pat pid { read -r pat; read -r pid; } < "$1" echo "$pat" >/proc/sys/kernel/core_pattern && echo "$pid" >/proc/sys/kernel/core_uses_pid ;; FreeBSD) sysctl kern.corefile="$(<"$1")" >/dev/null ;; esac } diff --git a/tests/zfs-tests/tests/functional/bclone/bclone_common.kshlib b/tests/zfs-tests/tests/functional/bclone/bclone_common.kshlib index beba01c0ed26..3b8eaea5bb54 100644 --- a/tests/zfs-tests/tests/functional/bclone/bclone_common.kshlib +++ b/tests/zfs-tests/tests/functional/bclone/bclone_common.kshlib @@ -1,280 +1,286 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2023 by Pawel Jakub Dawidek # . $STF_SUITE/tests/functional/bclone/bclone.cfg export RECORDSIZE=$(zfs get -Hp -o value recordsize $TESTPOOL/$TESTFS) MINBLKSIZE1=512 MINBLKSIZE2=1024 function verify_block_cloning { if is_linux && [[ $(linux_version) -lt $(linux_version "4.5") ]]; then log_unsupported "copy_file_range not available before Linux 4.5" fi } function verify_crossfs_block_cloning { if is_linux && [[ $(linux_version) -lt $(linux_version "5.3") ]]; then log_unsupported "copy_file_range can't copy cross-filesystem before Linux 5.3" fi + + # Cross dataset block cloning only supported on FreeBSD 14+ + # https://github.com/freebsd/freebsd-src/commit/969071be938c + if is_freebsd && [ $(freebsd_version) -lt $(freebsd_version 14.0) ] ; then + log_unsupported "Cloning across datasets not supported in $(uname -r)" + fi } # Unused. 
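# (size_to_dsize() computes the expected allocated size for a file of the
# given logical size on the dataset mounted at $dir: sizes up to one
# recordsize round up to a multiple of MINBLOCKSIZE, larger sizes to a
# multiple of the recordsize, and the result is multiplied by the copies
# property. Kept here for reference.)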
function size_to_dsize { typeset -r size=$1 typeset -r dir=$2 typeset -r dataset=$(df $dir | tail -1 | awk '{print $1}') typeset -r recordsize=$(get_prop recordsize $dataset) typeset -r copies=$(get_prop copies $dataset) typeset dsize if [[ $size -le $recordsize ]]; then dsize=$(( ((size - 1) / MINBLOCKSIZE + 1) * MINBLOCKSIZE )) else dsize=$(( ((size - 1) / recordsize + 1) * recordsize )) fi dsize=$((dsize*copies)) echo $dsize } function test_file_integrity { typeset -r original_checksum=$1 typeset -r clone=$2 typeset -r filesize=$3 typeset -r clone_checksum=$(sha256digest $clone) if [[ $original_checksum != $clone_checksum ]]; then log_fail "Clone $clone is corrupted with file size $filesize" fi } function verify_pool_prop_eq { typeset -r prop=$1 typeset -r expected=$2 typeset -r value=$(get_pool_prop $prop $TESTPOOL) if [[ $value != $expected ]]; then log_fail "Pool property $prop is incorrect: expected $expected, got $value" fi } function verify_pool_props { typeset -r dsize=$1 typeset -r ratio=$2 if [[ $dsize -eq 0 ]]; then verify_pool_prop_eq bcloneused 0 verify_pool_prop_eq bclonesaved 0 verify_pool_prop_eq bcloneratio 1.00 else if [[ $ratio -eq 1 ]]; then verify_pool_prop_eq bcloneused 0 else verify_pool_prop_eq bcloneused $dsize fi verify_pool_prop_eq bclonesaved $((dsize*(ratio-1))) verify_pool_prop_eq bcloneratio "${ratio}.00" fi } # Function to test file copying and integrity check. function bclone_test { typeset -r datatype=$1 typeset filesize=$2 typeset -r embedded=$3 typeset -r srcdir=$4 typeset -r dstdir=$5 typeset dsize typeset -r original="${srcdir}/original" typeset -r clone="${dstdir}/clone" log_note "Testing file copy with datatype $datatype, file size $filesize, embedded $embedded" # Create a test file with known content. case $datatype in random|text) sync_pool $TESTPOOL if [[ $datatype = "random" ]]; then dd if=/dev/urandom of=$original bs=$filesize count=1 2>/dev/null else filesize=$(((filesize/4)*4)) dd if=/dev/urandom bs=$(((filesize/4)*3)) count=1 | \ openssl base64 -A > $original fi sync_pool $TESTPOOL clonefile -f $original "${clone}-tmp" sync_pool $TESTPOOL # It is hard to predict block sizes that will be used, # so just do one clone and take it from bcloneused. filesize=$(zpool get -Hp -o value bcloneused $TESTPOOL) if [[ $embedded = "false" ]]; then log_must test $filesize -gt 0 fi rm -f "${clone}-tmp" sync_pool $TESTPOOL dsize=$filesize ;; hole) log_must truncate_test -s $filesize -f $original dsize=0 ;; *) log_fail "Unknown datatype $datatype" ;; esac if [[ $embedded = "true" ]]; then dsize=0 fi typeset -r original_checksum=$(sha256digest $original) sync_pool $TESTPOOL # Create a first clone of the entire file. clonefile -f $original "${clone}0" # Try to clone the clone in the same transaction group. clonefile -f "${clone}0" "${clone}2" # Clone the original again... clonefile -f $original "${clone}1" # ...and overwrite it in the same transaction group. clonefile -f $original "${clone}1" # Clone the clone... clonefile -f "${clone}1" "${clone}3" sync_pool $TESTPOOL # ...and overwrite in the new transaction group. clonefile -f "${clone}1" "${clone}3" sync_pool $TESTPOOL # Test removal of the pending clones (before they are committed to disk). clonefile -f $original "${clone}4" clonefile -f "${clone}4" "${clone}5" rm -f "${clone}4" "${clone}5" # Clone into one file, but remove another file, but with the same data in # the same transaction group. 
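	# (${clone}5 is created and committed first; then, within a single
	# open txg, ${clone}4 is cloned from the same source blocks while
	# ${clone}5 is unlinked.)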
clonefile -f $original "${clone}5" sync_pool $TESTPOOL clonefile -f $original "${clone}4" rm -f "${clone}5" test_file_integrity $original_checksum "${clone}4" $filesize sync_pool $TESTPOOL test_file_integrity $original_checksum "${clone}4" $filesize clonefile -f "${clone}4" "${clone}5" # Verify integrity of the cloned file before it is committed to disk. test_file_integrity $original_checksum "${clone}5" $filesize sync_pool $TESTPOOL # Verify integrity in the new transaction group. test_file_integrity $original_checksum "${clone}0" $filesize test_file_integrity $original_checksum "${clone}1" $filesize test_file_integrity $original_checksum "${clone}2" $filesize test_file_integrity $original_checksum "${clone}3" $filesize test_file_integrity $original_checksum "${clone}4" $filesize test_file_integrity $original_checksum "${clone}5" $filesize verify_pool_props $dsize 7 # Clear cache and test after fresh import. log_must zpool export $TESTPOOL log_must zpool import $TESTPOOL # Cloned uncached file. clonefile -f $original "${clone}6" # Cloned uncached clone. clonefile -f "${clone}6" "${clone}7" # Cache the file. cat $original >/dev/null clonefile -f $original "${clone}8" clonefile -f "${clone}8" "${clone}9" test_file_integrity $original_checksum "${clone}6" $filesize test_file_integrity $original_checksum "${clone}7" $filesize test_file_integrity $original_checksum "${clone}8" $filesize test_file_integrity $original_checksum "${clone}9" $filesize sync_pool $TESTPOOL verify_pool_props $dsize 11 log_must zpool export $TESTPOOL log_must zpool import $TESTPOOL test_file_integrity $original_checksum "${clone}0" $filesize test_file_integrity $original_checksum "${clone}1" $filesize test_file_integrity $original_checksum "${clone}2" $filesize test_file_integrity $original_checksum "${clone}3" $filesize test_file_integrity $original_checksum "${clone}4" $filesize test_file_integrity $original_checksum "${clone}5" $filesize test_file_integrity $original_checksum "${clone}6" $filesize test_file_integrity $original_checksum "${clone}7" $filesize test_file_integrity $original_checksum "${clone}8" $filesize test_file_integrity $original_checksum "${clone}9" $filesize rm -f $original rm -f "${clone}1" "${clone}3" "${clone}5" "${clone}7" sync_pool $TESTPOOL test_file_integrity $original_checksum "${clone}0" $filesize test_file_integrity $original_checksum "${clone}2" $filesize test_file_integrity $original_checksum "${clone}4" $filesize test_file_integrity $original_checksum "${clone}6" $filesize test_file_integrity $original_checksum "${clone}8" $filesize test_file_integrity $original_checksum "${clone}9" $filesize verify_pool_props $dsize 6 rm -f "${clone}0" "${clone}2" "${clone}4" "${clone}8" "${clone}9" sync_pool $TESTPOOL test_file_integrity $original_checksum "${clone}6" $filesize verify_pool_props $dsize 1 rm -f "${clone}6" sync_pool $TESTPOOL verify_pool_props $dsize 1 } diff --git a/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh b/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh index 43323c207a62..ad83d30291ac 100755 --- a/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh +++ b/tests/zfs-tests/tests/functional/block_cloning/block_cloning_copyfilerange_cross_dataset.ksh @@ -1,65 +1,64 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). 
# You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2023, Klara Inc. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib +. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib verify_runnable "global" -if is_linux && [[ $(linux_version) -lt $(linux_version "5.3") ]]; then - log_unsupported "copy_file_range can't copy cross-filesystem before Linux 5.3" -fi +verify_crossfs_block_cloning claim="The copy_file_range syscall can clone across datasets." log_assert $claim function cleanup { datasetexists $TESTPOOL && destroy_pool $TESTPOOL } log_onexit cleanup log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS log_must zfs create $TESTPOOL/$TESTFS1 log_must zfs create $TESTPOOL/$TESTFS2 log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS1/file1 bs=128K count=4 log_must sync_pool $TESTPOOL log_must \ clonefile -f /$TESTPOOL/$TESTFS1/file1 /$TESTPOOL/$TESTFS2/file2 0 0 524288 log_must sync_pool $TESTPOOL log_must have_same_content /$TESTPOOL/$TESTFS1/file1 /$TESTPOOL/$TESTFS2/file2 typeset blocks=$(get_same_blocks \ $TESTPOOL/$TESTFS1 file1 $TESTPOOL/$TESTFS2 file2) log_must [ "$blocks" = "0 1 2 3" ] log_pass $claim diff --git a/tests/zfs-tests/tests/functional/block_cloning/block_cloning_cross_enc_dataset.ksh b/tests/zfs-tests/tests/functional/block_cloning/block_cloning_cross_enc_dataset.ksh index 34d3d2692555..702e23267f7e 100755 --- a/tests/zfs-tests/tests/functional/block_cloning/block_cloning_cross_enc_dataset.ksh +++ b/tests/zfs-tests/tests/functional/block_cloning/block_cloning_cross_enc_dataset.ksh @@ -1,170 +1,169 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2023, Kay Pedersen # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/block_cloning/block_cloning.kshlib +. $STF_SUITE/tests/functional/bclone/bclone_common.kshlib verify_runnable "global" -if is_linux && [[ $(linux_version) -lt $(linux_version "5.3") ]]; then - log_unsupported "copy_file_range can't copy cross-filesystem before Linux 5.3" -fi +verify_crossfs_block_cloning claim="Block cloning across encrypted datasets." 
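
#
# The scenarios below are expected to behave as follows: cloning across
# different encryption roots, or across child datasets (which derive their
# own master keys), falls back to an ordinary copy with no shared blocks;
# cloning within the same dataset from a snapshot produces a real block
# clone; and copying between encrypted and unencrypted datasets likewise
# falls back to a copy.
#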
log_assert $claim DS1="$TESTPOOL/encrypted1" DS2="$TESTPOOL/encrypted2" DS1_NC="$TESTPOOL/notcrypted1" PASSPHRASE="top_secret" function prepare_enc { log_must zpool create -o feature@block_cloning=enabled $TESTPOOL $DISKS log_must eval "echo $PASSPHRASE | zfs create -o encryption=on" \ "-o keyformat=passphrase -o keylocation=prompt $DS1" log_must eval "echo $PASSPHRASE | zfs create -o encryption=on" \ "-o keyformat=passphrase -o keylocation=prompt $DS2" log_must zfs create $DS1/child1 log_must zfs create $DS1/child2 log_must zfs create $DS1_NC log_note "Create test file" # we must wait until the src file txg is written to the disk otherwise we # will fallback to normal copy. See "dmu_read_l0_bps" in # "zfs/module/zfs/dmu.c" and "zfs_clone_range" in # "zfs/module/zfs/zfs_vnops.c" log_must dd if=/dev/urandom of=/$DS1/file bs=128K count=4 log_must dd if=/dev/urandom of=/$DS1/child1/file bs=128K count=4 log_must dd if=/dev/urandom of=/$DS1_NC/file bs=128K count=4 log_must sync_pool $TESTPOOL } function cleanup_enc { datasetexists $TESTPOOL && destroy_pool $TESTPOOL } function clone_and_check { I_FILE="$1" O_FILE=$2 I_DS=$3 O_DS=$4 SAME_BLOCKS=$5 # the CLONE option provides a choice between copy_file_range # which should clone and a dd which is a copy no matter what CLONE=$6 SNAPSHOT=$7 if [ ${#SNAPSHOT} -gt 0 ]; then I_FILE=".zfs/snapshot/$SNAPSHOT/$1" fi if [ $CLONE ]; then log_must clonefile -f "/$I_DS/$I_FILE" "/$O_DS/$O_FILE" 0 0 524288 else log_must dd if="/$I_DS/$I_FILE" of="/$O_DS/$O_FILE" bs=128K fi log_must sync_pool $TESTPOOL log_must have_same_content "/$I_DS/$I_FILE" "/$O_DS/$O_FILE" if [ ${#SNAPSHOT} -gt 0 ]; then I_DS="$I_DS@$SNAPSHOT" I_FILE="$1" fi typeset blocks=$(get_same_blocks \ $I_DS $I_FILE $O_DS $O_FILE $PASSPHRASE) log_must [ "$blocks" = "$SAME_BLOCKS" ] } log_onexit cleanup_enc prepare_enc log_note "Cloning entire file with copy_file_range across different enc" \ "roots, should fallback" # we are expecting no same block map. clone_and_check "file" "clone" $DS1 $DS2 "" true log_note "check if the file is still readable and the same after" \ "unmount and key unload, shouldn't fail" typeset hash1=$(md5digest "/$DS1/file") log_must zfs umount $DS1 && zfs unload-key $DS1 typeset hash2=$(md5digest "/$DS2/clone") log_must [ "$hash1" = "$hash2" ] cleanup_enc prepare_enc log_note "Cloning entire file with copy_file_range across different child datasets" # clone shouldn't work because of deriving a new master key for the child # we are expecting no same block map. 
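# (Passing "" as the expected-blocks argument asserts that get_same_blocks
# reports no shared blocks, i.e. the operation fell back to a plain copy.)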
clone_and_check "file" "clone" $DS1 "$DS1/child1" "" true clone_and_check "file" "clone" "$DS1/child1" "$DS1/child2" "" true cleanup_enc prepare_enc log_note "Copying entire file with copy_file_range across same snapshot" log_must zfs snapshot -r $DS1@s1 log_must sync_pool $TESTPOOL log_must rm -f "/$DS1/file" log_must sync_pool $TESTPOOL clone_and_check "file" "clone" "$DS1" "$DS1" "0 1 2 3" true "s1" cleanup_enc prepare_enc log_note "Copying entire file with copy_file_range across different snapshot" clone_and_check "file" "file" $DS1 $DS2 "" true log_must zfs snapshot -r $DS2@s1 log_must sync_pool $TESTPOOL log_must rm -f "/$DS1/file" "/$DS2/file" log_must sync_pool $TESTPOOL clone_and_check "file" "clone" "$DS2" "$DS1" "" true "s1" typeset hash1=$(md5digest "/$DS1/.zfs/snapshot/s1/file") log_note "destroy the snapshot and check if the file is still readable and" \ "has the same content" log_must zfs destroy -r $DS2@s1 log_must sync_pool $TESTPOOL typeset hash2=$(md5digest "/$DS1/file") log_must [ "$hash1" = "$hash2" ] cleanup_enc prepare_enc log_note "Copying with copy_file_range from non encrypted to encrypted" clone_and_check "file" "copy" $DS1_NC $DS1 "" true cleanup_enc prepare_enc log_note "Copying with copy_file_range from encrypted to non encrypted" clone_and_check "file" "copy" $DS1 $DS1_NC "" true log_must sync_pool $TESTPOOL log_pass $claim