diff --git a/scripts/zfs-tests.sh b/scripts/zfs-tests.sh index edb9c9f106c2..ac28788582f9 100755 --- a/scripts/zfs-tests.sh +++ b/scripts/zfs-tests.sh @@ -1,705 +1,716 @@ #!/bin/sh # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License, Version 1.0 only # (the "License"). You may not use this file except in compliance # with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # BASE_DIR=$(dirname "$0") SCRIPT_COMMON=common.sh if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then . "${BASE_DIR}/${SCRIPT_COMMON}" else echo "Missing helper script ${SCRIPT_COMMON}" && exit 1 fi PROG=zfs-tests.sh VERBOSE="no" QUIET="" CLEANUP="yes" CLEANUPALL="no" LOOPBACK="yes" STACK_TRACER="no" FILESIZE="4G" DEFAULT_RUNFILES="common.run,$(uname | tr '[:upper:]' '[:lower:]').run" RUNFILES=${RUNFILES:-$DEFAULT_RUNFILES} FILEDIR=${FILEDIR:-/var/tmp} DISKS=${DISKS:-""} SINGLETEST="" SINGLETESTUSER="root" TAGS="" ITERATIONS=1 ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh" ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh" UNAME=$(uname -s) # Override some defaults if on FreeBSD if [ "$UNAME" = "FreeBSD" ] ; then TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DMESG"} LOSETUP=/sbin/mdconfig DMSETUP=/sbin/gpart else ZFS_MMP="$STF_SUITE/callbacks/zfs_mmp.ksh" TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DBGMSG:$ZFS_DMESG:$ZFS_MMP"} LOSETUP=${LOSETUP:-/sbin/losetup} DMSETUP=${DMSETUP:-/sbin/dmsetup} fi # # Log an informational message when additional verbosity is enabled. # msg() { if [ "$VERBOSE" = "yes" ]; then echo "$@" fi } # # Log a failure message, cleanup, and return an error. # fail() { echo "$PROG: $1" >&2 cleanup exit 1 } cleanup_freebsd_loopback() { for TEST_LOOPBACK in ${LOOPBACKS}; do if [ -c "/dev/${TEST_LOOPBACK}" ]; then sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}" || echo "Failed to destroy: ${TEST_LOOPBACK}" fi done } cleanup_linux_loopback() { for TEST_LOOPBACK in ${LOOPBACKS}; do LOOP_DEV=$(basename "$TEST_LOOPBACK") DM_DEV=$(sudo "${DMSETUP}" ls 2>/dev/null | \ grep "${LOOP_DEV}" | cut -f1) if [ -n "$DM_DEV" ]; then sudo "${DMSETUP}" remove "${DM_DEV}" || echo "Failed to remove: ${DM_DEV}" fi if [ -n "${TEST_LOOPBACK}" ]; then sudo "${LOSETUP}" -d "${TEST_LOOPBACK}" || echo "Failed to remove: ${TEST_LOOPBACK}" fi done } # # Attempt to remove loopback devices and files which where created earlier # by this script to run the test framework. The '-k' option may be passed # to the script to suppress cleanup for debugging purposes. # cleanup() { if [ "$CLEANUP" = "no" ]; then return 0 fi if [ "$LOOPBACK" = "yes" ]; then if [ "$UNAME" = "FreeBSD" ] ; then cleanup_freebsd_loopback else cleanup_linux_loopback fi fi for TEST_FILE in ${FILES}; do rm -f "${TEST_FILE}" >/dev/null 2>&1 done if [ "$STF_PATH_REMOVE" = "yes" ] && [ -d "$STF_PATH" ]; then rm -Rf "$STF_PATH" fi } trap cleanup EXIT # # Attempt to remove all testpools (testpool.XXX), unopened dm devices, # loopback devices, and files. 
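As a hedged usage aside for the cleanup controls defined above (the -k and -x options are documented later in usage(); everything else here is just the script's own defaults):

    ./scripts/zfs-tests.sh -v -k    # keep pools, loopbacks and file vdevs around after a failure
    ./scripts/zfs-tests.sh -x       # destroy those leftovers first, then run the suite again (dedicated test hosts only)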
This is a useful way to cleanup a previous # test run failure which has left the system in an unknown state. This can # be dangerous and should only be used in a dedicated test environment. # cleanup_all() { TEST_POOLS=$(sudo "$ZPOOL" list -H -o name | grep testpool) if [ "$UNAME" = "FreeBSD" ] ; then TEST_LOOPBACKS=$(sudo "${LOSETUP}" -l) else TEST_LOOPBACKS=$(sudo "${LOSETUP}" -a|grep file-vdev|cut -f1 -d:) fi TEST_FILES=$(ls /var/tmp/file-vdev* 2>/dev/null) msg msg "--- Cleanup ---" msg "Removing pool(s): $(echo "${TEST_POOLS}" | tr '\n' ' ')" for TEST_POOL in $TEST_POOLS; do sudo "$ZPOOL" destroy "${TEST_POOL}" done if [ "$UNAME" != "FreeBSD" ] ; then msg "Removing dm(s): $(sudo "${DMSETUP}" ls | grep loop | tr '\n' ' ')" sudo "${DMSETUP}" remove_all fi msg "Removing loopback(s): $(echo "${TEST_LOOPBACKS}" | tr '\n' ' ')" for TEST_LOOPBACK in $TEST_LOOPBACKS; do if [ "$UNAME" = "FreeBSD" ] ; then sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}" else sudo "${LOSETUP}" -d "${TEST_LOOPBACK}" fi done msg "Removing files(s): $(echo "${TEST_FILES}" | tr '\n' ' ')" for TEST_FILE in $TEST_FILES; do sudo rm -f "${TEST_FILE}" done } # # Takes a name as the only arguments and looks for the following variations # on that name. If one is found it is returned. # # $RUNFILE_DIR/ # $RUNFILE_DIR/.run # # .run # find_runfile() { NAME=$1 RESULT="" if [ -f "$RUNFILE_DIR/$NAME" ]; then RESULT="$RUNFILE_DIR/$NAME" elif [ -f "$RUNFILE_DIR/$NAME.run" ]; then RESULT="$RUNFILE_DIR/$NAME.run" elif [ -f "$NAME" ]; then RESULT="$NAME" elif [ -f "$NAME.run" ]; then RESULT="$NAME.run" fi echo "$RESULT" } # # Symlink file if it appears under any of the given paths. # create_links() { dir_list="$1" file_list="$2" [ -n "$STF_PATH" ] || fail "STF_PATH wasn't correctly set" for i in $file_list; do for j in $dir_list; do [ ! -e "$STF_PATH/$i" ] || continue if [ ! -d "$j/$i" ] && [ -e "$j/$i" ]; then ln -sf "$j/$i" "$STF_PATH/$i" || \ fail "Couldn't link $i" break fi done [ ! -e "$STF_PATH/$i" ] && \ STF_MISSING_BIN="$STF_MISSING_BIN $i" done STF_MISSING_BIN=${STF_MISSING_BIN# } } # # Constrain the path to limit the available binaries to a known set. # When running in-tree a top level ./bin/ directory is created for # convenience, otherwise a temporary directory is used. # constrain_path() { . "$STF_SUITE/include/commands.cfg" # On FreeBSD, base system zfs utils are in /sbin and OpenZFS utils # install to /usr/local/sbin. To avoid testing the wrong utils we # need /usr/local to come before / in the path search order. SYSTEM_DIRS="/usr/local/bin /usr/local/sbin" SYSTEM_DIRS="$SYSTEM_DIRS /usr/bin /usr/sbin /bin /sbin $LIBEXEC_DIR" if [ "$INTREE" = "yes" ]; then # Constrained path set to ./zfs/bin/ STF_PATH="$BIN_DIR" STF_PATH_REMOVE="no" STF_MISSING_BIN="" if [ ! -d "$STF_PATH" ]; then mkdir "$STF_PATH" chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH" fi # Special case links for standard zfs utilities DIRS="$(find "$CMD_DIR" -type d \( ! -name .deps -a \ ! -name .libs \) -print | tr '\n' ' ')" create_links "$DIRS" "$ZFS_FILES" # Special case links for zfs test suite utilities DIRS="$(find "$STF_SUITE" -type d \( ! -name .deps -a \ ! 
-name .libs \) -print | tr '\n' ' ')" create_links "$DIRS" "$ZFSTEST_FILES" else # Constrained path set to /var/tmp/constrained_path.* SYSTEMDIR=${SYSTEMDIR:-/var/tmp/constrained_path.XXXXXX} STF_PATH=$(mktemp -d "$SYSTEMDIR") STF_PATH_REMOVE="yes" STF_MISSING_BIN="" chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH" # Special case links for standard zfs utilities create_links "$SYSTEM_DIRS" "$ZFS_FILES" # Special case links for zfs test suite utilities create_links "$STF_SUITE/bin" "$ZFSTEST_FILES" fi # Standard system utilities SYSTEM_FILES="$SYSTEM_FILES_COMMON" if [ "$UNAME" = "FreeBSD" ] ; then SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_FREEBSD" else SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_LINUX" fi create_links "$SYSTEM_DIRS" "$SYSTEM_FILES" # Exceptions ln -fs "$STF_PATH/awk" "$STF_PATH/nawk" if [ "$UNAME" = "Linux" ] ; then ln -fs /sbin/fsck.ext4 "$STF_PATH/fsck" ln -fs /sbin/mkfs.ext4 "$STF_PATH/newfs" ln -fs "$STF_PATH/gzip" "$STF_PATH/compress" ln -fs "$STF_PATH/gunzip" "$STF_PATH/uncompress" ln -fs "$STF_PATH/exportfs" "$STF_PATH/share" ln -fs "$STF_PATH/exportfs" "$STF_PATH/unshare" elif [ "$UNAME" = "FreeBSD" ] ; then ln -fs /usr/local/bin/ksh93 "$STF_PATH/ksh" fi } # # Output a useful usage message. # usage() { cat << EOF USAGE: $0 [-hvqxkfS] [-s SIZE] [-r RUNFILES] [-t PATH] [-u USER] DESCRIPTION: ZFS Test Suite launch script OPTIONS: -h Show this message -v Verbose zfs-tests.sh output -q Quiet test-runner output -x Remove all testpools, dm, lo, and files (unsafe) -k Disable cleanup after test failure -f Use files only, disables block device tests -S Enable stack tracer (negative performance impact) -c Only create and populate constrained path -n NFSFILE Use the nfsfile to determine the NFS configuration -I NUM Number of iterations -d DIR Use DIR for files and loopback devices -s SIZE Use vdevs of SIZE (default: 4G) -r RUNFILES Run tests in RUNFILES (default: ${DEFAULT_RUNFILES}) -t PATH Run single test at PATH relative to test suite -T TAGS Comma separated list of tags (default: 'functional') -u USER Run single test as USER (default: root) EXAMPLES: # Run the default (linux) suite of tests and output the configuration used. $0 -v # Run a smaller suite of tests designed to run more quickly. $0 -r linux-fast # Run a single test $0 -t tests/functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh # Cleanup a previous run of the test suite prior to testing, run the # default (linux) suite of tests and perform no cleanup on exit. $0 -x EOF } while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do case $OPTION in h) usage exit 1 ;; v) VERBOSE="yes" ;; q) QUIET="yes" ;; x) CLEANUPALL="yes" ;; k) CLEANUP="no" ;; f) LOOPBACK="no" ;; S) STACK_TRACER="yes" ;; c) constrain_path exit ;; n) nfsfile=$OPTARG [ -f "$nfsfile" ] || fail "Cannot read file: $nfsfile" export NFS=1 . "$nfsfile" ;; d) FILEDIR="$OPTARG" ;; I) ITERATIONS="$OPTARG" if [ "$ITERATIONS" -le 0 ]; then fail "Iterations must be greater than 0." fi ;; s) FILESIZE="$OPTARG" ;; r) RUNFILES="$OPTARG" ;; t) if [ -n "$SINGLETEST" ]; then fail "-t can only be provided once." fi SINGLETEST="$OPTARG" ;; T) TAGS="$OPTARG" ;; u) SINGLETESTUSER="$OPTARG" ;; ?) usage exit ;; esac done shift $((OPTIND-1)) FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"} LOOPBACKS=${LOOPBACKS:-""} if [ -n "$SINGLETEST" ]; then if [ -n "$TAGS" ]; then fail "-t and -T are mutually exclusive." 
fi RUNFILE_DIR="/var/tmp" RUNFILES="zfs-tests.$$.run" SINGLEQUIET="False" if [ -n "$QUIET" ]; then SINGLEQUIET="True" fi cat >$RUNFILE_DIR/$RUNFILES << EOF [DEFAULT] pre = quiet = $SINGLEQUIET pre_user = root user = $SINGLETESTUSER timeout = 600 post_user = root post = outputdir = /var/tmp/test_results EOF SINGLETESTDIR=$(dirname "$SINGLETEST") SINGLETESTFILE=$(basename "$SINGLETEST") SETUPSCRIPT= CLEANUPSCRIPT= if [ -f "$STF_SUITE/$SINGLETESTDIR/setup.ksh" ]; then SETUPSCRIPT="setup" fi if [ -f "$STF_SUITE/$SINGLETESTDIR/cleanup.ksh" ]; then CLEANUPSCRIPT="cleanup" fi cat >>$RUNFILE_DIR/$RUNFILES << EOF [$SINGLETESTDIR] tests = ['$SINGLETESTFILE'] pre = $SETUPSCRIPT post = $CLEANUPSCRIPT tags = ['functional'] EOF fi # # Use default tag if none was specified # TAGS=${TAGS:='functional'} # # Attempt to locate the runfiles describing the test workload. # R="" IFS=, for RUNFILE in $RUNFILES; do if [ -n "$RUNFILE" ]; then SAVED_RUNFILE="$RUNFILE" RUNFILE=$(find_runfile "$RUNFILE") [ -z "$RUNFILE" ] && fail "Cannot find runfile: $SAVED_RUNFILE" R="$R,$RUNFILE" fi if [ ! -r "$RUNFILE" ]; then fail "Cannot read runfile: $RUNFILE" fi done unset IFS RUNFILES=${R#,} # # This script should not be run as root. Instead the test user, which may # be a normal user account, needs to be configured such that it can # run commands via sudo passwordlessly. # if [ "$(id -u)" = "0" ]; then fail "This script must not be run as root." fi if [ "$(sudo whoami)" != "root" ]; then fail "Passwordless sudo access required." fi # # Constrain the available binaries to a known set. # constrain_path # # Check if ksh exists # if [ "$UNAME" = "FreeBSD" ]; then sudo ln -fs /usr/local/bin/ksh93 /bin/ksh fi [ -e "$STF_PATH/ksh" ] || fail "This test suite requires ksh." [ -e "$STF_SUITE/include/default.cfg" ] || fail \ "Missing $STF_SUITE/include/default.cfg file." # # Verify the ZFS module stack is loaded. # if [ "$STACK_TRACER" = "yes" ]; then sudo "${ZFS_SH}" -S >/dev/null 2>&1 else sudo "${ZFS_SH}" >/dev/null 2>&1 fi # # Attempt to cleanup all previous state for a new test run. # if [ "$CLEANUPALL" = "yes" ]; then cleanup_all fi # # By default preserve any existing pools # NOTE: Since 'zpool list' outputs a newline-delimited list convert $KEEP from # space-delimited to newline-delimited. # if [ -z "${KEEP}" ]; then KEEP="$(sudo "$ZPOOL" list -H -o name)" if [ -z "${KEEP}" ]; then KEEP="rpool" fi else KEEP="$(echo "$KEEP" | tr '[:blank:]' '\n')" fi # # NOTE: The following environment variables are undocumented # and should be used for testing purposes only: # # __ZFS_POOL_EXCLUDE - don't iterate over the pools it lists # __ZFS_POOL_RESTRICT - iterate only over the pools it lists # # See libzfs/libzfs_config.c for more information. # if [ "$UNAME" = "FreeBSD" ] ; then __ZFS_POOL_EXCLUDE="$(echo "$KEEP" | tr -s '\n' ' ')" else __ZFS_POOL_EXCLUDE="$(echo "$KEEP" | sed ':a;N;s/\n/ /g;ba')" fi . "$STF_SUITE/include/default.cfg" -msg -msg "--- Configuration ---" -msg "Runfiles: $RUNFILES" -msg "STF_TOOLS: $STF_TOOLS" -msg "STF_SUITE: $STF_SUITE" -msg "STF_PATH: $STF_PATH" - # # No DISKS have been provided so a basic file or loopback based devices # must be created for the test suite to use. # if [ -z "${DISKS}" ]; then + # + # If this is a performance run, prevent accidental use of + # loopback devices. + # + [ "$TAGS" = "perf" ] && fail "Running perf tests without disks." + # # Create sparse files for the test suite. These may be used # directory or have loopback devices layered on them. 
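# (Hedged aside, not part of the patch: with the new check above a
#  perf-tagged run must supply real disks, e.g.
#      DISKS="sdb sdc" ./scripts/zfs-tests.sh -T perf -r perf-regression
#  which the relaxed NUM_DISKS test further below now accepts with only
#  two disks; the perf-regression runfile name is assumed from the
#  shipped runfiles. A functional run without DISKS instead falls
#  through to this block and creates $FILEDIR/file-vdev0..2 as
#  $FILESIZE (4G by default) sparse files with "truncate -s",
#  optionally attaching each one to a free loop device.)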
# for TEST_FILE in ${FILES}; do [ -f "$TEST_FILE" ] && fail "Failed file exists: ${TEST_FILE}" truncate -s "${FILESIZE}" "${TEST_FILE}" || fail "Failed creating: ${TEST_FILE} ($?)" done # # If requested setup loopback devices backed by the sparse files. # if [ "$LOOPBACK" = "yes" ]; then test -x "$LOSETUP" || fail "$LOSETUP utility must be installed" for TEST_FILE in ${FILES}; do if [ "$UNAME" = "FreeBSD" ] ; then MDDEVICE=$(sudo "${LOSETUP}" -a -t vnode -f "${TEST_FILE}") if [ -z "$MDDEVICE" ] ; then fail "Failed: ${TEST_FILE} -> loopback" fi DISKS="$DISKS $MDDEVICE" LOOPBACKS="$LOOPBACKS $MDDEVICE" else TEST_LOOPBACK=$(sudo "${LOSETUP}" -f) sudo "${LOSETUP}" "${TEST_LOOPBACK}" "${TEST_FILE}" || fail "Failed: ${TEST_FILE} -> ${TEST_LOOPBACK}" BASELOOPBACK=$(basename "$TEST_LOOPBACK") DISKS="$DISKS $BASELOOPBACK" LOOPBACKS="$LOOPBACKS $TEST_LOOPBACK" fi done DISKS=${DISKS# } LOOPBACKS=${LOOPBACKS# } else DISKS="$FILES" fi fi +# +# It may be desirable to test with fewer disks than the default when running +# the performance tests, but the functional tests require at least three. +# NUM_DISKS=$(echo "${DISKS}" | awk '{print NF}') -[ "$NUM_DISKS" -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)" +if [ "$TAGS" != "perf" ]; then + [ "$NUM_DISKS" -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)" +fi # # Disable SELinux until the ZFS Test Suite has been updated accordingly. # if [ -x "$STF_PATH/setenforce" ]; then sudo setenforce permissive >/dev/null 2>&1 fi # # Enable internal ZFS debug log and clear it. # if [ -e /sys/module/zfs/parameters/zfs_dbgmsg_enable ]; then sudo /bin/sh -c "echo 1 >/sys/module/zfs/parameters/zfs_dbgmsg_enable" sudo /bin/sh -c "echo 0 >/proc/spl/kstat/zfs/dbgmsg" fi +msg +msg "--- Configuration ---" +msg "Runfiles: $RUNFILES" +msg "STF_TOOLS: $STF_TOOLS" +msg "STF_SUITE: $STF_SUITE" +msg "STF_PATH: $STF_PATH" msg "FILEDIR: $FILEDIR" msg "FILES: $FILES" msg "LOOPBACKS: $LOOPBACKS" msg "DISKS: $DISKS" msg "NUM_DISKS: $NUM_DISKS" msg "FILESIZE: $FILESIZE" msg "ITERATIONS: $ITERATIONS" msg "TAGS: $TAGS" msg "STACK_TRACER: $STACK_TRACER" msg "Keep pool(s): $KEEP" msg "Missing util(s): $STF_MISSING_BIN" msg "" export STF_TOOLS export STF_SUITE export STF_PATH export DISKS export FILEDIR export KEEP export __ZFS_POOL_EXCLUDE export TESTFAIL_CALLBACKS export PATH=$STF_PATH if [ "$UNAME" = "FreeBSD" ] ; then mkdir -p "$FILEDIR" || true RESULTS_FILE=$(mktemp -u "${FILEDIR}/zts-results.XXXXXX") REPORT_FILE=$(mktemp -u "${FILEDIR}/zts-report.XXXXXX") else RESULTS_FILE=$(mktemp -u -t zts-results.XXXXXX -p "$FILEDIR") REPORT_FILE=$(mktemp -u -t zts-report.XXXXXX -p "$FILEDIR") fi # # Run all the tests as specified. # msg "${TEST_RUNNER} ${QUIET:+-q}" \ "-c \"${RUNFILES}\"" \ "-T \"${TAGS}\"" \ "-i \"${STF_SUITE}\"" \ "-I \"${ITERATIONS}\"" ${TEST_RUNNER} ${QUIET:+-q} \ -c "${RUNFILES}" \ -T "${TAGS}" \ -i "${STF_SUITE}" \ -I "${ITERATIONS}" \ 2>&1 | tee "$RESULTS_FILE" # # Analyze the results. # ${ZTS_REPORT} "$RESULTS_FILE" >"$REPORT_FILE" RESULT=$? 
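Since RESULT above is what the script ultimately returns via the exit at the end of this file, a wrapper can branch on it directly; a minimal hedged sketch:

    if ./scripts/zfs-tests.sh -q; then
        echo "ZTS run clean"
    else
        echo "ZTS reported failures; see the 'results' file in the log directory"
    fi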
cat "$REPORT_FILE" RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE") if [ -d "$RESULTS_DIR" ]; then cat "$RESULTS_FILE" "$REPORT_FILE" >"$RESULTS_DIR/results" fi rm -f "$RESULTS_FILE" "$REPORT_FILE" if [ -n "$SINGLETEST" ]; then rm -f "$RUNFILES" >/dev/null 2>&1 fi exit ${RESULT} diff --git a/tests/zfs-tests/include/tunables.cfg b/tests/zfs-tests/include/tunables.cfg index a1b75a48292f..56d430a39875 100644 --- a/tests/zfs-tests/include/tunables.cfg +++ b/tests/zfs-tests/include/tunables.cfg @@ -1,95 +1,95 @@ # This file exports variables for each tunable used in the test suite. # # Different platforms use different names for most tunables. To avoid littering # the tests with conditional logic for deciding how to set each tunable, the # logic is instead consolidated to this one file. # # Any use of tunables in tests must use a name defined here. New entries # should be added to the table as needed. Please keep the table sorted # alphabetically for ease of maintenance. # # Platform-specific tunables should still use a NAME from this table for # consistency. Enter UNSUPPORTED in the column for platforms on which the # tunable is not implemented. UNAME=$(uname) # NAME FreeBSD tunable Linux tunable cat <<%%%% | ADMIN_SNAPSHOT UNSUPPORTED zfs_admin_snapshot ALLOW_REDACTED_DATASET_MOUNT allow_redacted_dataset_mount zfs_allow_redacted_dataset_mount ARC_MAX arc.max zfs_arc_max ARC_MIN arc.min zfs_arc_min ASYNC_BLOCK_MAX_BLOCKS async_block_max_blocks zfs_async_block_max_blocks CHECKSUM_EVENTS_PER_SECOND checksum_events_per_second zfs_checksum_events_per_second COMMIT_TIMEOUT_PCT commit_timeout_pct zfs_commit_timeout_pct COMPRESSED_ARC_ENABLED compressed_arc_enabled zfs_compressed_arc_enabled CONDENSE_INDIRECT_COMMIT_ENTRY_DELAY_MS condense.indirect_commit_entry_delay_ms zfs_condense_indirect_commit_entry_delay_ms CONDENSE_INDIRECT_OBSOLETE_PCT condense.indirect_obsolete_pct zfs_condense_indirect_obsolete_pct CONDENSE_MIN_MAPPING_BYTES condense.min_mapping_bytes zfs_condense_min_mapping_bytes -DBUF_CACHE_MAX_BYTES dbuf_cache.max_bytes dbuf_cache_max_bytes +DBUF_CACHE_SHIFT dbuf.cache_shift dbuf_cache_shift DEADMAN_CHECKTIME_MS deadman.checktime_ms zfs_deadman_checktime_ms DEADMAN_FAILMODE deadman.failmode zfs_deadman_failmode DEADMAN_SYNCTIME_MS deadman.synctime_ms zfs_deadman_synctime_ms DEADMAN_ZIOTIME_MS deadman.ziotime_ms zfs_deadman_ziotime_ms DISABLE_IVSET_GUID_CHECK disable_ivset_guid_check zfs_disable_ivset_guid_check INITIALIZE_CHUNK_SIZE initialize_chunk_size zfs_initialize_chunk_size INITIALIZE_VALUE initialize_value zfs_initialize_value KEEP_LOG_SPACEMAPS_AT_EXPORT keep_log_spacemaps_at_export zfs_keep_log_spacemaps_at_export LUA_MAX_MEMLIMIT lua.max_memlimit zfs_lua_max_memlimit L2ARC_MFUONLY l2arc.mfuonly l2arc_mfuonly L2ARC_NOPREFETCH l2arc.noprefetch l2arc_noprefetch L2ARC_REBUILD_BLOCKS_MIN_L2SIZE l2arc.rebuild_blocks_min_l2size l2arc_rebuild_blocks_min_l2size L2ARC_REBUILD_ENABLED l2arc.rebuild_enabled l2arc_rebuild_enabled L2ARC_TRIM_AHEAD l2arc.trim_ahead l2arc_trim_ahead L2ARC_WRITE_BOOST l2arc.write_boost l2arc_write_boost L2ARC_WRITE_MAX l2arc.write_max l2arc_write_max LIVELIST_CONDENSE_NEW_ALLOC livelist.condense.new_alloc zfs_livelist_condense_new_alloc LIVELIST_CONDENSE_SYNC_CANCEL livelist.condense.sync_cancel zfs_livelist_condense_sync_cancel LIVELIST_CONDENSE_SYNC_PAUSE livelist.condense.sync_pause zfs_livelist_condense_sync_pause LIVELIST_CONDENSE_ZTHR_CANCEL livelist.condense.zthr_cancel zfs_livelist_condense_zthr_cancel LIVELIST_CONDENSE_ZTHR_PAUSE 
livelist.condense.zthr_pause zfs_livelist_condense_zthr_pause LIVELIST_MAX_ENTRIES livelist.max_entries zfs_livelist_max_entries LIVELIST_MIN_PERCENT_SHARED livelist.min_percent_shared zfs_livelist_min_percent_shared MAX_DATASET_NESTING max_dataset_nesting zfs_max_dataset_nesting MAX_MISSING_TVDS max_missing_tvds zfs_max_missing_tvds METASLAB_DEBUG_LOAD metaslab.debug_load metaslab_debug_load METASLAB_FORCE_GANGING metaslab.force_ganging metaslab_force_ganging MULTIHOST_FAIL_INTERVALS multihost.fail_intervals zfs_multihost_fail_intervals MULTIHOST_HISTORY multihost.history zfs_multihost_history MULTIHOST_IMPORT_INTERVALS multihost.import_intervals zfs_multihost_import_intervals MULTIHOST_INTERVAL multihost.interval zfs_multihost_interval OVERRIDE_ESTIMATE_RECORDSIZE send.override_estimate_recordsize zfs_override_estimate_recordsize PREFETCH_DISABLE prefetch.disable zfs_prefetch_disable REBUILD_SCRUB_ENABLED rebuild_scrub_enabled zfs_rebuild_scrub_enabled REMOVAL_SUSPEND_PROGRESS removal_suspend_progress zfs_removal_suspend_progress REMOVE_MAX_SEGMENT remove_max_segment zfs_remove_max_segment RESILVER_MIN_TIME_MS resilver_min_time_ms zfs_resilver_min_time_ms SCAN_LEGACY scan_legacy zfs_scan_legacy SCAN_SUSPEND_PROGRESS scan_suspend_progress zfs_scan_suspend_progress SCAN_VDEV_LIMIT scan_vdev_limit zfs_scan_vdev_limit SEND_HOLES_WITHOUT_BIRTH_TIME send_holes_without_birth_time send_holes_without_birth_time SLOW_IO_EVENTS_PER_SECOND slow_io_events_per_second zfs_slow_io_events_per_second SPA_ASIZE_INFLATION spa.asize_inflation spa_asize_inflation SPA_DISCARD_MEMORY_LIMIT spa.discard_memory_limit zfs_spa_discard_memory_limit SPA_LOAD_VERIFY_DATA spa.load_verify_data spa_load_verify_data SPA_LOAD_VERIFY_METADATA spa.load_verify_metadata spa_load_verify_metadata TRIM_EXTENT_BYTES_MIN trim.extent_bytes_min zfs_trim_extent_bytes_min TRIM_METASLAB_SKIP trim.metaslab_skip zfs_trim_metaslab_skip TRIM_TXG_BATCH trim.txg_batch zfs_trim_txg_batch TXG_HISTORY txg.history zfs_txg_history TXG_TIMEOUT txg.timeout zfs_txg_timeout UNLINK_SUSPEND_PROGRESS UNSUPPORTED zfs_unlink_suspend_progress VDEV_FILE_PHYSICAL_ASHIFT vdev.file.physical_ashift vdev_file_physical_ashift VDEV_MIN_MS_COUNT vdev.min_ms_count zfs_vdev_min_ms_count VDEV_VALIDATE_SKIP vdev.validate_skip vdev_validate_skip VOL_INHIBIT_DEV UNSUPPORTED zvol_inhibit_dev VOL_MODE vol.mode zvol_volmode VOL_RECURSIVE vol.recursive UNSUPPORTED ZEVENT_LEN_MAX zevent.len_max zfs_zevent_len_max ZEVENT_RETAIN_MAX zevent.retain_max zfs_zevent_retain_max ZIO_SLOW_IO_MS zio.slow_io_ms zio_slow_io_ms %%%% while read name FreeBSD Linux; do eval "export ${name}=\$${UNAME}" done diff --git a/tests/zfs-tests/tests/perf/perf.shlib b/tests/zfs-tests/tests/perf/perf.shlib index 6addd46610c2..6f4fdc94348f 100644 --- a/tests/zfs-tests/tests/perf/perf.shlib +++ b/tests/zfs-tests/tests/perf/perf.shlib @@ -1,575 +1,602 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # -# Copyright (c) 2015, 2016 by Delphix. All rights reserved. +# Copyright (c) 2015, 2021 by Delphix. All rights reserved. # Copyright (c) 2016, Intel Corporation. # . $STF_SUITE/include/libtest.shlib -# If neither is specified, do a nightly run. 
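Stepping back to tunables.cfg above: the while/eval loop exports each NAME as the platform-specific parameter string, which the suite's existing get_tunable/set_tunable helpers then dereference. A hedged sketch of how the renamed entry is used (the numeric value is purely illustrative):

    # On Linux $DBUF_CACHE_SHIFT expands to "dbuf_cache_shift",
    # on FreeBSD to "dbuf.cache_shift".
    log_must set_tunable32 DBUF_CACHE_SHIFT 5
    typeset shift=$(get_tunable DBUF_CACHE_SHIFT)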
-[[ -z $PERF_REGRESSION_WEEKLY ]] && export PERF_REGRESSION_NIGHTLY=1 - -# Default runtime for each type of test run. -export PERF_RUNTIME_WEEKLY=$((30 * 60)) -export PERF_RUNTIME_NIGHTLY=$((10 * 60)) +# Defaults common to all the tests in the regression group +export PERF_RUNTIME=${PERF_RUNTIME:-'180'} +export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} +export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} +export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} +export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} # Default to JSON for fio output export PERF_FIO_FORMAT=${PERF_FIO_FORMAT:-'json'} # Default fs creation options export PERF_FS_OPTS=${PERF_FS_OPTS:-'-o recsize=8k -o compress=lz4' \ ' -o checksum=sha256 -o redundant_metadata=most'} function get_sync_str { typeset sync=$1 typeset sync_str='' [[ $sync -eq 0 ]] && sync_str='async' [[ $sync -eq 1 ]] && sync_str='sync' echo $sync_str } function get_suffix { typeset threads=$1 typeset sync=$2 typeset iosize=$3 typeset sync_str=$(get_sync_str $sync) typeset filesystems=$(get_nfilesystems) typeset suffix="$sync_str.$iosize-ios" suffix="$suffix.$threads-threads.$filesystems-filesystems" echo $suffix } function do_fio_run_impl { typeset script=$1 typeset do_recreate=$2 typeset clear_cache=$3 typeset threads=$4 typeset threads_per_fs=$5 typeset sync=$6 typeset iosize=$7 typeset sync_str=$(get_sync_str $sync) log_note "Running with $threads $sync_str threads, $iosize ios" if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then log_must test $do_recreate verify_threads_per_fs $threads $threads_per_fs fi if $do_recreate; then recreate_perf_pool # # A value of zero for "threads_per_fs" is "special", and # means a single filesystem should be used, regardless # of the number of threads. # if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then populate_perf_filesystems $((threads / threads_per_fs)) else populate_perf_filesystems 1 fi fi if $clear_cache; then # Clear the ARC - zpool export $PERFPOOL - zpool import $PERFPOOL + log_must zinject -a fi if [[ -n $ZINJECT_DELAYS ]]; then apply_zinject_delays else log_note "No per-device commands to execute." fi # # Allow this to be overridden by the individual test case. This # can be used to run the FIO job against something other than # the default filesystem (e.g. against a clone). # export DIRECTORY=$(get_directory) log_note "DIRECTORY: " $DIRECTORY export RUNTIME=$PERF_RUNTIME export RANDSEED=$PERF_RANDSEED export COMPPERCENT=$PERF_COMPPERCENT export COMPCHUNK=$PERF_COMPCHUNK export FILESIZE=$((TOTAL_SIZE / threads)) export NUMJOBS=$threads export SYNC_TYPE=$sync export BLOCKSIZE=$iosize sync # When running locally, we want to keep the default behavior of # DIRECT == 0, so only set it when we're running over NFS to # disable client cache for reads. if [[ $NFS -eq 1 ]]; then export DIRECT=1 do_setup_nfs $script else export DIRECT=0 fi # This will be part of the output filename. 
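A quick worked example of the naming scheme built by get_suffix and used for the fio output file below (values are illustrative):

    # threads=16, sync=1, iosize=8k, one filesystem:
    #   get_suffix 16 1 8k  ->  sync.8k-ios.16-threads.1-filesystems
    # so the fio results land in something like
    #   perf_data/<test name>.fio.sync.8k-ios.16-threads.1-filesystems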
typeset suffix=$(get_suffix $threads $sync $iosize) # Start the data collection do_collect_scripts $suffix # Define output file typeset logbase="$(get_perf_output_dir)/$(basename \ $SUDO_COMMAND)" typeset outfile="$logbase.fio.$suffix" # Start the load if [[ $NFS -eq 1 ]]; then log_must ssh -t $NFS_USER@$NFS_CLIENT " fio --output-format=${PERF_FIO_FORMAT} \ --output /tmp/fio.out /tmp/test.fio " log_must scp $NFS_USER@$NFS_CLIENT:/tmp/fio.out $outfile log_must ssh -t $NFS_USER@$NFS_CLIENT "sudo -S umount $NFS_MOUNT" else log_must fio --output-format=${PERF_FIO_FORMAT} \ --output $outfile $FIO_SCRIPTS/$script fi } # # This function will run fio in a loop, according to the .fio file passed # in and a number of environment variables. The following variables can be # set before launching zfstest to override the defaults. # # PERF_RUNTIME: The time in seconds each fio invocation should run. -# PERF_RUNTYPE: A human readable tag that appears in logs. The defaults are -# nightly and weekly. # PERF_NTHREADS: A list of how many threads each fio invocation will use. # PERF_SYNC_TYPES: Whether to use (O_SYNC) or not. 1 is sync IO, 0 is async IO. # PERF_IOSIZES: A list of blocksizes in which each fio invocation will do IO. # PERF_COLLECT_SCRIPTS: A comma delimited list of 'command args, logfile_tag' # pairs that will be added to the scripts specified in each test. # function do_fio_run { typeset script=$1 typeset do_recreate=$2 typeset clear_cache=$3 typeset threads threads_per_fs sync iosize for threads in $PERF_NTHREADS; do for threads_per_fs in $PERF_NTHREADS_PER_FS; do for sync in $PERF_SYNC_TYPES; do for iosize in $PERF_IOSIZES; do do_fio_run_impl \ $script \ $do_recreate \ $clear_cache \ $threads \ $threads_per_fs \ $sync \ $iosize done done done done } # This function sets NFS mount on the client and make sure all correct # permissions are in place # function do_setup_nfs { typeset script=$1 zfs set sharenfs=on $TESTFS log_must chmod -R 777 /$TESTFS ssh -t $NFS_USER@$NFS_CLIENT "mkdir -m 777 -p $NFS_MOUNT" ssh -t $NFS_USER@$NFS_CLIENT "sudo -S umount $NFS_MOUNT" log_must ssh -t $NFS_USER@$NFS_CLIENT " sudo -S mount $NFS_OPTIONS $NFS_SERVER:/$TESTFS $NFS_MOUNT " # # The variables in the fio script are only available in our current # shell session, so we have to evaluate them here before copying # the resulting script over to the target machine. # export jobnum='$jobnum' while read line; do eval echo "$line" done < $FIO_SCRIPTS/$script > /tmp/test.fio log_must sed -i -e "s%directory.*%directory=$NFS_MOUNT%" /tmp/test.fio log_must scp /tmp/test.fio $NFS_USER@$NFS_CLIENT:/tmp log_must rm /tmp/test.fio } # # This function iterates through the value pairs in $PERF_COLLECT_SCRIPTS. # The script at index N is launched in the background, with its output # redirected to a logfile containing the tag specified at index N + 1. # function do_collect_scripts { typeset suffix=$1 [[ -n $collect_scripts ]] || log_fail "No data collection scripts." [[ -n $PERF_RUNTIME ]] || log_fail "No runtime specified." # Add in user supplied scripts and logfiles, if any. 
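To make the 'command args, logfile_tag' pairing described above concrete, a hedged example of a user-supplied addition (the collector commands are arbitrary placeholders):

    # PERF_COLLECT_SCRIPTS="arcstat 1, arcstat, dstat 1, dstat"
    # adds two extra background collectors whose output is written to
    # <test name>.arcstat.<suffix> and <test name>.dstat.<suffix>
    # alongside the per-test defaults.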
typeset oIFS=$IFS IFS=',' for item in $PERF_COLLECT_SCRIPTS; do collect_scripts+=($(echo $item | sed 's/^ *//g')) done IFS=$oIFS typeset idx=0 while [[ $idx -lt "${#collect_scripts[@]}" ]]; do typeset logbase="$(get_perf_output_dir)/$(basename \ $SUDO_COMMAND)" typeset outfile="$logbase.${collect_scripts[$idx + 1]}.$suffix" timeout $PERF_RUNTIME ${collect_scripts[$idx]} >$outfile 2>&1 & ((idx += 2)) done # Need to explicitly return 0 because timeout(1) will kill # a child process and cause us to return non-zero. return 0 } # Find a place to deposit performance data collected while under load. function get_perf_output_dir { typeset dir="$(pwd)/perf_data" [[ -d $dir ]] || mkdir -p $dir echo $dir } function apply_zinject_delays { typeset idx=0 while [[ $idx -lt "${#ZINJECT_DELAYS[@]}" ]]; do [[ -n ${ZINJECT_DELAYS[$idx]} ]] || \ log_must "No zinject delay found at index: $idx" for disk in $DISKS; do log_must zinject \ -d $disk -D ${ZINJECT_DELAYS[$idx]} $PERFPOOL done ((idx += 1)) done } function clear_zinject_delays { log_must zinject -c all } # # Destroy and create the pool used for performance tests. # function recreate_perf_pool { [[ -n $PERFPOOL ]] || log_fail "The \$PERFPOOL variable isn't set." # # In case there's been some "leaked" zinject delays, or if the # performance test injected some delays itself, we clear all # delays before attempting to destroy the pool. Each delay # places a hold on the pool, so the destroy will fail if there # are any outstanding delays. # clear_zinject_delays # # This function handles the case where the pool already exists, # and will destroy the previous pool and recreate a new pool. # create_pool $PERFPOOL $DISKS } function verify_threads_per_fs { typeset threads=$1 typeset threads_per_fs=$2 log_must test -n $threads log_must test -n $threads_per_fs # # A value of "0" is treated as a "special value", and it is # interpreted to mean all threads will run using a single # filesystem. # [[ $threads_per_fs -eq 0 ]] && return # # The number of threads per filesystem must be a value greater # than or equal to zero; since we just verified the value isn't # 0 above, then it must be greater than zero here. # log_must test $threads_per_fs -ge 0 # # This restriction can be lifted later if needed, but for now, # we restrict the number of threads per filesystem to a value # that evenly divides the thread count. This way, the threads # will be evenly distributed over all the filesystems. # log_must test $((threads % threads_per_fs)) -eq 0 } function populate_perf_filesystems { typeset nfilesystems=${1:-1} export TESTFS="" for i in $(seq 1 $nfilesystems); do typeset dataset="$PERFPOOL/fs$i" create_dataset $dataset $PERF_FS_OPTS if [[ -z "$TESTFS" ]]; then TESTFS="$dataset" else TESTFS="$TESTFS $dataset" fi done } function get_nfilesystems { typeset filesystems=( $TESTFS ) echo ${#filesystems[@]} } function get_directory { typeset filesystems=( $TESTFS ) typeset directory= typeset idx=0 while [[ $idx -lt "${#filesystems[@]}" ]]; do mountpoint=$(get_prop mountpoint "${filesystems[$idx]}") if [[ -n $directory ]]; then directory=$directory:$mountpoint else directory=$mountpoint fi ((idx += 1)) done echo $directory } function get_min_arc_size { typeset -l min_arc_size if is_freebsd; then min_arc_size=$(sysctl -n kstat.zfs.misc.arcstats.c_min) elif is_illumos; then min_arc_size=$(dtrace -qn 'BEGIN { printf("%u\n", `arc_stats.arcstat_c_min.value.ui64); exit(0); }') elif is_linux; then min_arc_size=`awk '$1 == "c_min" { print $3 }' \ /proc/spl/kstat/zfs/arcstats` fi [[ $? 
-eq 0 ]] || log_fail "get_min_arc_size failed" echo $min_arc_size } function get_max_arc_size { typeset -l max_arc_size if is_freebsd; then max_arc_size=$(sysctl -n kstat.zfs.misc.arcstats.c_max) elif is_illumos; then max_arc_size=$(dtrace -qn 'BEGIN { printf("%u\n", `arc_stats.arcstat_c_max.value.ui64); exit(0); }') elif is_linux; then max_arc_size=`awk '$1 == "c_max" { print $3 }' \ /proc/spl/kstat/zfs/arcstats` fi [[ $? -eq 0 ]] || log_fail "get_max_arc_size failed" echo $max_arc_size } -function get_max_dbuf_cache_size +function get_arc_target { - typeset -l max_dbuf_cache_size + typeset -l arc_c + + if is_freebsd; then + arc_c=$(sysctl -n kstat.zfs.misc.arcstats.c) + elif is_illumos; then + arc_c=$(dtrace -qn 'BEGIN { + printf("%u\n", `arc_stats.arcstat_c.value.ui64); + exit(0); + }') + elif is_linux; then + arc_c=`awk '$1 == "c" { print $3 }' \ + /proc/spl/kstat/zfs/arcstats` + fi + + [[ $? -eq 0 ]] || log_fail "get_arc_target failed" + + echo $arc_c +} + +function get_dbuf_cache_size +{ + typeset -l dbuf_cache_size dbuf_cache_shift if is_illumos; then - max_dbuf_cache_size=$(dtrace -qn 'BEGIN { + dbuf_cache_size=$(dtrace -qn 'BEGIN { printf("%u\n", `dbuf_cache_max_bytes); exit(0); }') else - max_dbuf_cache_size=$(get_tunable DBUF_CACHE_MAX_BYTES) + dbuf_cache_shift=$(get_tunable DBUF_CACHE_SHIFT) + dbuf_cache_size=$(($(get_arc_target) / 2**dbuf_cache_shift)) fi - [[ $? -eq 0 ]] || log_fail "get_max_dbuf_cache_size failed" + [[ $? -eq 0 ]] || log_fail "get_dbuf_cache_size failed" - echo $max_dbuf_cache_size + echo $dbuf_cache_size } # Create a file with some information about how this system is configured. function get_system_config { typeset config=$PERF_DATA_DIR/$1 echo "{" >>$config if is_linux; then echo " \"ncpus\": \"$(nproc --all)\"," >>$config echo " \"physmem\": \"$(free -b | \ awk '$1 == "Mem:" { print $2 }')\"," >>$config echo " \"c_max\": \"$(get_max_arc_size)\"," >>$config echo " \"hostname\": \"$(uname -n)\"," >>$config echo " \"kernel version\": \"$(uname -sr)\"," >>$config else dtrace -qn 'BEGIN{ printf(" \"ncpus\": %d,\n", `ncpus); printf(" \"physmem\": %u,\n", `physmem * `_pagesize); printf(" \"c_max\": %u,\n", `arc_stats.arcstat_c_max.value.ui64); printf(" \"kmem_flags\": \"0x%x\",", `kmem_flags); exit(0)}' >>$config echo " \"hostname\": \"$(uname -n)\"," >>$config echo " \"kernel version\": \"$(uname -v)\"," >>$config fi if is_linux; then lsblk -dino NAME,SIZE | awk 'BEGIN { printf(" \"disks\": {\n"); first = 1} {disk = $1} {size = $2; if (first != 1) {printf(",\n")} else {first = 0} printf(" \"%s\": \"%s\"", disk, size)} END {printf("\n },\n")}' >>$config zfs_tunables="/sys/module/zfs/parameters" printf " \"tunables\": {\n" >>$config for tunable in \ zfs_arc_max \ zfs_arc_meta_limit \ zfs_arc_sys_free \ zfs_dirty_data_max \ zfs_flags \ zfs_prefetch_disable \ zfs_txg_timeout \ zfs_vdev_aggregation_limit \ zfs_vdev_async_read_max_active \ zfs_vdev_async_write_max_active \ zfs_vdev_sync_read_max_active \ zfs_vdev_sync_write_max_active \ zio_slow_io_ms do if [ "$tunable" != "zfs_arc_max" ] then printf ",\n" >>$config fi printf " \"$tunable\": \"$(<$zfs_tunables/$tunable)\"" \ >>$config done printf "\n }\n" >>$config else iostat -En | awk 'BEGIN { printf(" \"disks\": {\n"); first = 1} /^c/ {disk = $1} /^Size: [^0]/ {size = $2; if (first != 1) {printf(",\n")} else {first = 0} printf(" \"%s\": \"%s\"", disk, size)} END {printf("\n },\n")}' >>$config sed -n 's/^set \(.*\)[ ]=[ ]\(.*\)/\1=\2/p' /etc/system | \ awk -F= 'BEGIN {printf(" \"system\": {\n"); first = 1} {if 
(first != 1) {printf(",\n")} else {first = 0}; printf(" \"%s\": %s", $1, $2)} END {printf("\n }\n")}' >>$config fi echo "}" >>$config } function num_jobs_by_cpu { if is_linux; then typeset ncpu=$($NPROC --all) else typeset ncpu=$(psrinfo | $WC -l) fi typeset num_jobs=$ncpu [[ $ncpu -gt 8 ]] && num_jobs=$(echo "$ncpu * 3 / 4" | bc) echo $num_jobs } # # On illumos this looks like: ":sd3:sd4:sd1:sd2:" # function pool_to_lun_list { typeset pool=$1 typeset ctd ctds devname lun typeset lun_list=':' if is_illumos; then ctds=$(zpool list -v $pool | awk '/c[0-9]*t[0-9a-fA-F]*d[0-9]*/ {print $1}') for ctd in $ctds; do # Get the device name as it appears in /etc/path_to_inst devname=$(readlink -f /dev/dsk/${ctd}s0 | sed -n \ 's/\/devices\([^:]*\):.*/\1/p') # Add a string composed of the driver name and instance # number to the list for comparison with dev_statname. lun=$(sed 's/"//g' /etc/path_to_inst | grep \ $devname | awk '{print $3$2}') lun_list="$lun_list$lun:" done elif is_freebsd; then lun_list+=$(zpool list -HLv $pool | \ awk '/a?da[0-9]+|md[0-9]+|mfid[0-9]+|nda[0-9]+|nvd[0-9]+|vtbd[0-9]+/ { printf "%s:", $1 }') elif is_linux; then ctds=$(zpool list -HLv $pool | \ awk '/sd[a-z]*|loop[0-9]*|dm-[0-9]*/ {print $1}') for ctd in $ctds; do lun_list="$lun_list$ctd:" done fi echo $lun_list } +function print_perf_settings +{ + echo "PERF_NTHREADS: $PERF_NTHREADS" + echo "PERF_NTHREADS_PER_FS: $PERF_NTHREADS_PER_FS" + echo "PERF_SYNC_TYPES: $PERF_SYNC_TYPES" + echo "PERF_IOSIZES: $PERF_IOSIZES" +} + # Create a perf_data directory to hold performance statistics and # configuration information. export PERF_DATA_DIR=$(get_perf_output_dir) [[ -f $PERF_DATA_DIR/config.json ]] || get_system_config config.json diff --git a/tests/zfs-tests/tests/perf/regression/random_reads.ksh b/tests/zfs-tests/tests/perf/regression/random_reads.ksh index e6d207e22747..5c8066d17549 100755 --- a/tests/zfs-tests/tests/perf/regression/random_reads.ksh +++ b/tests/zfs-tests/tests/perf/regression/random_reads.ksh @@ -1,114 +1,96 @@ #!/bin/ksh # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # -# Copyright (c) 2015, 2020 by Delphix. All rights reserved. +# Copyright (c) 2015, 2021 by Delphix. All rights reserved. # # # Description: # Trigger fio runs using the random_reads job file. The number of runs and # data collected is determined by the PERF_* variables. See do_fio_run for # details about these variables. # # The files to read from are created prior to the first fio run, and used # for all fio runs. The ARC is cleared with `zinject -a` prior to each run # so reads will go to disk. # # Thread/Concurrency settings: # PERF_NTHREADS defines the number of files created in the test filesystem, # as well as the number of threads that will simultaneously drive IO to # those files. The settings chosen are from measurements in the # PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that # are at peak throughput but lowest latency. Higher concurrency introduces # queue time latency and would reduce the impact of code-induced performance # regressions. # . $STF_SUITE/include/libtest.shlib . 
$STF_SUITE/tests/perf/perf.shlib function cleanup { # kill fio and iostat pkill fio pkill iostat recreate_perf_pool } trap "log_fail \"Measure IO stats during random read load\"" SIGTERM log_onexit cleanup recreate_perf_pool populate_perf_filesystems # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2)) -# Variables for use by fio. -if [[ -n $PERF_REGRESSION_WEEKLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} - export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'} -elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} - export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} -fi +# Variables specific to this test for use by fio. +export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'} +export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} +export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} # Layout the files to be used by the read tests. Create as many files as the # largest number of threads. An fio run with fewer threads will use a subset # of the available files. export NUMJOBS=$(get_max $PERF_NTHREADS) export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS)) export DIRECTORY=$(get_directory) log_must fio $FIO_SCRIPTS/mkfiles.fio # Set up the scripts and output files that will log performance data. lun_list=$(pool_to_lun_list $PERFPOOL) log_note "Collecting backend IO stats with lun list $lun_list" if is_linux; then typeset perf_record_cmd="perf record -F 99 -a -g -q \ -o /dev/stdout -- sleep ${PERF_RUNTIME}" export collect_scripts=( "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat" "vmstat -t 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -tdxyz 1" "iostat" "$perf_record_cmd" "perf" ) else export collect_scripts=( "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" "vmstat -T d 1" "vmstat" "mpstat -T d 1" "mpstat" "iostat -T d -xcnz 1" "iostat" ) fi -log_note "Random reads with $PERF_RUNTYPE settings" +log_note "Random reads with settings: $(print_perf_settings)" do_fio_run random_reads.fio false true log_pass "Measure IO stats during random read load" diff --git a/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh b/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh index 573e9c7d4c58..33d7d8c8d945 100755 --- a/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh +++ b/tests/zfs-tests/tests/perf/regression/random_readwrite.ksh @@ -1,114 +1,96 @@ #!/bin/ksh # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. 
# # -# Copyright (c) 2015, 2020 by Delphix. All rights reserved. +# Copyright (c) 2015, 2021 by Delphix. All rights reserved. # # # Description: # Trigger fio runs using the random_readwrite job file. The number of runs and # data collected is determined by the PERF_* variables. See do_fio_run for # details about these variables. # # The files to read and write from are created prior to the first fio run, # and used for all fio runs. The ARC is cleared with `zinject -a` prior to # each run so reads will go to disk. # # Thread/Concurrency settings: # PERF_NTHREADS defines the number of files created in the test filesystem, # as well as the number of threads that will simultaneously drive IO to # those files. The settings chosen are from measurements in the # PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that # are at peak throughput but lowest latency. Higher concurrency introduces # queue time latency and would reduce the impact of code-induced performance # regressions. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/perf/perf.shlib function cleanup { # kill fio and iostat pkill fio pkill iostat recreate_perf_pool } trap "log_fail \"Measure IO stats during random read load\"" SIGTERM log_onexit cleanup recreate_perf_pool populate_perf_filesystems # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2)) -# Variables for use by fio. -if [[ -n $PERF_REGRESSION_WEEKLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'4 8 16 64'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'} - export PERF_IOSIZES='' # bssplit used instead -elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'32 64'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} - export PERF_IOSIZES='' # bssplit used instead -fi +# Variables specific to this test for use by fio. +export PERF_NTHREADS=${PERF_NTHREADS:-'32 64'} +export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} +export PERF_IOSIZES='' # bssplit used instead # Layout the files to be used by the readwrite tests. Create as many files # as the largest number of threads. An fio run with fewer threads will use # a subset of the available files. export NUMJOBS=$(get_max $PERF_NTHREADS) export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS)) export DIRECTORY=$(get_directory) log_must fio $FIO_SCRIPTS/mkfiles.fio # Set up the scripts and output files that will log performance data. 
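With the nightly/weekly split removed, the old heavier profile for this test is now just an environment override of the defaults above; a hedged invocation sketch reusing the values from the deleted weekly branch (the disk names and the perf runfile name are assumptions, not part of the patch):

    PERF_RUNTIME=1800 PERF_NTHREADS='4 8 16 64' PERF_SYNC_TYPES='0 1' \
        DISKS="sdb sdc sdd" ./scripts/zfs-tests.sh -T perf -r perf-regression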
lun_list=$(pool_to_lun_list $PERFPOOL) log_note "Collecting backend IO stats with lun list $lun_list" if is_linux; then typeset perf_record_cmd="perf record -F 99 -a -g -q \ -o /dev/stdout -- sleep ${PERF_RUNTIME}" export collect_scripts=( "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat" "vmstat -t 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -tdxyz 1" "iostat" "$perf_record_cmd" "perf" ) else export collect_scripts=( "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" "vmstat -T d 1" "vmstat" "mpstat -T d 1" "mpstat" "iostat -T d -xcnz 1" "iostat" ) fi -log_note "Random reads and writes with $PERF_RUNTYPE settings" +log_note "Random reads and writes with settings: $(print_perf_settings)" do_fio_run random_readwrite.fio false true log_pass "Measure IO stats during random read and write load" diff --git a/tests/zfs-tests/tests/perf/regression/random_readwrite_fixed.ksh b/tests/zfs-tests/tests/perf/regression/random_readwrite_fixed.ksh index 78af5213a3d3..bb4014563f1f 100755 --- a/tests/zfs-tests/tests/perf/regression/random_readwrite_fixed.ksh +++ b/tests/zfs-tests/tests/perf/regression/random_readwrite_fixed.ksh @@ -1,106 +1,88 @@ #!/bin/ksh # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # -# Copyright (c) 2017, 2020 by Delphix. All rights reserved. +# Copyright (c) 2017, 2021 by Delphix. All rights reserved. # # # Description: # Trigger fio runs using the random_readwrite_fixed job file. The number of runs and # data collected is determined by the PERF_* variables. See do_fio_run for # details about these variables. # # The files to read and write from are created prior to the first fio run, # and used for all fio runs. The ARC is cleared with `zinject -a` prior to # each run so reads will go to disk. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/perf/perf.shlib function cleanup { # kill fio and iostat pkill fio pkill iostat recreate_perf_pool } trap "log_fail \"Measure IO stats during random read write load\"" SIGTERM log_onexit cleanup recreate_perf_pool populate_perf_filesystems # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2)) -# Variables for use by fio. 
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'} - export PERF_IOSIZES='8k 64k' -elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} - export PERF_IOSIZES='8k' -fi +# Variables specific to this test for use by fio. +export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'} +export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} +export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} # Layout the files to be used by the readwrite tests. Create as many files # as the largest number of threads. An fio run with fewer threads will use # a subset of the available files. export NUMJOBS=$(get_max $PERF_NTHREADS) export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS)) export DIRECTORY=$(get_directory) log_must fio $FIO_SCRIPTS/mkfiles.fio # Set up the scripts and output files that will log performance data. lun_list=$(pool_to_lun_list $PERFPOOL) log_note "Collecting backend IO stats with lun list $lun_list" if is_linux; then typeset perf_record_cmd="perf record -F 99 -a -g -q \ -o /dev/stdout -- sleep ${PERF_RUNTIME}" export collect_scripts=( "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat" "vmstat -t 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -tdxyz 1" "iostat" "$perf_record_cmd" "perf" ) else export collect_scripts=( "kstat zfs:0 1" "kstat" "vmstat -T d 1" "vmstat" "mpstat -T d 1" "mpstat" "iostat -T d -xcnz 1" "iostat" "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" "dtrace -s $PERF_SCRIPTS/profile.d" "profile" ) fi -log_note "Random reads and writes with $PERF_RUNTYPE settings" +log_note "Random reads and writes with settings: $(print_perf_settings)" do_fio_run random_readwrite_fixed.fio false true log_pass "Measure IO stats during random read and write load" diff --git a/tests/zfs-tests/tests/perf/regression/random_writes.ksh b/tests/zfs-tests/tests/perf/regression/random_writes.ksh index dca013cbae0c..4b826835efbf 100755 --- a/tests/zfs-tests/tests/perf/regression/random_writes.ksh +++ b/tests/zfs-tests/tests/perf/regression/random_writes.ksh @@ -1,105 +1,87 @@ #!/bin/ksh # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # -# Copyright (c) 2015, 2020 by Delphix. All rights reserved. +# Copyright (c) 2015, 2021 by Delphix. All rights reserved. # # # Description: # Trigger fio runs using the random_writes job file. The number of runs and # data collected is determined by the PERF_* variables. See do_fio_run for # details about these variables. 
# # Prior to each fio run the dataset is recreated, and fio writes new files # into an otherwise empty pool. # # Thread/Concurrency settings: # PERF_NTHREADS defines the number of files created in the test filesystem, # as well as the number of threads that will simultaneously drive IO to # those files. The settings chosen are from measurements in the # PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that # are at peak throughput but lowest latency. Higher concurrency introduces # queue time latency and would reduce the impact of code-induced performance # regressions. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/perf/perf.shlib function cleanup { # kill fio and iostat pkill fio pkill iostat recreate_perf_pool } trap "log_fail \"Measure IO stats during random read load\"" SIGTERM log_onexit cleanup recreate_perf_pool populate_perf_filesystems # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2)) -# Variables for use by fio. -if [[ -n $PERF_REGRESSION_WEEKLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'} - export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 256k'} -elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'32 128'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} - export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} -fi +# Variables specific to this test for use by fio. +export PERF_NTHREADS=${PERF_NTHREADS:-'32 128'} +export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'} +export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} # Set up the scripts and output files that will log performance data. 
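As a reading aid for the do_fio_run calls in these regression scripts (the argument meanings come from do_fio_run_impl in perf.shlib above):

    #   do_fio_run <fio job> <recreate pool/filesystems> <clear ARC>
    # This write test passes "true false" (fresh dataset each run, ARC
    # left alone); the read tests pass "false true" (reuse the laid-out
    # files, drop cached data with "zinject -a" before each run).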
lun_list=$(pool_to_lun_list $PERFPOOL) log_note "Collecting backend IO stats with lun list $lun_list" if is_linux; then typeset perf_record_cmd="perf record -F 99 -a -g -q \ -o /dev/stdout -- sleep ${PERF_RUNTIME}" export collect_scripts=( "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat" "vmstat -t 1" "vmstat" "mpstat -P ALL 1" "mpstat" "iostat -tdxyz 1" "iostat" "$perf_record_cmd" "perf" ) else export collect_scripts=( "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io" "vmstat -T d 1" "vmstat" "mpstat -T d 1" "mpstat" "iostat -T d -xcnz 1" "iostat" ) fi -log_note "Random writes with $PERF_RUNTYPE settings" +log_note "Random writes with settings: $(print_perf_settings)" do_fio_run random_writes.fio true false log_pass "Measure IO stats during random write load" diff --git a/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh b/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh index 5d4fd77a7458..522ee4526828 100755 --- a/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh +++ b/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh @@ -1,100 +1,83 @@ #!/bin/ksh # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # -# Copyright (c) 2015, 2020 by Delphix. All rights reserved. +# Copyright (c) 2015, 2021 by Delphix. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/perf/perf.shlib function cleanup { # kill fio and iostat pkill fio pkill iostat # # We're using many filesystems depending on the number of # threads for each test, and there's no good way to get a list # of all the filesystems that should be destroyed on cleanup # (i.e. the list of filesystems used for the last test ran). # Thus, we simply recreate the pool as a way to destroy all # filesystems and leave a fresh pool behind. # recreate_perf_pool } trap "log_fail \"Measure IO stats during random write load\"" SIGTERM log_onexit cleanup recreate_perf_pool # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio. export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2)) -if [[ -n $PERF_REGRESSION_WEEKLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'1 2 4 8 16 32 64 128'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} - export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} - -elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then - export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY} - export PERF_RANDSEED=${PERF_RANDSEED:-'1234'} - export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'} - export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'} - export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'} - export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 16 64'} - export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'} - export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'} - export PERF_IOSIZES=${PERF_IOSIZES:-'8k'} -fi +# Variables specific to this test for use by fio. 
+export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 16 64'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
 # Until the performance tests over NFS can deal with multiple file systems,
 # force the use of only one file system when testing over NFS.
 [[ $NFS -eq 1 ]] && PERF_NTHREADS_PER_FS='0'
 lun_list=$(pool_to_lun_list $PERFPOOL)
 log_note "Collecting backend IO stats with lun list $lun_list"
 if is_linux; then
     typeset perf_record_cmd="perf record -F 99 -a -g -q \
         -o /dev/stdout -- sleep ${PERF_RUNTIME}"
     export collect_scripts=(
         "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
         "vmstat -t 1" "vmstat"
         "mpstat -P ALL 1" "mpstat"
         "iostat -tdxyz 1" "iostat"
         "$perf_record_cmd" "perf"
     )
 else
     export collect_scripts=(
         "kstat zfs:0 1" "kstat"
         "vmstat -T d 1" "vmstat"
         "mpstat -T d 1" "mpstat"
         "iostat -T d -xcnz 1" "iostat"
         "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
         "dtrace -s $PERF_SCRIPTS/zil.d $PERFPOOL 1" "zil"
         "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
         "dtrace -s $PERF_SCRIPTS/offcpu-profile.d" "offcpu-profile"
     )
 fi
-log_note "ZIL specific random write workload with $PERF_RUNTYPE settings"
+log_note \
+    "ZIL specific random write workload with settings: $(print_perf_settings)"
 do_fio_run random_writes.fio true false
 log_pass "Measure IO stats during ZIL specific random write workload"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
index e5cf6278391c..2bdfff736f4e 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads.ksh
@@ -1,116 +1,98 @@
 #!/bin/ksh
 #
 # This file and its contents are supplied under the terms of the
 # Common Development and Distribution License ("CDDL"), version 1.0.
 # You may only use this file in accordance with the terms of version
 # 1.0 of the CDDL.
 #
 # A full copy of the text of the CDDL should have accompanied this
 # source. A copy of the CDDL is also available via the Internet at
 # http://www.illumos.org/license/CDDL.
 #
 #
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
 #
 #
 # Description:
 # Trigger fio runs using the sequential_reads job file. The number of runs and
 # data collected is determined by the PERF_* variables. See do_fio_run for
 # details about these variables.
 #
 # The files to read from are created prior to the first fio run, and used
 # for all fio runs. The ARC is cleared with `zinject -a` prior to each run
 # so reads will go to disk.
 #
 # Thread/Concurrency settings:
 #    PERF_NTHREADS defines the number of files created in the test filesystem,
 #    as well as the number of threads that will simultaneously drive IO to
 #    those files. The settings chosen are from measurements in the
 #    PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
 #    are at peak throughput but lowest latency. Higher concurrency introduces
 #    queue time latency and would reduce the impact of code-induced performance
 #    regressions.
 #
 . $STF_SUITE/include/libtest.shlib
 . $STF_SUITE/tests/perf/perf.shlib
 function cleanup
 {
     # kill fio and iostat
     pkill fio
     pkill iostat
     recreate_perf_pool
 }
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
 log_onexit cleanup
 recreate_perf_pool
 populate_perf_filesystems
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
 export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'8 16'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
 # Layout the files to be used by the read tests. Create as many files as the
 # largest number of threads. An fio run with fewer threads will use a subset
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
 export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 # Set up the scripts and output files that will log performance data.
 lun_list=$(pool_to_lun_list $PERFPOOL)
 log_note "Collecting backend IO stats with lun list $lun_list"
 if is_linux; then
     typeset perf_record_cmd="perf record -F 99 -a -g -q \
         -o /dev/stdout -- sleep ${PERF_RUNTIME}"
     export collect_scripts=(
         "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
         "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
         "vmstat -t 1" "vmstat"
         "mpstat -P ALL 1" "mpstat"
         "iostat -tdxyz 1" "iostat"
         "$perf_record_cmd" "perf"
     )
 else
     export collect_scripts=(
         "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
         "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
         "vmstat -T d 1" "vmstat"
         "mpstat -T d 1" "mpstat"
         "iostat -T d -xcnz 1" "iostat"
     )
 fi
-log_note "Sequential reads with $PERF_RUNTYPE settings"
+log_note "Sequential reads with settings: $(print_perf_settings)"
 do_fio_run sequential_reads.fio false true
 log_pass "Measure IO stats during sequential read load"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
index d44e37f3eaaf..8127786361ba 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached.ksh
@@ -1,106 +1,88 @@
 #!/bin/ksh
 #
 # This file and its contents are supplied under the terms of the
 # Common Development and Distribution License ("CDDL"), version 1.0.
 # You may only use this file in accordance with the terms of version
 # 1.0 of the CDDL.
 #
 # A full copy of the text of the CDDL should have accompanied this
 # source. A copy of the CDDL is also available via the Internet at
 # http://www.illumos.org/license/CDDL.
 #
 #
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
 #
 #
 # Description:
 # Trigger fio runs using the sequential_reads job file. The number of runs and
 # data collected is determined by the PERF_* variables. See do_fio_run for
 # details about these variables.
 #
 # The files to read from are created prior to the first fio run, and used
 # for all fio runs. The ARC is not cleared to ensure that all data is cached.
 #
 . $STF_SUITE/include/libtest.shlib
 . $STF_SUITE/tests/perf/perf.shlib
 function cleanup
 {
     # kill fio and iostat
     pkill fio
     pkill iostat
     recreate_perf_pool
 }
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
 log_onexit cleanup
 recreate_perf_pool
 populate_perf_filesystems
 # Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
 export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
 # Layout the files to be used by the read tests. Create as many files as the
 # largest number of threads. An fio run with fewer threads will use a subset
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
 export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 # Set up the scripts and output files that will log performance data.
 lun_list=$(pool_to_lun_list $PERFPOOL)
 log_note "Collecting backend IO stats with lun list $lun_list"
 if is_linux; then
     typeset perf_record_cmd="perf record -F 99 -a -g -q \
         -o /dev/stdout -- sleep ${PERF_RUNTIME}"
     export collect_scripts=(
         "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
         "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
         "vmstat -t 1" "vmstat"
         "mpstat -P ALL 1" "mpstat"
         "iostat -tdxyz 1" "iostat"
         "$perf_record_cmd" "perf"
     )
 else
     export collect_scripts=(
         "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
         "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
         "vmstat -T d 1" "vmstat"
         "mpstat -T d 1" "mpstat"
         "iostat -T d -xcnz 1" "iostat"
     )
 fi
-log_note "Sequential cached reads with $PERF_RUNTYPE settings"
+log_note "Sequential cached reads with settings: $(print_perf_settings)"
 do_fio_run sequential_reads.fio false false
 log_pass "Measure IO stats during sequential cached read load"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
index 1b3ee85ec55a..8ce1273c2869 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_arc_cached_clone.ksh
@@ -1,132 +1,115 @@
 #!/bin/ksh
 #
 # This file and its contents are supplied under the terms of the
 # Common Development and Distribution License ("CDDL"), version 1.0.
 # You may only use this file in accordance with the terms of version
 # 1.0 of the CDDL.
 #
 # A full copy of the text of the CDDL should have accompanied this
 # source. A copy of the CDDL is also available via the Internet at
 # http://www.illumos.org/license/CDDL.
 #
 #
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
 #
 #
 # Description:
 # Trigger fio runs using the sequential_reads job file. The number of runs and
 # data collected is determined by the PERF_* variables. See do_fio_run for
 # details about these variables.
 #
 # The files to read from are created prior to the first fio run, and used
 # for all fio runs. This test will exercise cached read performance from
 # a clone filesystem. The data is initially cached in the ARC and then
 # a snapshot and clone are created. All the performance runs are then
 # initiated against the clone filesystem to exercise the performance of
 # reads when the ARC has to create another buffer from a different dataset.
 # It will also exercise the need to evict the duplicate buffer once the last
 # reference on that buffer is released.
 #
 . $STF_SUITE/include/libtest.shlib
 . $STF_SUITE/tests/perf/perf.shlib
 function cleanup
 {
     # kill fio and iostat
     pkill fio
     pkill iostat
     recreate_perf_pool
 }
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
 log_onexit cleanup
 recreate_perf_pool
 populate_perf_filesystems
 # Make sure the working set can be cached in the arc. Aim for 1/2 of arc.
 export TOTAL_SIZE=$(($(get_max_arc_size) / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'64 128'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'128k 1m'}
 # Layout the files to be used by the read tests. Create as many files as the
 # largest number of threads. An fio run with fewer threads will use a subset
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
 export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 #
 # Only a single filesystem is used by this test. To be defensive, we
 # double check that TESTFS only contains a single filesystem. We
 # wouldn't want to assume this was the case, and have it actually
 # contain multiple filesystem (causing cascading failures later).
 #
 log_must test $(get_nfilesystems) -eq 1
 log_note "Creating snapshot, $TESTSNAP, of $TESTFS"
 create_snapshot $TESTFS $TESTSNAP
 log_note "Creating clone, $PERFPOOL/$TESTCLONE, from $TESTFS@$TESTSNAP"
 create_clone $TESTFS@$TESTSNAP $PERFPOOL/$TESTCLONE
 #
 # We want to run FIO against the clone we created above, and not the
 # clone's originating filesystem. Thus, we override the default behavior
 # and explicitly set TESTFS to the clone.
 #
 export TESTFS=$PERFPOOL/$TESTCLONE
 # Set up the scripts and output files that will log performance data.
 lun_list=$(pool_to_lun_list $PERFPOOL)
 log_note "Collecting backend IO stats with lun list $lun_list"
 if is_linux; then
     typeset perf_record_cmd="perf record -F 99 -a -g -q \
         -o /dev/stdout -- sleep ${PERF_RUNTIME}"
     export collect_scripts=(
         "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
         "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
         "vmstat -t 1" "vmstat"
         "mpstat -P ALL 1" "mpstat"
         "iostat -tdxyz 1" "iostat"
         "$perf_record_cmd" "perf"
     )
 else
     export collect_scripts=(
         "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
         "$PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
         "vmstat -T d 1" "vmstat"
         "mpstat -T d 1" "mpstat"
         "iostat -T d -xcnz 1" "iostat"
     )
 fi
-log_note "Sequential cached reads from $DIRECTORY with $PERF_RUNTYPE settings"
+log_note "Sequential cached reads from $DIRECTORY with " \
+    "settings: $(print_perf_settings)"
 do_fio_run sequential_reads.fio false false
 log_pass "Measure IO stats during sequential cached read load"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
index 888136fec93c..adacdc29799c 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_reads_dbuf_cached.ksh
@@ -1,112 +1,94 @@
 #!/bin/ksh
 #
 # This file and its contents are supplied under the terms of the
 # Common Development and Distribution License ("CDDL"), version 1.0.
 # You may only use this file in accordance with the terms of version
 # 1.0 of the CDDL.
 #
 # A full copy of the text of the CDDL should have accompanied this
 # source. A copy of the CDDL is also available via the Internet at
 # http://www.illumos.org/license/CDDL.
 #
 #
-# Copyright (c) 2016, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2016, 2021 by Delphix. All rights reserved.
 #
 #
 # Description:
 # Trigger fio runs using the sequential_reads job file. The number of runs and
 # data collected is determined by the PERF_* variables. See do_fio_run for
 # details about these variables.
 #
 # The files to read from are created prior to the first fio run, and used
 # for all fio runs. The ARC is not cleared to ensure that all data is cached.
 #
 # This is basically a copy of the sequential_reads_cached test case, but with
 # a smaller dataset so that we can fit everything into the decompressed, linear
 # space in the dbuf cache.
 #
 . $STF_SUITE/include/libtest.shlib
 . $STF_SUITE/tests/perf/perf.shlib
 function cleanup
 {
     # kill fio and iostat
     pkill fio
     pkill iostat
     recreate_perf_pool
 }
 trap "log_fail \"Measure IO stats during sequential read load\"" SIGTERM
 log_onexit cleanup
 recreate_perf_pool
 populate_perf_filesystems
 # Ensure the working set can be cached in the dbuf cache.
-export TOTAL_SIZE=$(($(get_max_dbuf_cache_size) * 3 / 4))
+export TOTAL_SIZE=$(($(get_dbuf_cache_size) * 3 / 4))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'8 16 32 64'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 128k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'64'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'64k'}
 # Layout the files to be used by the read tests. Create as many files as the
 # largest number of threads. An fio run with fewer threads will use a subset
 # of the available files.
 export NUMJOBS=$(get_max $PERF_NTHREADS)
 export FILE_SIZE=$((TOTAL_SIZE / NUMJOBS))
 export DIRECTORY=$(get_directory)
 log_must fio $FIO_SCRIPTS/mkfiles.fio
 # Set up the scripts and output files that will log performance data.
 lun_list=$(pool_to_lun_list $PERFPOOL)
 log_note "Collecting backend IO stats with lun list $lun_list"
 if is_linux; then
     typeset perf_record_cmd="perf record -F 99 -a -g -q \
         -o /dev/stdout -- sleep ${PERF_RUNTIME}"
     export collect_scripts=(
         "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
         "$PERF_SCRIPTS/prefetch_io.sh $PERFPOOL 1" "prefetch"
         "vmstat -t 1" "vmstat"
         "mpstat -P ALL 1" "mpstat"
         "iostat -tdxyz 1" "iostat"
         "$perf_record_cmd" "perf"
     )
 else
     export collect_scripts=(
         "kstat zfs:0 1" "kstat"
         "vmstat -T d 1" "vmstat"
         "mpstat -T d 1" "mpstat"
         "iostat -T d -xcnz 1" "iostat"
         "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
         "dtrace -Cs $PERF_SCRIPTS/prefetch_io.d $PERFPOOL 1" "prefetch"
         "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
     )
 fi
-log_note "Sequential cached reads with $PERF_RUNTYPE settings"
+log_note "Sequential cached reads with settings: $(print_perf_settings)"
 do_fio_run sequential_reads.fio false false
 log_pass "Measure IO stats during sequential cached read load"
diff --git a/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh b/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
index b4f466c4f65c..d32690a0542e 100755
--- a/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
+++ b/tests/zfs-tests/tests/perf/regression/sequential_writes.ksh
@@ -1,105 +1,87 @@
 #!/bin/ksh
 #
 # This file and its contents are supplied under the terms of the
 # Common Development and Distribution License ("CDDL"), version 1.0.
 # You may only use this file in accordance with the terms of version
 # 1.0 of the CDDL.
 #
 # A full copy of the text of the CDDL should have accompanied this
 # source. A copy of the CDDL is also available via the Internet at
 # http://www.illumos.org/license/CDDL.
 #
 #
-# Copyright (c) 2015, 2020 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
 #
 #
 # Description:
 # Trigger fio runs using the sequential_writes job file. The number of runs and
 # data collected is determined by the PERF_* variables. See do_fio_run for
 # details about these variables.
 #
 # Prior to each fio run the dataset is recreated, and fio writes new files
 # into an otherwise empty pool.
 #
 # Thread/Concurrency settings:
 #    PERF_NTHREADS defines the number of files created in the test filesystem,
 #    as well as the number of threads that will simultaneously drive IO to
 #    those files. The settings chosen are from measurements in the
 #    PerfAutoESX/ZFSPerfESX Environments, selected at concurrency levels that
 #    are at peak throughput but lowest latency. Higher concurrency introduces
 #    queue time latency and would reduce the impact of code-induced performance
 #    regressions.
 #
 . $STF_SUITE/include/libtest.shlib
 . $STF_SUITE/tests/perf/perf.shlib
 function cleanup
 {
     # kill fio and iostat
     pkill fio
     pkill iostat
     recreate_perf_pool
 }
 trap "log_fail \"Measure IO stats during random read load\"" SIGTERM
 log_onexit cleanup
 recreate_perf_pool
 populate_perf_filesystems
 # Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
 export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))
-# Variables for use by fio.
-if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 8 16 32 64 128'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'0 1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'8k 64k 256k'}
-elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
-    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
-    export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
-    export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
-    export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
-    export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
-    export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
-    export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
-    export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
-    export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
-fi
+# Variables specific to this test for use by fio.
+export PERF_NTHREADS=${PERF_NTHREADS:-'16 32'}
+export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0'}
+export PERF_IOSIZES=${PERF_IOSIZES:-'8k 128k 1m'}
 # Set up the scripts and output files that will log performance data.
 lun_list=$(pool_to_lun_list $PERFPOOL)
 log_note "Collecting backend IO stats with lun list $lun_list"
 if is_linux; then
     typeset perf_record_cmd="perf record -F 99 -a -g -q \
         -o /dev/stdout -- sleep ${PERF_RUNTIME}"
     export collect_scripts=(
         "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
         "vmstat -t 1" "vmstat"
         "mpstat -P ALL 1" "mpstat"
         "iostat -tdxyz 1" "iostat"
         "$perf_record_cmd" "perf"
     )
 else
     export collect_scripts=(
         "$PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
         "vmstat -T d 1" "vmstat"
         "mpstat -T d 1" "mpstat"
         "iostat -T d -xcnz 1" "iostat"
     )
 fi
-log_note "Sequential writes with $PERF_RUNTYPE settings"
+log_note "Sequential writes with settings: $(print_perf_settings)"
 do_fio_run sequential_writes.fio true false
 log_pass "Measure IO stats during sequential write load"
diff --git a/tests/zfs-tests/tests/perf/regression/setup.ksh b/tests/zfs-tests/tests/perf/regression/setup.ksh
index 1544f637d8d9..68be00d4a63c 100755
--- a/tests/zfs-tests/tests/perf/regression/setup.ksh
+++ b/tests/zfs-tests/tests/perf/regression/setup.ksh
@@ -1,23 +1,22 @@
 #!/bin/ksh
 #
 # This file and its contents are supplied under the terms of the
 # Common Development and Distribution License ("CDDL"), version 1.0.
 # You may only use this file in accordance with the terms of version
 # 1.0 of the CDDL.
 #
 # A full copy of the text of the CDDL should have accompanied this
 # source. A copy of the CDDL is also available via the Internet at
 # http://www.illumos.org/license/CDDL.
 #
 #
-# Copyright (c) 2015 by Delphix. All rights reserved.
+# Copyright (c) 2015, 2021 by Delphix. All rights reserved.
 #
 . $STF_SUITE/include/libtest.shlib
 verify_runnable "global"
-verify_disk_count "$DISKS" 3
 log_pass
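
Note on the helper used by the updated log_note lines: every hunk above replaces "$PERF_RUNTYPE settings" with "$(print_perf_settings)", a function expected to be provided by tests/zfs-tests/tests/perf/perf.shlib but not shown in this excerpt. A minimal ksh sketch of such a helper, assuming it simply echoes the per-run fio variables (the real perf.shlib definition may print more variables or use a different format):

function print_perf_settings
{
    # Hypothetical sketch only; not the upstream implementation.
    echo "PERF_NTHREADS: $PERF_NTHREADS" \
        "PERF_NTHREADS_PER_FS: $PERF_NTHREADS_PER_FS" \
        "PERF_SYNC_TYPES: $PERF_SYNC_TYPES" \
        "PERF_IOSIZES: $PERF_IOSIZES"
}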
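
Similarly, removing the per-test PERF_REGRESSION_WEEKLY/PERF_REGRESSION_NIGHTLY blocks implies that the shared defaults (runtime, random seed, compression percentage and chunk size, sync types, run type) are now set once in perf.shlib rather than repeated in every script; that perf.shlib change is not part of this excerpt. A sketch of the kind of centralized defaults this relies on, under that assumption and with illustrative values taken from the removed blocks:

# Illustrative only; the actual perf.shlib defaults are not shown in this diff.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
    export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
else
    export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
    export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
fi
export PERF_RANDSEED=${PERF_RANDSEED:-'1234'}
export PERF_COMPPERCENT=${PERF_COMPPERCENT:-'66'}
export PERF_COMPCHUNK=${PERF_COMPCHUNK:-'4096'}
export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}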