diff --git a/scripts/zfs-tests.sh b/scripts/zfs-tests.sh index f00a284847ac..7c5286ba70ff 100755 --- a/scripts/zfs-tests.sh +++ b/scripts/zfs-tests.sh @@ -1,622 +1,629 @@ #!/bin/bash # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License, Version 1.0 only # (the "License"). You may not use this file except in compliance # with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # BASE_DIR=$(dirname "$0") SCRIPT_COMMON=common.sh if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then . "${BASE_DIR}/${SCRIPT_COMMON}" else echo "Missing helper script ${SCRIPT_COMMON}" && exit 1 fi PROG=zfs-tests.sh VERBOSE="no" QUIET= CLEANUP="yes" CLEANUPALL="no" LOOPBACK="yes" STACK_TRACER="no" FILESIZE="4G" RUNFILE=${RUNFILE:-"linux.run"} FILEDIR=${FILEDIR:-/var/tmp} DISKS=${DISKS:-""} SINGLETEST=() SINGLETESTUSER="root" TAGS="" ITERATIONS=1 ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh" ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh" ZFS_MMP="$STF_SUITE/callbacks/zfs_mmp.ksh" TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DBGMSG:$ZFS_DMESG:$ZFS_MMP"} LOSETUP=${LOSETUP:-/sbin/losetup} DMSETUP=${DMSETUP:-/sbin/dmsetup} # # Log an informational message when additional verbosity is enabled. # msg() { if [ "$VERBOSE" = "yes" ]; then echo "$@" fi } # # Log a failure message, cleanup, and return an error. 
# fail() { echo -e "$PROG: $1" >&2 cleanup exit 1 } # # Attempt to remove loopback devices and files which where created earlier # by this script to run the test framework. The '-k' option may be passed # to the script to suppress cleanup for debugging purposes. # cleanup() { if [ "$CLEANUP" = "no" ]; then return 0 fi if [ "$LOOPBACK" = "yes" ]; then for TEST_LOOPBACK in ${LOOPBACKS}; do LOOP_DEV=$(basename "$TEST_LOOPBACK") DM_DEV=$(sudo "${DMSETUP}" ls 2>/dev/null | \ grep "${LOOP_DEV}" | cut -f1) if [ -n "$DM_DEV" ]; then sudo "${DMSETUP}" remove "${DM_DEV}" || echo "Failed to remove: ${DM_DEV}" fi if [ -n "${TEST_LOOPBACK}" ]; then sudo "${LOSETUP}" -d "${TEST_LOOPBACK}" || echo "Failed to remove: ${TEST_LOOPBACK}" fi done fi for TEST_FILE in ${FILES}; do rm -f "${TEST_FILE}" &>/dev/null done if [ "$STF_PATH_REMOVE" = "yes" ] && [ -d "$STF_PATH" ]; then rm -Rf "$STF_PATH" fi } trap cleanup EXIT # # Attempt to remove all testpools (testpool.XXX), unopened dm devices, # loopback devices, and files. This is a useful way to cleanup a previous # test run failure which has left the system in an unknown state. This can # be dangerous and should only be used in a dedicated test environment. 
# cleanup_all() { local TEST_POOLS TEST_POOLS=$(sudo "$ZPOOL" list -H -o name | grep testpool) local TEST_LOOPBACKS TEST_LOOPBACKS=$(sudo "${LOSETUP}" -a|grep file-vdev|cut -f1 -d:) local TEST_FILES TEST_FILES=$(ls /var/tmp/file-vdev* 2>/dev/null) msg msg "--- Cleanup ---" msg "Removing pool(s): $(echo "${TEST_POOLS}" | tr '\n' ' ')" for TEST_POOL in $TEST_POOLS; do sudo "$ZPOOL" destroy "${TEST_POOL}" done msg "Removing dm(s): $(sudo "${DMSETUP}" ls | grep loop | tr '\n' ' ')" sudo "${DMSETUP}" remove_all msg "Removing loopback(s): $(echo "${TEST_LOOPBACKS}" | tr '\n' ' ')" for TEST_LOOPBACK in $TEST_LOOPBACKS; do sudo "${LOSETUP}" -d "${TEST_LOOPBACK}" done msg "Removing files(s): $(echo "${TEST_FILES}" | tr '\n' ' ')" for TEST_FILE in $TEST_FILES; do sudo rm -f "${TEST_FILE}" done } # # Takes a name as the only arguments and looks for the following variations # on that name. If one is found it is returned. # # $RUNFILE_DIR/ # $RUNFILE_DIR/.run # # .run # find_runfile() { local NAME=$1 local RESULT="" if [ -f "$RUNFILE_DIR/$NAME" ]; then RESULT="$RUNFILE_DIR/$NAME" elif [ -f "$RUNFILE_DIR/$NAME.run" ]; then RESULT="$RUNFILE_DIR/$NAME.run" elif [ -f "$NAME" ]; then RESULT="$NAME" elif [ -f "$NAME.run" ]; then RESULT="$NAME.run" fi echo "$RESULT" } # # Symlink file if it appears under any of the given paths. # create_links() { local dir_list="$1" local file_list="$2" [ -n "$STF_PATH" ] || fail "STF_PATH wasn't correctly set" for i in $file_list; do for j in $dir_list; do [ ! -e "$STF_PATH/$i" ] || continue if [ ! -d "$j/$i" ] && [ -e "$j/$i" ]; then ln -s "$j/$i" "$STF_PATH/$i" || \ fail "Couldn't link $i" break fi done [ ! -e "$STF_PATH/$i" ] && STF_MISSING_BIN="$STF_MISSING_BIN$i " done } # # Constrain the path to limit the available binaries to a known set. # When running in-tree a top level ./bin/ directory is created for # convenience, otherwise a temporary directory is used. # constrain_path() { . 
"$STF_SUITE/include/commands.cfg" if [ "$INTREE" = "yes" ]; then # Constrained path set to ./zfs/bin/ STF_PATH="$BIN_DIR" STF_PATH_REMOVE="no" STF_MISSING_BIN="" if [ ! -d "$STF_PATH" ]; then mkdir "$STF_PATH" chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH" fi # Special case links for standard zfs utilities DIRS="$(find "$CMD_DIR" -type d \( ! -name .deps -a \ ! -name .libs \) -print | tr '\n' ' ')" create_links "$DIRS" "$ZFS_FILES" # Special case links for zfs test suite utilities DIRS="$(find "$STF_SUITE" -type d \( ! -name .deps -a \ ! -name .libs \) -print | tr '\n' ' ')" create_links "$DIRS" "$ZFSTEST_FILES" else # Constrained path set to /var/tmp/constrained_path.* SYSTEMDIR=${SYSTEMDIR:-/var/tmp/constrained_path.XXXX} STF_PATH=$(/bin/mktemp -d "$SYSTEMDIR") STF_PATH_REMOVE="yes" STF_MISSING_BIN="" chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH" # Special case links for standard zfs utilities create_links "/bin /usr/bin /sbin /usr/sbin" "$ZFS_FILES" # Special case links for zfs test suite utilities create_links "$STF_SUITE/bin" "$ZFSTEST_FILES" fi # Standard system utilities create_links "/bin /usr/bin /sbin /usr/sbin" "$SYSTEM_FILES" # Exceptions ln -fs "$STF_PATH/awk" "$STF_PATH/nawk" ln -fs /sbin/fsck.ext4 "$STF_PATH/fsck" ln -fs /sbin/mkfs.ext4 "$STF_PATH/newfs" ln -fs "$STF_PATH/gzip" "$STF_PATH/compress" ln -fs "$STF_PATH/gunzip" "$STF_PATH/uncompress" ln -fs "$STF_PATH/exportfs" "$STF_PATH/share" ln -fs "$STF_PATH/exportfs" "$STF_PATH/unshare" if [ -L "$STF_PATH/arc_summary3" ]; then ln -fs "$STF_PATH/arc_summary3" "$STF_PATH/arc_summary" fi } # # Output a useful usage message. 
# usage() { cat << EOF USAGE: $0 [hvqxkfS] [-s SIZE] [-r RUNFILE] [-t PATH] [-u USER] DESCRIPTION: ZFS Test Suite launch script OPTIONS: -h Show this message -v Verbose zfs-tests.sh output -q Quiet test-runner output -x Remove all testpools, dm, lo, and files (unsafe) -k Disable cleanup after test failure -f Use files only, disables block device tests -S Enable stack tracer (negative performance impact) -c Only create and populate constrained path + -n NFSFILE Use the nfsfile to determine the NFS configuration -I NUM Number of iterations -d DIR Use DIR for files and loopback devices -s SIZE Use vdevs of SIZE (default: 4G) -r RUNFILE Run tests in RUNFILE (default: linux.run) -t PATH Run single test at PATH relative to test suite -T TAGS Comma separated list of tags (default: 'functional') -u USER Run single test as USER (default: root) EXAMPLES: # Run the default (linux) suite of tests and output the configuration used. $0 -v # Run a smaller suite of tests designed to run more quickly. $0 -r linux-fast # Cleanup a previous run of the test suite prior to testing, run the # default (linux) suite of tests and perform no cleanup on exit. $0 -x EOF } -while getopts 'hvqxkfScd:s:r:?t:T:u:I:' OPTION; do +while getopts 'hvqxkfScn:d:s:r:?t:T:u:I:' OPTION; do case $OPTION in h) usage exit 1 ;; v) # shellcheck disable=SC2034 VERBOSE="yes" ;; q) QUIET="-q" ;; x) CLEANUPALL="yes" ;; k) CLEANUP="no" ;; f) LOOPBACK="no" ;; S) STACK_TRACER="yes" ;; c) constrain_path exit ;; + n) + nfsfile=$OPTARG + [[ -f $nfsfile ]] || fail "Cannot read file: $nfsfile" + export NFS=1 + . "$nfsfile" + ;; d) FILEDIR="$OPTARG" ;; I) ITERATIONS="$OPTARG" if [ "$ITERATIONS" -le 0 ]; then fail "Iterations must be greater than 0." fi ;; s) FILESIZE="$OPTARG" ;; r) RUNFILE="$OPTARG" ;; t) if [ ${#SINGLETEST[@]} -ne 0 ]; then fail "-t can only be provided once." fi SINGLETEST+=("$OPTARG") ;; T) TAGS="$OPTARG" ;; u) SINGLETESTUSER="$OPTARG" ;; ?) 
usage exit ;; esac done shift $((OPTIND-1)) FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"} LOOPBACKS=${LOOPBACKS:-""} if [ ${#SINGLETEST[@]} -ne 0 ]; then if [ -n "$TAGS" ]; then fail "-t and -T are mutually exclusive." fi RUNFILE_DIR="/var/tmp" RUNFILE="zfs-tests.$$.run" SINGLEQUIET="False" if [ -n "$QUIET" ]; then SINGLEQUIET="True" fi cat >$RUNFILE_DIR/$RUNFILE << EOF [DEFAULT] pre = quiet = $SINGLEQUIET pre_user = root user = $SINGLETESTUSER timeout = 600 post_user = root post = outputdir = /var/tmp/test_results EOF for t in "${SINGLETEST[@]}" do SINGLETESTDIR=$(dirname "$t") SINGLETESTFILE=$(basename "$t") SETUPSCRIPT= CLEANUPSCRIPT= if [ -f "$STF_SUITE/$SINGLETESTDIR/setup.ksh" ]; then SETUPSCRIPT="setup" fi if [ -f "$STF_SUITE/$SINGLETESTDIR/cleanup.ksh" ]; then CLEANUPSCRIPT="cleanup" fi cat >>$RUNFILE_DIR/$RUNFILE << EOF [$SINGLETESTDIR] tests = ['$SINGLETESTFILE'] pre = $SETUPSCRIPT post = $CLEANUPSCRIPT tags = ['functional'] EOF done fi # # Use default tag if none was specified # TAGS=${TAGS:='functional'} # # Attempt to locate the runfile describing the test workload. # if [ -n "$RUNFILE" ]; then SAVED_RUNFILE="$RUNFILE" RUNFILE=$(find_runfile "$RUNFILE") [ -z "$RUNFILE" ] && fail "Cannot find runfile: $SAVED_RUNFILE" fi if [ ! -r "$RUNFILE" ]; then fail "Cannot read runfile: $RUNFILE" fi # # This script should not be run as root. Instead the test user, which may # be a normal user account, needs to be configured such that it can # run commands via sudo passwordlessly. # if [ "$(id -u)" = "0" ]; then fail "This script must not be run as root." fi if [ "$(sudo whoami)" != "root" ]; then fail "Passwordless sudo access required." fi # # Constrain the available binaries to a known set. # constrain_path # # Check if ksh exists # [ -e "$STF_PATH/ksh" ] || fail "This test suite requires ksh." [ -e "$STF_SUITE/include/default.cfg" ] || fail \ "Missing $STF_SUITE/include/default.cfg file." # # Verify the ZFS module stack is loaded. 
# if [ "$STACK_TRACER" = "yes" ]; then sudo "${ZFS_SH}" -S &>/dev/null else sudo "${ZFS_SH}" &>/dev/null fi # # Attempt to cleanup all previous state for a new test run. # if [ "$CLEANUPALL" = "yes" ]; then cleanup_all fi # # By default preserve any existing pools # NOTE: Since 'zpool list' outputs a newline-delimited list convert $KEEP from # space-delimited to newline-delimited. # if [ -z "${KEEP}" ]; then KEEP="$(sudo "$ZPOOL" list -H -o name)" if [ -z "${KEEP}" ]; then KEEP="rpool" fi else KEEP="$(echo -e "${KEEP//[[:blank:]]/\n}")" fi # # NOTE: The following environment variables are undocumented # and should be used for testing purposes only: # # __ZFS_POOL_EXCLUDE - don't iterate over the pools it lists # __ZFS_POOL_RESTRICT - iterate only over the pools it lists # # See libzfs/libzfs_config.c for more information. # __ZFS_POOL_EXCLUDE="$(echo "$KEEP" | sed ':a;N;s/\n/ /g;ba')" . "$STF_SUITE/include/default.cfg" msg msg "--- Configuration ---" msg "Runfile: $RUNFILE" msg "STF_TOOLS: $STF_TOOLS" msg "STF_SUITE: $STF_SUITE" msg "STF_PATH: $STF_PATH" # # No DISKS have been provided so a basic file or loopback based devices # must be created for the test suite to use. # if [ -z "${DISKS}" ]; then # # Create sparse files for the test suite. These may be used # directory or have loopback devices layered on them. # for TEST_FILE in ${FILES}; do [ -f "$TEST_FILE" ] && fail "Failed file exists: ${TEST_FILE}" truncate -s "${FILESIZE}" "${TEST_FILE}" || fail "Failed creating: ${TEST_FILE} ($?)" if [[ "$DISKS" ]]; then DISKS="$DISKS $TEST_FILE" else DISKS="$TEST_FILE" fi done # # If requested setup loopback devices backed by the sparse files. 
# if [ "$LOOPBACK" = "yes" ]; then DISKS="" test -x "$LOSETUP" || fail "$LOSETUP utility must be installed" for TEST_FILE in ${FILES}; do TEST_LOOPBACK=$(sudo "${LOSETUP}" -f) sudo "${LOSETUP}" "${TEST_LOOPBACK}" "${TEST_FILE}" || fail "Failed: ${TEST_FILE} -> ${TEST_LOOPBACK}" LOOPBACKS="${LOOPBACKS}${TEST_LOOPBACK} " BASELOOPBACKS=$(basename "$TEST_LOOPBACK") if [[ "$DISKS" ]]; then DISKS="$DISKS $BASELOOPBACKS" else DISKS="$BASELOOPBACKS" fi done fi fi NUM_DISKS=$(echo "${DISKS}" | awk '{print NF}') [ "$NUM_DISKS" -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)" # # Disable SELinux until the ZFS Test Suite has been updated accordingly. # if [ -x "$STF_PATH/setenforce" ]; then sudo setenforce permissive &>/dev/null fi # # Enable internal ZFS debug log and clear it. # if [ -e /sys/module/zfs/parameters/zfs_dbgmsg_enable ]; then sudo /bin/sh -c "echo 1 >/sys/module/zfs/parameters/zfs_dbgmsg_enable" sudo /bin/sh -c "echo 0 >/proc/spl/kstat/zfs/dbgmsg" fi msg "FILEDIR: $FILEDIR" msg "FILES: $FILES" msg "LOOPBACKS: $LOOPBACKS" msg "DISKS: $DISKS" msg "NUM_DISKS: $NUM_DISKS" msg "FILESIZE: $FILESIZE" msg "ITERATIONS: $ITERATIONS" msg "TAGS: $TAGS" msg "STACK_TRACER: $STACK_TRACER" msg "Keep pool(s): $KEEP" msg "Missing util(s): $STF_MISSING_BIN" msg "" export STF_TOOLS export STF_SUITE export STF_PATH export DISKS export FILEDIR export KEEP export __ZFS_POOL_EXCLUDE export TESTFAIL_CALLBACKS export PATH=$STF_PATH RESULTS_FILE=$(mktemp -u -t zts-results.XXXX -p "$FILEDIR") REPORT_FILE=$(mktemp -u -t zts-report.XXXX -p "$FILEDIR") # # Run all the tests as specified. # msg "${TEST_RUNNER} ${QUIET} -c ${RUNFILE} -T ${TAGS} -i ${STF_SUITE}" \ "-I ${ITERATIONS}" ${TEST_RUNNER} ${QUIET} -c "${RUNFILE}" -T "${TAGS}" -i "${STF_SUITE}" \ -I "${ITERATIONS}" 2>&1 | tee "$RESULTS_FILE" # # Analyze the results. # set -o pipefail ${ZTS_REPORT} "$RESULTS_FILE" | tee "$REPORT_FILE" RESULT=$? 
set +o pipefail RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE") if [ -d "$RESULTS_DIR" ]; then cat "$RESULTS_FILE" "$REPORT_FILE" >"$RESULTS_DIR/results" fi rm -f "$RESULTS_FILE" "$REPORT_FILE" if [ ${#SINGLETEST[@]} -ne 0 ]; then rm -f "$RUNFILE" &>/dev/null fi exit ${RESULT} diff --git a/tests/zfs-tests/include/commands.cfg b/tests/zfs-tests/include/commands.cfg index 78ba2488d460..127a1477d426 100644 --- a/tests/zfs-tests/include/commands.cfg +++ b/tests/zfs-tests/include/commands.cfg @@ -1,181 +1,183 @@ # # These variables are used by zfs-tests.sh to constrain which utilities # may be used by the suite. The suite will create a directory which is # the only element of $PATH and create symlinks from that dir to the # binaries listed below. # # Please keep the contents of each variable sorted for ease of reading # and maintenance. # export SYSTEM_FILES='arp awk attr base64 basename bc blkid blockdev bunzip2 bzcat cat chattr chgrp chmod chown cksum cmp cp cpio cut date dd df diff dirname dmesg du echo egrep exportfs expr fallocate false fdisk file find fio free getconf getent getfacl getfattr grep groupadd groupdel groupmod gunzip gzip head hostid hostname id iostat kill ksh ln logname losetup ls lsattr lsblk lscpu lsmod lsscsi md5sum mkdir mknod mkswap mktemp modprobe mount mpstat mv net nproc od openssl parted pax perf pgrep ping pkill printenv printf ps pwd python python3 quotaon readlink rm rmdir + scp sed seq setenforce setfacl setfattr sh sha256sum shuf sleep sort + ssh stat strings su sudo sum swapoff swapon sync tail tar tee timeout touch tr true truncate udevadm umask umount uname useradd userdel usermod uuidgen vmstat wait wc which xargs' export ZFS_FILES='zdb zfs zhack zinject zpool ztest raidz_test arc_summary arc_summary3 arcstat dbufstat zed zgenhostid zstreamdump' export ZFSTEST_FILES='chg_usr_exec devname2devid dir_rd_update file_check file_trunc file_write largest_file libzfs_input_check mkbusy mkfile mkfiles mktree mmap_exec 
mmap_libaio mmapwrite nvlist_to_lua randfree_file randwritecomp readmmap rename_dir rm_lnkcnt_zero_file threadsappend user_ns_exec xattrtest' diff --git a/tests/zfs-tests/tests/perf/Makefile.am b/tests/zfs-tests/tests/perf/Makefile.am index d31bba0e2251..68dd31ec12b1 100644 --- a/tests/zfs-tests/tests/perf/Makefile.am +++ b/tests/zfs-tests/tests/perf/Makefile.am @@ -1,7 +1,9 @@ pkgdatadir = $(datadir)/@PACKAGE@/zfs-tests/tests/perf -dist_pkgdata_DATA = perf.shlib +dist_pkgdata_SCRIPTS = \ + nfs-sample.cfg \ + perf.shlib SUBDIRS = \ fio \ regression \ scripts diff --git a/tests/zfs-tests/tests/perf/fio/random_reads.fio b/tests/zfs-tests/tests/perf/fio/random_reads.fio index 79610f9b28aa..e6e7034e0acb 100644 --- a/tests/zfs-tests/tests/perf/fio/random_reads.fio +++ b/tests/zfs-tests/tests/perf/fio/random_reads.fio @@ -1,31 +1,32 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2015, 2016 by Delphix. All rights reserved. # [global] filename_format=file$jobnum group_reporting=1 fallocate=0 overwrite=0 thread=1 rw=randread time_based=1 directory=${DIRECTORY} runtime=${RUNTIME} bs=${BLOCKSIZE} ioengine=psync sync=${SYNC_TYPE} +direct=${DIRECT} numjobs=${NUMJOBS} [job] diff --git a/tests/zfs-tests/tests/perf/fio/random_readwrite.fio b/tests/zfs-tests/tests/perf/fio/random_readwrite.fio index 7d01c38ada96..852d4bed69b8 100644 --- a/tests/zfs-tests/tests/perf/fio/random_readwrite.fio +++ b/tests/zfs-tests/tests/perf/fio/random_readwrite.fio @@ -1,35 +1,36 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. 
# You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2015, 2016 by Delphix. All rights reserved. # [global] filename_format=file$jobnum nrfiles=16 group_reporting=1 fallocate=0 overwrite=0 thread=1 rw=randrw rwmixread=80 time_based=1 directory=${DIRECTORY} runtime=${RUNTIME} bssplit=4k/50:8k/30:128k/10:1m/10 ioengine=psync sync=${SYNC_TYPE} +direct=${DIRECT} numjobs=${NUMJOBS} buffer_compress_percentage=66 buffer_compress_chunk=4096 [job] diff --git a/tests/zfs-tests/tests/perf/fio/random_readwrite_fixed.fio b/tests/zfs-tests/tests/perf/fio/random_readwrite_fixed.fio index ed449555328c..67b88c09d79f 100644 --- a/tests/zfs-tests/tests/perf/fio/random_readwrite_fixed.fio +++ b/tests/zfs-tests/tests/perf/fio/random_readwrite_fixed.fio @@ -1,35 +1,36 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2017 by Delphix. All rights reserved. 
# [global] filename_format=file$jobnum nrfiles=16 group_reporting=1 fallocate=0 overwrite=0 thread=1 rw=randrw rwmixread=70 time_based=1 directory=${DIRECTORY} runtime=${RUNTIME} bs=${BLOCKSIZE} ioengine=psync sync=${SYNC_TYPE} +direct=${DIRECT} numjobs=${NUMJOBS} buffer_compress_percentage=66 buffer_compress_chunk=4096 [job] diff --git a/tests/zfs-tests/tests/perf/fio/random_writes.fio b/tests/zfs-tests/tests/perf/fio/random_writes.fio index 5e2cb30026c7..90db5ce3bfd1 100644 --- a/tests/zfs-tests/tests/perf/fio/random_writes.fio +++ b/tests/zfs-tests/tests/perf/fio/random_writes.fio @@ -1,33 +1,34 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2015, 2016 by Delphix. All rights reserved. # [global] filename_format=file$jobnum group_reporting=1 fallocate=0 thread=1 rw=randwrite time_based=1 directory=${DIRECTORY} runtime=${RUNTIME} bs=${BLOCKSIZE} ioengine=psync sync=${SYNC_TYPE} +direct=${DIRECT} numjobs=${NUMJOBS} filesize=${FILESIZE} buffer_compress_percentage=66 buffer_compress_chunk=4096 [job] diff --git a/tests/zfs-tests/tests/perf/fio/sequential_reads.fio b/tests/zfs-tests/tests/perf/fio/sequential_reads.fio index 33a9a1d89396..b4b45e084135 100644 --- a/tests/zfs-tests/tests/perf/fio/sequential_reads.fio +++ b/tests/zfs-tests/tests/perf/fio/sequential_reads.fio @@ -1,31 +1,32 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. 
A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2015, 2016 by Delphix. All rights reserved. # [global] filename_format=file$jobnum group_reporting=1 fallocate=0 overwrite=0 thread=1 rw=read time_based=1 directory=${DIRECTORY} runtime=${RUNTIME} bs=${BLOCKSIZE} ioengine=psync sync=${SYNC_TYPE} +direct=${DIRECT} numjobs=${NUMJOBS} [job] diff --git a/tests/zfs-tests/tests/perf/fio/sequential_writes.fio b/tests/zfs-tests/tests/perf/fio/sequential_writes.fio index 65a65910fd4f..714993e92f16 100644 --- a/tests/zfs-tests/tests/perf/fio/sequential_writes.fio +++ b/tests/zfs-tests/tests/perf/fio/sequential_writes.fio @@ -1,33 +1,34 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2015, 2016 by Delphix. All rights reserved. # [global] filename_format=file$jobnum group_reporting=1 fallocate=0 thread=1 rw=write time_based=1 directory=${DIRECTORY} runtime=${RUNTIME} bs=${BLOCKSIZE} ioengine=psync sync=${SYNC_TYPE} +direct=${DIRECT} numjobs=${NUMJOBS} filesize=${FILESIZE} buffer_compress_percentage=66 buffer_compress_chunk=4096 [job] diff --git a/tests/zfs-tests/tests/perf/nfs-sample.cfg b/tests/zfs-tests/tests/perf/nfs-sample.cfg new file mode 100644 index 000000000000..f7ac2dae3500 --- /dev/null +++ b/tests/zfs-tests/tests/perf/nfs-sample.cfg @@ -0,0 +1,46 @@ +# +# This file and its contents are supplied under the terms of the +# Common Development and Distribution License ("CDDL"), version 1.0. +# You may only use this file in accordance with the terms of version +# 1.0 of the CDDL. 
+# +# A full copy of the text of the CDDL should have accompanied this +# source. A copy of the CDDL is also available via the Internet at +# http://www.illumos.org/license/CDDL. +# + +# +# Copyright (c) 2016 by Delphix. All rights reserved. +# + +# +# This file is a sample NFS configuration for the performance tests. To use the +# performance tests over NFS you must have: +# - a client machine with fio and sudo installed +# - passwordless SSH set up from this host +# for delphix and root users to the client +# - passwordless sudo for the user on the client +# + + +# The IP address for the server +export NFS_SERVER=127.0.0.1 + +# The IP address for the client +export NFS_CLIENT=127.0.0.1 + +# The mountpoint to use inside the client +export NFS_MOUNT=/var/tmp/nfs + +# The user to run the tests as on the client +export NFS_USER=delphix + +# Common NFS client mount options +export NFS_OPTIONS="-o rw,nosuid,bg,hard,rsize=1048576,wsize=1048576," +NFS_OPTIONS+="nointr,timeo=600,proto=tcp,actimeo=0,port=2049" + +# illumos NFS client mount options +# export NFS_OPTIONS="$NFS_OPTIONS,vers=3" + +# Linux NFS client mount options +export NFS_OPTIONS="-t nfs $NFS_OPTIONS,noacl,nfsvers=3" diff --git a/tests/zfs-tests/tests/perf/perf.shlib b/tests/zfs-tests/tests/perf/perf.shlib index 7165df759b1f..69e61e9fd122 100644 --- a/tests/zfs-tests/tests/perf/perf.shlib +++ b/tests/zfs-tests/tests/perf/perf.shlib @@ -1,492 +1,537 @@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2015, 2016 by Delphix. All rights reserved. # Copyright (c) 2016, Intel Corporation. # . 
$STF_SUITE/include/libtest.shlib # If neither is specified, do a nightly run. [[ -z $PERF_REGRESSION_WEEKLY ]] && export PERF_REGRESSION_NIGHTLY=1 # Default runtime for each type of test run. export PERF_RUNTIME_WEEKLY=$((30 * 60)) export PERF_RUNTIME_NIGHTLY=$((10 * 60)) # Default fs creation options export PERF_FS_OPTS=${PERF_FS_OPTS:-'-o recsize=8k -o compress=lz4' \ ' -o checksum=sha256 -o redundant_metadata=most'} function get_sync_str { typeset sync=$1 typeset sync_str='' [[ $sync -eq 0 ]] && sync_str='async' [[ $sync -eq 1 ]] && sync_str='sync' echo $sync_str } function get_suffix { typeset threads=$1 typeset sync=$2 typeset iosize=$3 typeset sync_str=$(get_sync_str $sync) typeset filesystems=$(get_nfilesystems) typeset suffix="$sync_str.$iosize-ios" suffix="$suffix.$threads-threads.$filesystems-filesystems" echo $suffix } function do_fio_run_impl { typeset script=$1 typeset do_recreate=$2 typeset clear_cache=$3 typeset threads=$4 typeset threads_per_fs=$5 typeset sync=$6 typeset iosize=$7 typeset sync_str=$(get_sync_str $sync) log_note "Running with $threads $sync_str threads, $iosize ios" if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then log_must test $do_recreate verify_threads_per_fs $threads $threads_per_fs fi if $do_recreate; then recreate_perf_pool # # A value of zero for "threads_per_fs" is "special", and # means a single filesystem should be used, regardless # of the number of threads. # if [[ -n $threads_per_fs && $threads_per_fs -ne 0 ]]; then populate_perf_filesystems $((threads / threads_per_fs)) else populate_perf_filesystems 1 fi fi if $clear_cache; then # Clear the ARC zpool export $PERFPOOL zpool import $PERFPOOL fi if [[ -n $ZINJECT_DELAYS ]]; then apply_zinject_delays else log_note "No per-device commands to execute." fi # # Allow this to be overridden by the individual test case. This # can be used to run the FIO job against something other than # the default filesystem (e.g. against a clone). 
# export DIRECTORY=$(get_directory) log_note "DIRECTORY: " $DIRECTORY export RUNTIME=$PERF_RUNTIME export FILESIZE=$((TOTAL_SIZE / threads)) export NUMJOBS=$threads export SYNC_TYPE=$sync export BLOCKSIZE=$iosize sync + # When running locally, we want to keep the default behavior of + # DIRECT == 0, so only set it when we're running over NFS to + # disable client cache for reads. + if [[ $NFS -eq 1 ]]; then + export DIRECT=1 + do_setup_nfs $script + else + export DIRECT=0 + fi + # This will be part of the output filename. typeset suffix=$(get_suffix $threads $sync $iosize) # Start the data collection do_collect_scripts $suffix # Define output file typeset logbase="$(get_perf_output_dir)/$(basename \ $SUDO_COMMAND)" typeset outfile="$logbase.fio.$suffix" # Start the load - log_must fio --output $outfile $FIO_SCRIPTS/$script + if [[ $NFS -eq 1 ]]; then + log_must ssh -t $NFS_USER@$NFS_CLIENT " + fio --output /tmp/fio.out /tmp/test.fio + " + log_must scp $NFS_USER@$NFS_CLIENT:/tmp/fio.out $outfile + else + log_must fio --output $outfile $FIO_SCRIPTS/$script + fi } # # This function will run fio in a loop, according to the .fio file passed # in and a number of environment variables. The following variables can be # set before launching zfstest to override the defaults. # # PERF_RUNTIME: The time in seconds each fio invocation should run. # PERF_RUNTYPE: A human readable tag that appears in logs. The defaults are # nightly and weekly. # PERF_NTHREADS: A list of how many threads each fio invocation will use. # PERF_SYNC_TYPES: Whether to use (O_SYNC) or not. 1 is sync IO, 0 is async IO. # PERF_IOSIZES: A list of blocksizes in which each fio invocation will do IO. # PERF_COLLECT_SCRIPTS: A comma delimited list of 'command args, logfile_tag' # pairs that will be added to the scripts specified in each test. 
# function do_fio_run { typeset script=$1 typeset do_recreate=$2 typeset clear_cache=$3 typeset threads threads_per_fs sync iosize for threads in $PERF_NTHREADS; do for threads_per_fs in $PERF_NTHREADS_PER_FS; do for sync in $PERF_SYNC_TYPES; do for iosize in $PERF_IOSIZES; do do_fio_run_impl \ $script \ $do_recreate \ $clear_cache \ $threads \ $threads_per_fs \ $sync \ $iosize done done done done } +# This function sets NFS mount on the client and make sure all correct +# permissions are in place +# +function do_setup_nfs +{ + typeset script=$1 + zfs set sharenfs=on $TESTFS + log_must chmod -R 777 /$TESTFS + + ssh -t $NFS_USER@$NFS_CLIENT "mkdir -m 777 -p $NFS_MOUNT" + ssh -t $NFS_USER@$NFS_CLIENT "sudo -S umount $NFS_MOUNT" + log_must ssh -t $NFS_USER@$NFS_CLIENT " + sudo -S mount $NFS_OPTIONS $NFS_SERVER:/$TESTFS $NFS_MOUNT + " + # + # The variables in the fio script are only available in our current + # shell session, so we have to evaluate them here before copying + # the resulting script over to the target machine. + # + export jobnum='$jobnum' + while read line; do + eval echo "$line" + done < $FIO_SCRIPTS/$script > /tmp/test.fio + log_must sed -i -e "s%directory.*%directory=$NFS_MOUNT%" /tmp/test.fio + log_must scp /tmp/test.fio $NFS_USER@$NFS_CLIENT:/tmp + log_must rm /tmp/test.fio +} + # # This function iterates through the value pairs in $PERF_COLLECT_SCRIPTS. # The script at index N is launched in the background, with its output # redirected to a logfile containing the tag specified at index N + 1. # function do_collect_scripts { typeset suffix=$1 [[ -n $collect_scripts ]] || log_fail "No data collection scripts." [[ -n $PERF_RUNTIME ]] || log_fail "No runtime specified." # Add in user supplied scripts and logfiles, if any. 
typeset oIFS=$IFS IFS=',' for item in $PERF_COLLECT_SCRIPTS; do collect_scripts+=($(echo $item | sed 's/^ *//g')) done IFS=$oIFS typeset idx=0 while [[ $idx -lt "${#collect_scripts[@]}" ]]; do typeset logbase="$(get_perf_output_dir)/$(basename \ $SUDO_COMMAND)" typeset outfile="$logbase.${collect_scripts[$idx + 1]}.$suffix" timeout $PERF_RUNTIME ${collect_scripts[$idx]} >$outfile 2>&1 & ((idx += 2)) done # Need to explicitly return 0 because timeout(1) will kill # a child process and cause us to return non-zero. return 0 } # Find a place to deposit performance data collected while under load. function get_perf_output_dir { typeset dir="$(pwd)/perf_data" [[ -d $dir ]] || mkdir -p $dir echo $dir } function apply_zinject_delays { typeset idx=0 while [[ $idx -lt "${#ZINJECT_DELAYS[@]}" ]]; do [[ -n ${ZINJECT_DELAYS[$idx]} ]] || \ log_must "No zinject delay found at index: $idx" for disk in $DISKS; do log_must zinject \ -d $disk -D ${ZINJECT_DELAYS[$idx]} $PERFPOOL done ((idx += 1)) done } function clear_zinject_delays { log_must zinject -c all } # # Destroy and create the pool used for performance tests. # function recreate_perf_pool { [[ -n $PERFPOOL ]] || log_fail "The \$PERFPOOL variable isn't set." # # In case there's been some "leaked" zinject delays, or if the # performance test injected some delays itself, we clear all # delays before attempting to destroy the pool. Each delay # places a hold on the pool, so the destroy will fail if there # are any outstanding delays. # clear_zinject_delays # # This function handles the case where the pool already exists, # and will destroy the previous pool and recreate a new pool. # create_pool $PERFPOOL $DISKS } function verify_threads_per_fs { typeset threads=$1 typeset threads_per_fs=$2 log_must test -n $threads log_must test -n $threads_per_fs # # A value of "0" is treated as a "special value", and it is # interpreted to mean all threads will run using a single # filesystem. 
# [[ $threads_per_fs -eq 0 ]] && return # # The number of threads per filesystem must be a value greater # than or equal to zero; since we just verified the value isn't # 0 above, then it must be greater than zero here. # log_must test $threads_per_fs -ge 0 # # This restriction can be lifted later if needed, but for now, # we restrict the number of threads per filesystem to a value # that evenly divides the thread count. This way, the threads # will be evenly distributed over all the filesystems. # log_must test $((threads % threads_per_fs)) -eq 0 } function populate_perf_filesystems { typeset nfilesystems=${1:-1} export TESTFS="" for i in $(seq 1 $nfilesystems); do typeset dataset="$PERFPOOL/fs$i" create_dataset $dataset $PERF_FS_OPTS if [[ -z "$TESTFS" ]]; then TESTFS="$dataset" else TESTFS="$TESTFS $dataset" fi done } function get_nfilesystems { typeset filesystems=( $TESTFS ) echo ${#filesystems[@]} } function get_directory { typeset filesystems=( $TESTFS ) typeset directory= typeset idx=0 while [[ $idx -lt "${#filesystems[@]}" ]]; do mountpoint=$(get_prop mountpoint "${filesystems[$idx]}") if [[ -n $directory ]]; then directory=$directory:$mountpoint else directory=$mountpoint fi ((idx += 1)) done echo $directory } function get_max_arc_size { if is_linux; then typeset -l max_arc_size=`awk '$1 == "c_max" { print $3 }' \ /proc/spl/kstat/zfs/arcstats` else typeset -l max_arc_size=$(dtrace -qn 'BEGIN { printf("%u\n", `arc_stats.arcstat_c_max.value.ui64); exit(0); }') fi [[ $? -eq 0 ]] || log_fail "get_max_arc_size failed" echo $max_arc_size } function get_max_dbuf_cache_size { typeset -l max_dbuf_cache_size if is_linux; then max_dbuf_cache_size=$(get_tunable dbuf_cache_max_bytes) else max_dbuf_cache_size=$(dtrace -qn 'BEGIN { printf("%u\n", `dbuf_cache_max_bytes); exit(0); }') [[ $? -eq 0 ]] || log_fail "get_max_dbuf_cache_size failed" fi echo $max_dbuf_cache_size } # Create a file with some information about how this system is configured. 
#
# Write a JSON description of this system's configuration (cpus, memory,
# ARC limit, hostname, kernel, disks, ZFS tunables) to $PERF_DATA_DIR/$1.
# The Linux branch reads procfs/sysfs; the illumos branch uses dtrace,
# iostat -En and /etc/system.
# NOTE(review): the function continues past this view; the closing brace
# is not visible here.
#
function get_system_config {
	typeset config=$PERF_DATA_DIR/$1

	echo "{" >>$config
	if is_linux; then
		echo " \"ncpus\": \"$(nproc --all)\"," >>$config
		echo " \"physmem\": \"$(free -b | \
		    awk '$1 == "Mem:" { print $2 }')\"," >>$config
		echo " \"c_max\": \"$(get_max_arc_size)\"," >>$config
		echo " \"hostname\": \"$(uname -n)\"," >>$config
		echo " \"kernel version\": \"$(uname -sr)\"," >>$config
	else
		dtrace -qn 'BEGIN{
		    printf(" \"ncpus\": %d,\n", `ncpus);
		    printf(" \"physmem\": %u,\n", `physmem * `_pagesize);
		    printf(" \"c_max\": %u,\n", `arc_stats.arcstat_c_max.value.ui64);
		    printf(" \"kmem_flags\": \"0x%x\",", `kmem_flags);
		    exit(0)}' >>$config
		echo " \"hostname\": \"$(uname -n)\"," >>$config
		echo " \"kernel version\": \"$(uname -v)\"," >>$config
	fi
	if is_linux; then
		# Emit a "disks" object mapping device name to size; the
		# "first" flag suppresses the comma before the first entry.
		lsblk -dino NAME,SIZE | awk 'BEGIN {
		    printf(" \"disks\": {\n"); first = 1}
		    {disk = $1} {size = $2;
		    if (first != 1) {printf(",\n")} else {first = 0}
		    printf(" \"%s\": \"%s\"", disk, size)}
		    END {printf("\n },\n")}' >>$config

		zfs_tunables="/sys/module/zfs/parameters"

		printf " \"tunables\": {\n" >>$config
		for tunable in \
		    zfs_arc_max \
		    zfs_arc_meta_limit \
		    zfs_arc_sys_free \
		    zfs_dirty_data_max \
		    zfs_flags \
		    zfs_prefetch_disable \
		    zfs_txg_timeout \
		    zfs_vdev_aggregation_limit \
		    zfs_vdev_async_read_max_active \
		    zfs_vdev_async_write_max_active \
		    zfs_vdev_sync_read_max_active \
		    zfs_vdev_sync_write_max_active \
		    zio_slow_io_ms
		do
			# zfs_arc_max is the first tunable emitted, so it is
			# the only one not preceded by a comma.
			if [ "$tunable" != "zfs_arc_max" ]
			then
				printf ",\n" >>$config
			fi
			printf " \"$tunable\": \"$(<$zfs_tunables/$tunable)\"" \
			    >>$config
		done
		printf "\n }\n" >>$config
	else
		# illumos: derive the disk list from iostat -En output.
		iostat -En | awk 'BEGIN {
		    printf(" \"disks\": {\n"); first = 1}
		    /^c/ {disk = $1}
		    /^Size: [^0]/ {size = $2;
		    if (first != 1) {printf(",\n")} else {first = 0}
		    printf(" \"%s\": \"%s\"", disk, size)}
		    END {printf("\n },\n")}' >>$config

		# Capture "set x = y" lines from /etc/system as a JSON object.
		sed -n 's/^set \(.*\)[ ]=[ ]\(.*\)/\1=\2/p' /etc/system | \
		    awk -F= 'BEGIN {printf(" \"system\": {\n"); first = 1}
		    {if (first != 1) {printf(",\n")} else {first = 0};
		    printf(" \"%s\": %s", $1, $2)} END
{printf("\n }\n")}' >>$config fi echo "}" >>$config } function num_jobs_by_cpu { if is_linux; then typeset ncpu=$($NPROC --all) else typeset ncpu=$(psrinfo | $WC -l) fi typeset num_jobs=$ncpu [[ $ncpu -gt 8 ]] && num_jobs=$(echo "$ncpu * 3 / 4" | bc) echo $num_jobs } # # On illumos this looks like: ":sd3:sd4:sd1:sd2:" # function pool_to_lun_list { typeset pool=$1 typeset ctd ctds devname lun typeset lun_list=':' if is_linux; then ctds=$(zpool list -HLv $pool | \ awk '/sd[a-z]*|loop[0-9]*|dm-[0-9]*/ {print $1}') for ctd in $ctds; do lun_list="$lun_list$ctd:" done else ctds=$(zpool list -v $pool | awk '/c[0-9]*t[0-9a-fA-F]*d[0-9]*/ {print $1}') for ctd in $ctds; do # Get the device name as it appears in /etc/path_to_inst devname=$(readlink -f /dev/dsk/${ctd}s0 | sed -n \ 's/\/devices\([^:]*\):.*/\1/p') # Add a string composed of the driver name and instance # number to the list for comparison with dev_statname. lun=$(sed 's/"//g' /etc/path_to_inst | grep \ $devname | awk '{print $3$2}') un_list="$lun_list$lun:" done fi echo $lun_list } # Create a perf_data directory to hold performance statistics and # configuration information. export PERF_DATA_DIR=$(get_perf_output_dir) [[ -f $PERF_DATA_DIR/config.json ]] || get_system_config config.json diff --git a/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh b/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh index 0f3256351c48..e0b253200954 100755 --- a/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh +++ b/tests/zfs-tests/tests/perf/regression/random_writes_zil.ksh @@ -1,90 +1,94 @@ #!/bin/ksh # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. 
#
#
# Copyright (c) 2015, 2016 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/perf/perf.shlib

#
# Stop the load generator and stat collectors, then recreate the pool to
# dispose of every per-test filesystem in one step.
#
function cleanup
{
	# kill fio and iostat
	pkill fio
	pkill iostat

	#
	# We're using many filesystems depending on the number of
	# threads for each test, and there's no good way to get a list
	# of all the filesystems that should be destroyed on cleanup
	# (i.e. the list of filesystems used for the last test ran).
	# Thus, we simply recreate the pool as a way to destroy all
	# filesystems and leave a fresh pool behind.
	#
	recreate_perf_pool
}

# Turn an external SIGTERM (e.g. a harness timeout) into a test failure.
trap "log_fail \"Measure IO stats during random write load\"" SIGTERM

log_onexit cleanup

recreate_perf_pool

# Aim to fill the pool to 50% capacity while accounting for a 3x compressratio.
export TOTAL_SIZE=$(($(get_prop avail $PERFPOOL) * 3 / 2))

# Pick the parameter matrix for this run; each variable honors a value
# already present in the environment, falling back to the listed default.
if [[ -n $PERF_REGRESSION_WEEKLY ]]; then
	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_WEEKLY}
	export PERF_RUNTYPE=${PERF_RUNTYPE:-'weekly'}
	export PERF_NTHREADS=${PERF_NTHREADS:-'1 2 4 8 16 32 64 128'}
	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
	export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
elif [[ -n $PERF_REGRESSION_NIGHTLY ]]; then
	export PERF_RUNTIME=${PERF_RUNTIME:-$PERF_RUNTIME_NIGHTLY}
	export PERF_RUNTYPE=${PERF_RUNTYPE:-'nightly'}
	export PERF_NTHREADS=${PERF_NTHREADS:-'1 4 16 64'}
	export PERF_NTHREADS_PER_FS=${PERF_NTHREADS_PER_FS:-'0 1'}
	export PERF_SYNC_TYPES=${PERF_SYNC_TYPES:-'1'}
	export PERF_IOSIZES=${PERF_IOSIZES:-'8k'}
fi

+# Until the performance tests over NFS can deal with multiple file systems,
+# force the use of only one file system when testing over NFS.
+[[ $NFS -eq 1 ]] && PERF_NTHREADS_PER_FS='0'
+
lun_list=$(pool_to_lun_list $PERFPOOL)
log_note "Collecting backend IO stats with lun list $lun_list"
# collect_scripts is consumed as (command, log-name) pairs by the
# collection code in perf.shlib.
if is_linux; then
	typeset perf_record_cmd="perf record -F 99 -a -g -q \
	    -o /dev/stdout -- sleep ${PERF_RUNTIME}"

	export collect_scripts=(
	    "zpool iostat -lpvyL $PERFPOOL 1" "zpool.iostat"
	    "vmstat -t 1" "vmstat"
	    "mpstat -P ALL 1" "mpstat"
	    "iostat -tdxyz 1" "iostat"
	    "$perf_record_cmd" "perf"
	)
else
	export collect_scripts=(
	    "kstat zfs:0 1" "kstat"
	    "vmstat -T d 1" "vmstat"
	    "mpstat -T d 1" "mpstat"
	    "iostat -T d -xcnz 1" "iostat"
	    "dtrace -Cs $PERF_SCRIPTS/io.d $PERFPOOL $lun_list 1" "io"
	    "dtrace -s $PERF_SCRIPTS/zil.d $PERFPOOL 1" "zil"
	    "dtrace -s $PERF_SCRIPTS/profile.d" "profile"
	    "dtrace -s $PERF_SCRIPTS/offcpu-profile.d" "offcpu-profile"
	)
fi

log_note "ZIL specific random write workload with $PERF_RUNTYPE settings"
# NOTE(review): trailing args presumably select sync/direct behavior for
# do_fio_run — confirm against its definition in perf.shlib.
do_fio_run random_writes.fio true false
log_pass "Measure IO stats during ZIL specific random write workload"