diff --git a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh index e51cf179d8ef..552a27e98102 100755 --- a/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh +++ b/tests/zfs-tests/tests/functional/arc/dbufstats_001_pos.ksh @@ -1,89 +1,84 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2017, Lawrence Livermore National Security, LLC. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/include/math.shlib # # DESCRIPTION: -# Ensure stats presented in /proc/spl/kstat/zfs/dbufstats are correct -# based on /proc/spl/kstat/zfs/dbufs. +# Ensure stats presented in the dbufstats kstat are correct based on the +# dbufs kstat. # # STRATEGY: # 1. Generate a file with random data in it # 2. Store output from dbufs kstat # 3. Store output from dbufstats kstat # 4. 
Compare stats presented in dbufstats with stat generated using # dbufstat and the dbufs kstat output # DBUFSTATS_FILE=$(mktemp $TEST_BASE_DIR/dbufstats.out.XXXXXX) DBUFS_FILE=$(mktemp $TEST_BASE_DIR/dbufs.out.XXXXXX) function cleanup { log_must rm -f $TESTDIR/file $DBUFS_FILE $DBUFSTATS_FILE } function testdbufstat # stat_name dbufstat_filter { name=$1 filter="" [[ -n "$2" ]] && filter="-F $2" - if is_linux; then - read -r _ _ from_dbufstat _ < <(grep -w "$name" "$DBUFSTATS_FILE") - else - from_dbufstat=$(awk "/dbufstats\.$name:/ { print \$2 }" \ - "$DBUFSTATS_FILE") - fi + from_dbufstat=$(grep "^$name " "$DBUFSTATS_FILE" | cut -f2 -d' ') from_dbufs=$(dbufstat -bxn -i "$DBUFS_FILE" "$filter" | wc -l) within_tolerance $from_dbufstat $from_dbufs 15 \ || log_fail "Stat $name exceeded tolerance" } verify_runnable "both" log_assert "dbufstats produces correct statistics" log_onexit cleanup log_must file_write -o create -f "$TESTDIR/file" -b 1048576 -c 20 -d R sync_all_pools log_must eval "kstat dbufs > $DBUFS_FILE" -log_must eval "kstat dbufstats '' > $DBUFSTATS_FILE" +log_must eval "kstat -g dbufstats > $DBUFSTATS_FILE" for level in {0..11}; do testdbufstat "cache_level_$level" "dbc=1,level=$level" done testdbufstat "cache_count" "dbc=1" testdbufstat "hash_elements" "" log_pass "dbufstats produces correct statistics passed" diff --git a/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_objset_id.ksh b/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_objset_id.ksh index fdda9ba22638..9d147f382042 100755 --- a/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_objset_id.ksh +++ b/tests/zfs-tests/tests/functional/cli_root/zdb/zdb_objset_id.ksh @@ -1,111 +1,106 @@ #!/bin/ksh # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. 
A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2020 by Datto, Inc. All rights reserved. # . $STF_SUITE/include/libtest.shlib # # Description: # zdb -d pool/ will display the dataset # # Strategy: # 1. Create a pool # 2. Write some data to a file # 3. Get the inode number (object number) of the file # 4. Run zdb -d to get the objset ID of the dataset # 5. Run zdb -dddddd pool/objsetID objectID (decimal) # 6. Confirm names # 7. Run zdb -dddddd pool/objsetID objectID (hex) # 8. Confirm names # 9. Repeat with zdb -NNNNNN pool/objsetID objectID -# 10. Obtain objsetID from /proc/spl/kstat/zfs/testpool/obset-0x -# (linux only) +# 10. Obtain dataset name from testpool.objset-0x.dataset_name kstat # 11. Run zdb -dddddd pool/objsetID (hex) -# 12. Match name from zdb against proc entry +# 12. Match name from zdb against kstat # 13. Create dataset with hex numeric name # 14. Create dataset with decimal numeric name # 15. zdb -d for numeric datasets succeeds # 16. zdb -N for numeric datasets fails # 17. zdb -dN for numeric datasets fails # function cleanup { datasetexists $TESTPOOL && destroy_pool $TESTPOOL } log_assert "Verify zdb -d / generates the correct names." 
log_onexit cleanup init_data=$TESTDIR/file1 write_count=8 blksize=131072 verify_runnable "global" verify_disk_count "$DISKS" 2 hex_ds=$TESTPOOL/0x400000 num_ds=$TESTPOOL/100000 default_mirror_setup_noexit $DISKS file_write -o create -w -f $init_data -b $blksize -c $write_count # get object number of file listing=$(ls -i $init_data) set -A array $listing obj=${array[0]} log_note "file $init_data has object number $obj" sync_pool $TESTPOOL IFS=", " read -r _ _ _ _ objset_id _ < <(zdb -d $TESTPOOL/$TESTFS) -objset_hex=$(printf "0x%X" $objset_id) +objset_hex=$(printf "0x%x" $objset_id) log_note "objset $TESTPOOL/$TESTFS has objset ID $objset_id ($objset_hex)" for id in "$objset_id" "$objset_hex" do log_note "zdb -dddddd $TESTPOOL/$id $obj" output=$(zdb -dddddd $TESTPOOL/$id $obj) echo $output | grep -q "$TESTPOOL/$TESTFS" || log_fail "zdb -dddddd $TESTPOOL/$id $obj failed ($TESTPOOL/$TESTFS not in zdb output)" echo $output | grep -q "file1" || log_fail "zdb -dddddd $TESTPOOL/$id $obj failed (file1 not in zdb output)" obj=$(printf "0x%X" $obj) log_note "zdb -NNNNNN $TESTPOOL/$id $obj" output=$(zdb -NNNNNN $TESTPOOL/$id $obj) echo $output | grep -q "$TESTPOOL/$TESTFS" || log_fail "zdb -NNNNNN $TESTPOOL/$id $obj failed ($TESTPOOL/$TESTFS not in zdb output)" echo $output | grep -q "file1" || log_fail "zdb -NNNNNN $TESTPOOL/$id $obj failed (file1 not in zdb output)" done -if is_linux; then - output=$(ls -1 /proc/spl/kstat/zfs/$TESTPOOL | grep objset- | tail -1) - objset_hex=${output#*-} - name_from_proc=$(grep dataset_name /proc/spl/kstat/zfs/$TESTPOOL/$output | cut -d' ' -f3) - log_note "checking zdb output for $name_from_proc" - log_must eval "zdb -dddddd $TESTPOOL/$objset_hex | grep -q \"$name_from_proc\"" -fi +name_from_proc=$(kstat_dataset -N $TESTPOOL/$objset_id dataset_name) +log_note "checking zdb output for $name_from_proc" +log_must eval "zdb -dddddd $TESTPOOL/$objset_hex | grep -q \"$name_from_proc\"" log_must zfs create $hex_ds log_must zfs create $num_ds 
log_must eval "zdb -d $hex_ds | grep -q \"$hex_ds\"" log_must eval "zdb -d $num_ds | grep -q \"$num_ds\"" # force numeric interpretation, expect fail log_mustnot zdb -N $hex_ds log_mustnot zdb -N $num_ds log_mustnot zdb -Nd $hex_ds log_mustnot zdb -Nd $num_ds log_pass "zdb -d / generates the correct names." diff --git a/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh b/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh index f1561b7282e5..5c165523fefd 100755 --- a/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh +++ b/tests/zfs-tests/tests/functional/deadman/deadman_sync.ksh @@ -1,90 +1,86 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2017 by Lawrence Livermore National Security, LLC. # Use is subject to license terms. # # DESCRIPTION: # Verify spa deadman detects a hung txg # # STRATEGY: # 1. Reduce the zfs_deadman_synctime_ms to 5s. # 2. Reduce the zfs_deadman_checktime_ms to 1s. # 3. Inject a 10s zio delay to force long IOs. # 4. Write enough data to force a long txg sync time due to the delay. # 5. Verify a "deadman" event is posted. # . $STF_SUITE/include/libtest.shlib . 
$STF_SUITE/tests/functional/deadman/deadman.cfg verify_runnable "both" function cleanup { log_must zinject -c all default_cleanup_noexit log_must set_tunable64 DEADMAN_SYNCTIME_MS $SYNCTIME_DEFAULT log_must set_tunable64 DEADMAN_CHECKTIME_MS $CHECKTIME_DEFAULT log_must set_tunable64 DEADMAN_FAILMODE $FAILMODE_DEFAULT } log_assert "Verify spa deadman detects a hung txg" log_onexit cleanup log_must set_tunable64 DEADMAN_SYNCTIME_MS 5000 log_must set_tunable64 DEADMAN_CHECKTIME_MS 1000 log_must set_tunable64 DEADMAN_FAILMODE "wait" # Create a new pool in order to use the updated deadman settings. default_setup_noexit $DISK1 log_must zpool events -c # Force each IO to take 10s but allow them to run concurrently. log_must zinject -d $DISK1 -D10000:10 $TESTPOOL mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS) log_must file_write -b 1048576 -c 8 -o create -d 0 -f $mntpnt/file sleep 10 log_must zinject -c all sync_all_pools # Log txg sync times for reference and the zpool event summary. -if is_freebsd; then - log_must sysctl -n kstat.zfs.$TESTPOOL.txgs -else - log_must cat /proc/spl/kstat/zfs/$TESTPOOL/txgs -fi +log_must kstat_pool $TESTPOOL txgs log_must zpool events # Verify at least 3 deadman events were logged. The first after 5 seconds, # and another each second thereafter until the delay is clearer. events=$(zpool events | grep -c ereport.fs.zfs.deadman) if [ "$events" -lt 3 ]; then log_fail "Expect >=3 deadman events, $events found" fi log_pass "Verify spa deadman detected a hung txg and $events deadman events" diff --git a/tests/zfs-tests/tests/functional/direct/dio.kshlib b/tests/zfs-tests/tests/functional/direct/dio.kshlib index 5b3f893e1ce1..49c43a0aaca3 100644 --- a/tests/zfs-tests/tests/functional/direct/dio.kshlib +++ b/tests/zfs-tests/tests/functional/direct/dio.kshlib @@ -1,333 +1,290 @@ # # CDDL HEADER START # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. 
# You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # CDDL HEADER END # # # Copyright (c) 2021 by Lawrence Livermore National Security, LLC. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/direct/dio.cfg function dio_cleanup { if poolexists $TESTPOOL1; then destroy_pool $TESTPOOL1 fi rm -f $DIO_VDEVS } # # Generate an IO workload using fio and then verify the resulting data. # function dio_and_verify # mode file-size block-size directory ioengine extra-args { typeset mode=$1 typeset size=$2 typeset bs=$3 typeset mntpnt=$4 typeset ioengine=$5 typeset extra_args=$6 # Invoke an fio workload via Direct I/O and verify with Direct I/O. log_must fio --directory=$mntpnt --name=direct-$mode \ --rw=$mode --size=$size --bs=$bs --direct=1 --numjobs=1 \ --verify=sha1 --ioengine=$ioengine --fallocate=none \ --group_reporting --minimal --do_verify=1 $extra_args # Now just read back the file without Direct I/O into the ARC as an # additional verfication step. 
log_must fio --directory=$mntpnt --name=direct-$mode \ --rw=read --size=$size --bs=$bs --direct=0 --numjobs=1 \ --ioengine=$ioengine --group_reporting --minimal log_must rm -f "$mntpnt/direct-*" } # # Get zpool status -d checksum verify failures # function get_zpool_status_chksum_verify_failures # pool_name vdev_type { typeset pool=$1 typeset vdev_type=$2 if [[ "$vdev_type" == "stripe" ]]; then val=$(zpool status -dp $pool | \ awk '{s+=$6} END {print s}' ) elif [[ "$vdev_type" == "mirror" || "$vdev_type" == "raidz" || "$vdev_type" == "draid" ]]; then val=$(zpool status -dp $pool | \ awk -v d="$vdev_type" '$0 ~ d {print $6}' ) else log_fail "Unsupported VDEV type in \ get_zpool_status_chksum_verify_failures(): $vdev_type" fi echo "$val" } # # Get ZED dio_verify events # function get_zed_dio_verify_events # pool { typeset pool=$1 typeset op=$2 val=$(zpool events $pool | grep -c "dio_verify_${op}") echo "$val" } # # Checking for checksum verify write failures with: # zpool status -d # zpool events # After getting that counts will clear the out the ZPool errors and events # function check_dio_chksum_verify_failures # pool vdev_type op expect_errors { typeset pool=$1 typeset vdev_type=$2 typeset expect_errors=$3 typeset op=$4 typeset note_str="expecting none" if [[ $expect_errors -ne 0 ]]; then note_str="expecting some" fi log_note "Checking for Direct I/O write checksum verify errors \ $note_str on ZPool: $pool with $vdev_type" status_failures=$(get_zpool_status_chksum_verify_failures $pool $vdev_type) zed_dio_verify_events=$(get_zed_dio_verify_events $pool $op) if [[ $expect_errors -ne 0 ]]; then if [[ $status_failures -eq 0 || $zed_dio_verify_events -eq 0 ]]; then zpool status -dp $pool zpool events $pool log_fail "Checksum verifies in zpool status -d \ $status_failures. ZED dio_verify events \ $zed_dio_verify_events. Neither should be 0." 
fi else if [[ $status_failures -ne 0 || $zed_dio_verify_events -ne 0 ]]; then zpool status -dp $pool zpool events $pool log_fail "Checksum verifies in zpool status -d \ $status_failures. ZED dio_verify events \ $zed_dio_verify_events. Both should be zero." fi fi log_must zpool clear $pool log_must zpool events -c } -# -# Get the value of a counter from -# Linux: /proc/spl/kstat/zfs/$pool/iostats file. -# FreeBSD: kstat.zfs.$pool.msic.iostats.$stat -# -function get_iostats_stat # pool stat -{ - typeset pool=$1 - typeset stat=$2 - - if is_linux; then - iostats_file=/proc/spl/kstat/zfs/$pool/iostats - val=$(grep -m1 "$stat" $iostats_file | awk '{ print $3 }') - else - val=$(sysctl -n kstat.zfs.$pool.misc.iostats.$stat) - fi - if [[ -z "$val" ]]; then - log_fail "Unable to read $stat counter" - fi - - echo "$val" -} - # # Evict any buffered blocks by overwritting them using an O_DIRECT request. # function evict_blocks { typeset pool=$1 typeset file=$2 typeset size=$3 log_must stride_dd -i /dev/urandom -o $file -b $size -c 1 -D } # # Perform FIO Direct I/O writes to a file with the given arguments. # Then verify thae minimum expected number of blocks were written as # Direct I/O. 
# function verify_dio_write_count #pool bs size mnpnt { typeset pool=$1 typeset bs=$2 typeset size=$3 typeset mntpnt=$4 typeset dio_wr_expected=$(((size / bs) -1)) log_note "Checking for $dio_wr_expected Direct I/O writes" - prev_dio_wr=$(get_iostats_stat $pool direct_write_count) + prev_dio_wr=$(kstat_pool $pool iostats.direct_write_count) dio_and_verify write $size $bs $mntpnt "sync" - curr_dio_wr=$(get_iostats_stat $pool direct_write_count) + curr_dio_wr=$(kstat_pool $pool iostats.direct_write_count) dio_wr_actual=$((curr_dio_wr - prev_dio_wr)) if [[ $dio_wr_actual -lt $dio_wr_expected ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats log_fail "Direct writes $dio_wr_actual of $dio_wr_expected" fi } # # Perform a stride_dd write command to the file with the given arguments. # Then verify the minimum expected number of blocks were written as either # buffered IO (by the ARC), or Direct I/O to the application (dd). 
# function check_write # pool file bs count seek flags buf_wr dio_wr { typeset pool=$1 typeset file=$2 typeset bs=$3 typeset count=$4 typeset seek=$5 typeset flags=$6 typeset buf_wr_expect=$7 typeset dio_wr_expect=$8 log_note "Checking $count * $bs write(s) at offset $seek, $flags" - prev_buf_wr=$(get_iostats_stat $pool arc_write_count) - prev_dio_wr=$(get_iostats_stat $pool direct_write_count) + prev_buf_wr=$(kstat_pool $pool iostats.arc_write_count) + prev_dio_wr=$(kstat_pool $pool iostats.direct_write_count) log_must stride_dd -i /dev/urandom -o $file -b $bs -c $count \ -k $seek $flags - curr_buf_wr=$(get_iostats_stat $pool arc_write_count) + curr_buf_wr=$(kstat_pool $pool iostats.arc_write_count) buf_wr_actual=$((curr_buf_wr - prev_buf_wr)) - curr_dio_wr=$(get_iostats_stat $pool direct_write_count) + curr_dio_wr=$(kstat_pool $pool iostats.direct_write_count) dio_wr_actual=$((curr_dio_wr - prev_dio_wr)) if [[ $buf_wr_actual -lt $buf_wr_expect ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats log_fail "Buffered writes $buf_wr_actual of $buf_wr_expect" fi if [[ $dio_wr_actual -lt $dio_wr_expect ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats log_fail "Direct writes $dio_wr_actual of $dio_wr_expect" fi } # # Perform a stride_dd read command to the file with the given arguments. # Then verify the minimum expected number of blocks were read as either # buffered IO (by the ARC), or Direct I/O to the application (dd). 
# function check_read # pool file bs count skip flags buf_rd dio_rd { typeset pool=$1 typeset file=$2 typeset bs=$3 typeset count=$4 typeset skip=$5 typeset flags=$6 typeset buf_rd_expect=$7 typeset dio_rd_expect=$8 log_note "Checking $count * $bs read(s) at offset $skip, $flags" - prev_buf_rd=$(get_iostats_stat $pool arc_read_count) - prev_dio_rd=$(get_iostats_stat $pool direct_read_count) + prev_buf_rd=$(kstat_pool $pool iostats.arc_read_count) + prev_dio_rd=$(kstat_pool $pool iostats.direct_read_count) log_must stride_dd -i $file -o /dev/null -b $bs -c $count \ -p $skip $flags - curr_buf_rd=$(get_iostats_stat $pool arc_read_count) + curr_buf_rd=$(kstat_pool $pool iostats.arc_read_count) buf_rd_actual=$((curr_buf_rd - prev_buf_rd)) - curr_dio_rd=$(get_iostats_stat $pool direct_read_count) + curr_dio_rd=$(kstat_pool $pool iostats.direct_read_count) dio_rd_actual=$((curr_dio_rd - prev_dio_rd)) if [[ $buf_rd_actual -lt $buf_rd_expect ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats log_fail "Buffered reads $buf_rd_actual of $buf_rd_expect" fi if [[ $dio_rd_actual -lt $dio_rd_expect ]]; then - if is_linux; then - cat /proc/spl/kstat/zfs/$pool/iostats - else - sysctl kstat.zfs.$pool.misc.iostats - fi + kstat_pool -g $pool iostats log_fail "Direct reads $dio_rd_actual of $dio_rd_expect" fi } function get_file_size { typeset filename="$1" if is_linux; then filesize=$(stat -c %s $filename) else filesize=$(stat -s $filename | awk '{print $8}' | grep -o '[0-9]\+') fi echo $filesize } function do_truncate_reduce { typeset filename=$1 typeset size=$2 filesize=$(get_file_size $filename) eval "echo original filesize: $filesize" if is_linux; then truncate $filename -s $((filesize - size)) else truncate -s -$size $filename fi filesize=$(get_file_size $filename) eval "echo new filesize after truncate: $filesize" } diff --git 
a/tests/zfs-tests/tests/functional/direct/dio_read_verify.ksh b/tests/zfs-tests/tests/functional/direct/dio_read_verify.ksh index 456d429b1d99..67e0b4a7c700 100755 --- a/tests/zfs-tests/tests/functional/direct/dio_read_verify.ksh +++ b/tests/zfs-tests/tests/functional/direct/dio_read_verify.ksh @@ -1,107 +1,107 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2024 by Triad National Security, LLC. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/direct/dio.cfg . $STF_SUITE/tests/functional/direct/dio.kshlib # # DESCRIPTION: # Verify checksum verify works for Direct I/O reads. # # STRATEGY: # 1. Create a zpool from each vdev type. # 2. Start a Direct I/O read workload while manipulating the user buffer # contents. # 3. Verify there are Direct I/O read verify failures using # zpool status -d and checking for zevents. We also make sure there # are reported no data errors. # verify_runnable "global" log_assert "Verify checksum verify works for Direct I/O reads." 
log_onexit dio_cleanup NUMBLOCKS=300 BS=$((128 * 1024)) # 128k log_must truncate -s $MINVDEVSIZE $DIO_VDEVS # We will verify that there are no checksum errors for every Direct I/O read # while manipulating the buffer contents while the I/O is still in flight and # also that Direct I/O checksum verify failures and dio_verify_rd zevents are # reported. for type in "" "mirror" "raidz" "draid"; do typeset vdev_type=$type if [[ "${vdev_type}" == "" ]]; then vdev_type="stripe" fi log_note "Verifying every Direct I/O read verify with VDEV type \ ${vdev_type}" create_pool $TESTPOOL1 $type $DIO_VDEVS log_must eval "zfs create -o recordsize=128k -o compression=off \ $TESTPOOL1/$TESTFS1" mntpnt=$(get_prop mountpoint $TESTPOOL1/$TESTFS1) - prev_dio_rd=$(get_iostats_stat $TESTPOOL1 direct_read_count) - prev_arc_rd=$(get_iostats_stat $TESTPOOL1 arc_read_count) + prev_dio_rd=$(kstat_pool $TESTPOOL1 iostats.direct_read_count) + prev_arc_rd=$(kstat_pool $TESTPOOL1 iostats.arc_read_count) # Create the file before trying to manipulate the contents log_must stride_dd -o "$mntpnt/direct-write.iso" -i /dev/urandom \ -b $BS -c $NUMBLOCKS -D # Manipulate the buffer contents will reading the file with Direct I/O log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \ -n $NUMBLOCKS -b $BS -r # Getting new Direct I/O and ARC Write counts. 
- curr_dio_rd=$(get_iostats_stat $TESTPOOL1 direct_read_count) - curr_arc_rd=$(get_iostats_stat $TESTPOOL1 arc_read_count) + curr_dio_rd=$(kstat_pool $TESTPOOL1 iostats.direct_read_count) + curr_arc_rd=$(kstat_pool $TESTPOOL1 iostats.arc_read_count) total_dio_rd=$((curr_dio_rd - prev_dio_rd)) total_arc_rd=$((curr_arc_rd - prev_arc_rd)) log_note "Making sure there are no checksum errors with the ZPool" log_must check_pool_status $TESTPOOL "errors" "No known data errors" log_note "Making sure we have Direct I/O and ARC reads logged" if [[ $total_dio_rd -lt 1 ]]; then log_fail "No Direct I/O reads $total_dio_rd" fi if [[ $total_arc_rd -lt 1 ]]; then log_fail "No ARC reads $total_arc_rd" fi log_note "Making sure we have Direct I/O write checksum verifies with ZPool" check_dio_chksum_verify_failures "$TESTPOOL1" "$vdev_type" 1 "rd" destroy_pool $TESTPOOL1 done log_pass "Verified checksum verify works for Direct I/O reads." diff --git a/tests/zfs-tests/tests/functional/direct/dio_unaligned_filesize.ksh b/tests/zfs-tests/tests/functional/direct/dio_unaligned_filesize.ksh index 8bb363f1a983..6e2982ad7d46 100755 --- a/tests/zfs-tests/tests/functional/direct/dio_unaligned_filesize.ksh +++ b/tests/zfs-tests/tests/functional/direct/dio_unaligned_filesize.ksh @@ -1,91 +1,91 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
# If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2022 by Triad National Security, LLC. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/direct/dio.cfg . $STF_SUITE/tests/functional/direct/dio.kshlib # # DESCRIPTION: # Verify Direct I/O reads can read an entire file that is not # page-aligned in length. When a file is not page-aligned in total # length, as much that can be read using using O_DIRECT is done so and # the rest is read using the ARC. O_DIRECT requires page-size alignment. # # STRATEGY: # 1. Write a file that is page-aligned (buffered) # 2. Truncate the file to be 512 bytes less # 3. Export then import the Zpool flushing out the ARC # 4. Read back the file using O_DIRECT # 5. Verify the file is read back with both Direct I/O and buffered I/O # verify_runnable "global" function cleanup { log_must rm -f "$filename" log_must set recordsize=$rs $TESTPOOL/$TESTFS } log_assert "Verify Direct I/O reads can read an entire file that is not \ page-aligned" log_onexit cleanup mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS) rs=$(get_prop recordsize $TESTPOOL/$TESTFS) log_must zfs set recordsize=128k $TESTPOOL/$TESTFS bs=$((128 * 1024)) # bs=recordsize (128k) filename="$mntpnt/testfile.iso" log_must stride_dd -i /dev/urandom -o $filename -b $bs -c 2 # Truncating file so the total length is no longer page-size aligned log_must do_truncate_reduce $filename 512 # Exporting the Zpool to make sure all future reads happen from the ARC log_must zpool export $TESTPOOL log_must zpool import $TESTPOOL # Reading the file back using Direct I/O -prev_dio_read=$(get_iostats_stat $TESTPOOL direct_read_count) -prev_arc_read=$(get_iostats_stat $TESTPOOL arc_read_count) +prev_dio_read=$(kstat_pool $TESTPOOL iostats.direct_read_count) +prev_arc_read=$(kstat_pool $TESTPOOL 
iostats.arc_read_count) log_must stride_dd -i $filename -o /dev/null -b $bs -e -d -curr_dio_read=$(get_iostats_stat $TESTPOOL direct_read_count) -curr_arc_read=$(get_iostats_stat $TESTPOOL arc_read_count) +curr_dio_read=$(kstat_pool $TESTPOOL iostats.direct_read_count) +curr_arc_read=$(kstat_pool $TESTPOOL iostats.arc_read_count) total_dio_read=$((curr_dio_read - prev_dio_read)) total_arc_read=$((curr_arc_read - prev_arc_read)) # We should see both Direct I/O reads an ARC read to read the entire file that # is not page-size aligned if [[ $total_dio_read -lt 2 ]] || [[ $total_arc_read -lt 1 ]]; then log_fail "Expect 2 reads from Direct I/O and 1 from the ARC but \ Direct I/O: $total_dio_read ARC: $total_arc_read" fi log_pass "Verified Direct I/O read can read a none page-aligned length file" diff --git a/tests/zfs-tests/tests/functional/direct/dio_write_stable_pages.ksh b/tests/zfs-tests/tests/functional/direct/dio_write_stable_pages.ksh index ccdabc678a68..3d7f7089d7c8 100755 --- a/tests/zfs-tests/tests/functional/direct/dio_write_stable_pages.ksh +++ b/tests/zfs-tests/tests/functional/direct/dio_write_stable_pages.ksh @@ -1,103 +1,103 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
# If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2022 by Triad National Security, LLC. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/direct/dio.cfg . $STF_SUITE/tests/functional/direct/dio.kshlib # # DESCRIPTION: # Verify stable pages work for O_DIRECT writes. # # STRATEGY: # 1. Start a Direct I/O write workload while manipulating the user # buffer. # 2. Verify we can Read the contents of the file using buffered reads. # 3. Verify there is no checksum errors reported from zpool status. # 4. Repeat steps 1 and 2 for 3 iterations. # 5. Repeat 1-3 but with compression disabled. # verify_runnable "global" function cleanup { log_must rm -f "$mntpnt/direct-write.iso" check_dio_chksum_verify_failures $TESTPOOL "raidz" 0 "wr" } log_assert "Verify stable pages work for Direct I/O writes." if is_linux; then log_unsupported "Linux does not support stable pages for O_DIRECT \ writes" fi log_onexit cleanup ITERATIONS=3 NUMBLOCKS=300 BS=$((128 * 1024)) #128k mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS) log_must zfs set recordsize=128k $TESTPOOL/$TESTFS for compress in "on" "off"; do log_must zfs set compression=$compress $TESTPOOL/$TESTFS for i in $(seq 1 $ITERATIONS); do log_note "Verifying stable pages for Direct I/O writes \ iteration $i of $ITERATIONS" - prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + prev_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) # Manipulate the user's buffer while running O_DIRECT write # workload with the buffer. 
log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \ -n $NUMBLOCKS -b $BS -w # Reading back the contents of the file log_must stride_dd -i $mntpnt/direct-write.iso -o /dev/null \ -b $BS -c $NUMBLOCKS - curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + curr_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) total_dio_wr=$((curr_dio_wr - prev_dio_wr)) log_note "Making sure we have Direct I/O writes logged" if [[ $total_dio_wr -lt 1 ]]; then log_fail "No Direct I/O writes $total_dio_wr" fi # Making sure there are no data errors for the zpool log_note "Making sure there are no checksum errors with the ZPool" log_must check_pool_status $TESTPOOL "errors" \ "No known data errors" log_must rm -f "$mntpnt/direct-write.iso" done done log_pass "Verified stable pages work for Direct I/O writes." diff --git a/tests/zfs-tests/tests/functional/direct/dio_write_verify.ksh b/tests/zfs-tests/tests/functional/direct/dio_write_verify.ksh index 4eb9efe95ef1..1c1565cbbefb 100755 --- a/tests/zfs-tests/tests/functional/direct/dio_write_verify.ksh +++ b/tests/zfs-tests/tests/functional/direct/dio_write_verify.ksh @@ -1,196 +1,196 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
# If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2022 by Triad National Security, LLC. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/direct/dio.cfg . $STF_SUITE/tests/functional/direct/dio.kshlib # # DESCRIPTION: # Verify checksum verify works for Direct I/O writes. # # STRATEGY: # 1. Set the module parameter zfs_vdev_direct_write_verify to 0. # 2. Check that manipulating the user buffer while Direct I/O writes are # taking place does not cause any panics with compression turned on. # 3. Start a Direct I/O write workload while manipulating the user buffer # without compression. # 4. Verify there are Direct I/O write verify failures using # zpool status -d and checking for zevents. We also make sure there # are reported data errors when reading the file back. # 5. Repeat steps 3 and 4 for 3 iterations. # 6. Set zfs_vdev_direct_write_verify set to 1 and repeat 3. # 7. Verify there are Direct I/O write verify failures using # zpool status -d and checking for zevents. We also make sure there # there are no reported data errors when reading the file back because # with us checking every Direct I/O write and on checksum validation # failure those writes will not be committed to a VDEV. # verify_runnable "global" function cleanup { # Clearing out DIO counts for Zpool log_must zpool clear $TESTPOOL # Clearing out dio_verify from event logs log_must zpool events -c log_must set_tunable32 VDEV_DIRECT_WR_VERIFY $DIO_WR_VERIFY_TUNABLE } log_assert "Verify checksum verify works for Direct I/O writes." 
if is_freebsd; then log_unsupported "FreeBSD is capable of stable pages for O_DIRECT writes" fi log_onexit cleanup ITERATIONS=3 NUMBLOCKS=300 BS=$((128 * 1024)) # 128k mntpnt=$(get_prop mountpoint $TESTPOOL/$TESTFS) typeset DIO_WR_VERIFY_TUNABLE=$(get_tunable VDEV_DIRECT_WR_VERIFY) # Get a list of vdevs in our pool set -A array $(get_disklist_fullpath $TESTPOOL) # Get the first vdev firstvdev=${array[0]} log_must zfs set recordsize=128k $TESTPOOL/$TESTFS log_must set_tunable32 VDEV_DIRECT_WR_VERIFY 0 # First we will verify there are no panics while manipulating the contents of # the user buffer during Direct I/O writes with compression. The contents # will always be copied out of the ABD and there should never be any ABD ASSERT # failures log_note "Verifying no panics for Direct I/O writes with compression" log_must zfs set compression=on $TESTPOOL/$TESTFS -prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) +prev_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" -n $NUMBLOCKS \ -b $BS -w -curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) +curr_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) total_dio_wr=$((curr_dio_wr - prev_dio_wr)) log_note "Making sure we have Direct I/O writes logged" if [[ $total_dio_wr -lt 1 ]]; then log_fail "No Direct I/O writes $total_dio_wr" fi # Clearing out DIO counts for Zpool log_must zpool clear $TESTPOOL # Clearing out dio_verify from event logs log_must zpool events -c log_must rm -f "$mntpnt/direct-write.iso" # Next we will verify there are checksum errors for Direct I/O writes while # manipulating the contents of the user pages. 
log_must zfs set compression=off $TESTPOOL/$TESTFS for i in $(seq 1 $ITERATIONS); do log_note "Verifying Direct I/O write checksums iteration \ $i of $ITERATIONS with zfs_vdev_direct_write_verify=0" - prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + prev_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \ -n $NUMBLOCKS -b $BS -w # Reading file back to verify checksum errors filesize=$(get_file_size "$mntpnt/direct-write.iso") num_blocks=$((filesize / BS)) log_mustnot stride_dd -i "$mntpnt/direct-write.iso" -o /dev/null -b $BS \ -c $num_blocks # Getting new Direct I/O and ARC write counts. - curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + curr_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) total_dio_wr=$((curr_dio_wr - prev_dio_wr)) # Verifying there are checksum errors log_note "Making sure there are checksum errors for the ZPool" cksum=$(zpool status -P -v $TESTPOOL | awk -v v="$firstvdev" '$0 ~ v \ {print $5}') if [[ $cksum -eq 0 ]]; then zpool status -P -v $TESTPOOL log_fail "No checksum failures for ZPool $TESTPOOL" fi log_note "Making sure we have Direct I/O writes logged" if [[ $total_dio_wr -lt 1 ]]; then log_fail "No Direct I/O writes $total_dio_wr" fi log_note "Making sure we have no Direct I/O write checksum verifies \ with ZPool" check_dio_chksum_verify_failures $TESTPOOL "raidz" 0 "wr" log_must rm -f "$mntpnt/direct-write.iso" done log_must zpool status -v $TESTPOOL log_must zpool sync $TESTPOOL # Finally we will verfiy that with checking every Direct I/O write we have no # errors at all. 
# Create the file before trying to manipulate the contents log_must file_write -o create -f "$mntpnt/direct-write.iso" -b $BS \ -c $NUMBLOCKS -w log_must set_tunable32 VDEV_DIRECT_WR_VERIFY 1 for i in $(seq 1 $ITERATIONS); do log_note "Verifying every Direct I/O write checksums iteration $i of \ $ITERATIONS with zfs_vdev_direct_write_verify=1" - prev_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + prev_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) log_must manipulate_user_buffer -f "$mntpnt/direct-write.iso" \ -n $NUMBLOCKS -b $BS -e -w # Reading file back to verify there no are checksum errors filesize=$(get_file_size "$mntpnt/direct-write.iso") num_blocks=$((filesize / BS)) log_must stride_dd -i "$mntpnt/direct-write.iso" -o /dev/null -b $BS \ -c $num_blocks # Getting new Direct I/O write counts. - curr_dio_wr=$(get_iostats_stat $TESTPOOL direct_write_count) + curr_dio_wr=$(kstat_pool $TESTPOOL iostats.direct_write_count) total_dio_wr=$((curr_dio_wr - prev_dio_wr)) log_note "Making sure there are no checksum errors with the ZPool" log_must check_pool_status $TESTPOOL "errors" "No known data errors" log_note "Making sure we have Direct I/O writes logged" if [[ $total_dio_wr -lt 1 ]]; then log_fail "No Direct I/O writes $total_dio_wr" fi log_note "Making sure we have Direct I/O write checksum verifies with ZPool" check_dio_chksum_verify_failures "$TESTPOOL" "raidz" 1 "wr" done log_must rm -f "$mntpnt/direct-write.iso" log_pass "Verified checksum verify works for Direct I/O writes." 
diff --git a/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh b/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh index 7b7d1d379ac6..daeb93273a54 100755 --- a/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh +++ b/tests/zfs-tests/tests/functional/fadvise/fadvise_sequential.ksh @@ -1,80 +1,76 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Portions Copyright (c) 2022 Information2 Software, Inc. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/include/math.shlib # # DESCRIPTION: # Test posix_fadvise. # # STRATEGY: # 1. Set primarycache to metadata in order to disable prefetch # 2. Write some data to file # 3. get data_size field from arcstat # 4. call file_fadvise with POSIX_FADV_SEQUENTIAL # 5. get data_size field from arcstat again # 6. 
latter data_size should be bigger than former one # # NOTE: if HAVE_FILE_FADVISE is not defined former data_size # should less or eaqul to latter one verify_runnable "global" FILE=$TESTDIR/$TESTFILE0 BLKSZ=$(get_prop recordsize $TESTPOOL) function cleanup { log_must zfs set primarycache=all $TESTPOOL [[ -e $TESTDIR ]] && log_must rm -Rf $TESTDIR/* } -getstat() { - awk -v c="$1" '$1 == c {print $3; exit}' /proc/spl/kstat/zfs/arcstats -} - log_assert "Ensure fadvise prefetch data" log_onexit cleanup log_must zfs set primarycache=metadata $TESTPOOL log_must file_write -o create -f $FILE -b $BLKSZ -c 1000 sync_pool $TESTPOOL -data_size1=$(getstat data_size) +data_size1=$(kstat arcstats.data_size) log_must file_fadvise -f $FILE -a 2 sleep 10 -data_size2=$(getstat data_size) +data_size2=$(kstat arcstats.data_size) log_note "original data_size is $data_size1, final data_size is $data_size2" log_must [ $data_size1 -le $data_size2 ] log_pass "Ensure data could be prefetched" diff --git a/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh b/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh index d9261bb5d274..3f6edad6da9b 100755 --- a/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh +++ b/tests/zfs-tests/tests/functional/fault/suspend_on_probe_errors.ksh @@ -1,154 +1,154 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. 
# If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2024, Klara Inc. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/include/blkdev.shlib # # DESCRIPTION: Verify that 4 disks removed from a raidz3 will suspend the pool # # STRATEGY: # 1. Disable ZED -- this test is focused on vdev_probe errors # 2. Create a raidz3 pool where 4 disks can be removed (i.e., using scsi_debug) # 3. Add some data to it for a resilver workload # 4. Replace one of the child vdevs to start a replacing vdev # 5. During the resilver, remove 4 disks including one from the replacing vdev # 6. Verify that the pool is suspended (it used to remain online) # DEV_SIZE_MB=1024 FILE_VDEV_CNT=8 FILE_VDEV_SIZ=256M function cleanup { destroy_pool $TESTPOOL if [[ "$(cat /sys/block/$sd/device/state)" == "offline" ]]; then log_must eval "echo running > /sys/block/$sd/device/state" fi unload_scsi_debug rm -f $DATA_FILE for i in {0..$((FILE_VDEV_CNT - 1))}; do log_must rm -f "$TEST_BASE_DIR/dev-$i" done log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0 zed_start } log_onexit cleanup log_assert "VDEV probe errors for more disks than parity should suspend a pool" log_note "Stoping ZED process" zed_stop zpool events -c # Make a debug device that we can "unplug" and lose 4 drives at once unload_scsi_debug load_scsi_debug $DEV_SIZE_MB 1 1 1 '512b' sd=$(get_debug_device) # Create 4 partitions that match the FILE_VDEV_SIZ parted "/dev/${sd}" --script mklabel gpt parted "/dev/${sd}" --script mkpart primary 0% 25% parted "/dev/${sd}" --script mkpart primary 25% 50% parted "/dev/${sd}" --script mkpart primary 50% 75% parted "/dev/${sd}" --script mkpart primary 75% 100% block_device_wait "/dev/${sd}" blkdevs="/dev/${sd}1 /dev/${sd}2 /dev/${sd}3 /dev/${sd}4" # Create 8 file vdevs typeset -a filedevs for i in {0..$((FILE_VDEV_CNT - 
1))}; do device=$TEST_BASE_DIR/dev-$i log_must truncate -s $FILE_VDEV_SIZ $device # Use all but the last one for pool create if [[ $i -lt "7" ]]; then filedevs[${#filedevs[*]}+1]=$device fi done # Create a raidz-3 pool that we can pull 4 disks from log_must zpool create -f $TESTPOOL raidz3 ${filedevs[@]} $blkdevs sync_pool $TESTPOOL # Add some data to the pool log_must zfs create $TESTPOOL/fs MNTPOINT="$(get_prop mountpoint $TESTPOOL/fs)" SECONDS=0 log_must fill_fs $MNTPOINT 1 200 4096 10 Z log_note "fill_fs took $SECONDS seconds" sync_pool $TESTPOOL # Start a replacing vdev, but suspend the resilver log_must set_tunable32 SCAN_SUSPEND_PROGRESS 1 log_must zpool replace -f $TESTPOOL /dev/${sd}4 $TEST_BASE_DIR/dev-7 # Remove 4 disks all at once log_must eval "echo offline > /sys/block/${sd}/device/state" log_must set_tunable32 SCAN_SUSPEND_PROGRESS 0 # Add some writes to drive the vdev probe errors log_must dd if=/dev/urandom of=$MNTPOINT/writes bs=1M count=1 # Wait until sync starts, and the pool suspends log_note "waiting for pool to suspend" typeset -i tries=30 -until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; do +until [[ $(kstat_pool $TESTPOOL state) == "SUSPENDED" ]] ; do if ((tries-- == 0)); then zpool status -s log_fail "UNEXPECTED -- pool did not suspend" fi sleep 1 done -log_note $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) +log_note $(kstat_pool $TESTPOOL state) # Put the missing disks back into service log_must eval "echo running > /sys/block/$sd/device/state" # Clear the vdev error states, which will reopen the vdevs and resume the pool log_must zpool clear $TESTPOOL # Wait until the pool resumes log_note "waiting for pool to resume" tries=30 -until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) != "SUSPENDED" ]] ; do +until [[ $(kstat_pool $TESTPOOL state) != "SUSPENDED" ]] ; do if ((tries-- == 0)); then log_fail "pool did not resume" fi sleep 1 done log_must zpool wait -t resilver $TESTPOOL sync_pool $TESTPOOL # Make sure a pool 
scrub comes back clean log_must zpool scrub -w $TESTPOOL log_must zpool status -v $TESTPOOL log_must check_pool_status $TESTPOOL "errors" "No known data errors" log_pass "VDEV probe errors for more disks than parity should suspend a pool" diff --git a/tests/zfs-tests/tests/functional/fault/suspend_resume_single.ksh b/tests/zfs-tests/tests/functional/fault/suspend_resume_single.ksh index b67059158a57..0dc5584e4fd5 100755 --- a/tests/zfs-tests/tests/functional/fault/suspend_resume_single.ksh +++ b/tests/zfs-tests/tests/functional/fault/suspend_resume_single.ksh @@ -1,102 +1,100 @@ #!/bin/ksh -p # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2024, Klara Inc. # . 
$STF_SUITE/include/libtest.shlib -set -x - DATAFILE="$TMPDIR/datafile" function cleanup { destroy_pool $TESTPOOL unload_scsi_debug rm -f $DATA_FILE } log_onexit cleanup log_assert "ensure single-disk pool resumes properly after suspend and clear" # create a file, and take a checksum, so we can compare later log_must dd if=/dev/urandom of=$DATAFILE bs=128K count=1 typeset sum1=$(xxh128digest $DATAFILE) # make a debug device that we can "unplug" load_scsi_debug 100 1 1 1 '512b' sd=$(get_debug_device) # create a single-device pool log_must zpool create $TESTPOOL $sd log_must zpool sync # "pull" the disk log_must eval "echo offline > /sys/block/$sd/device/state" # copy data onto the pool. it'll appear to succeed, but only be in memory log_must cp $DATAFILE /$TESTPOOL/file # wait until sync starts, and the pool suspends log_note "waiting for pool to suspend" typeset -i tries=10 -until [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; do +until [[ $(kstat_pool $TESTPOOL state) == "SUSPENDED" ]] ; do if ((tries-- == 0)); then log_fail "pool didn't suspend" fi sleep 1 done # return the disk log_must eval "echo running > /sys/block/$sd/device/state" # clear the error states, which should reopen the vdev, get the pool back # online, and replay the failed IO log_must zpool clear $TESTPOOL # wait a while for everything to sync out. 
if something is going to go wrong, # this is where it will happen log_note "giving pool time to settle and complete txg" sleep 7 # if the pool suspended, then everything is bad -if [[ $(cat /proc/spl/kstat/zfs/$TESTPOOL/state) == "SUSPENDED" ]] ; then +if [[ $(kstat_pool $TESTPOOL state) == "SUSPENDED" ]] ; then log_fail "pool suspended" fi # export the pool, to make sure it exports clean, and also to clear the file # out of the cache log_must zpool export $TESTPOOL # import the pool log_must zpool import $TESTPOOL # sum the file we wrote earlier typeset sum2=$(xxh128digest /$TESTPOOL/file) # make sure the checksums match log_must test "$sum1" = "$sum2" log_pass "single-disk pool resumes properly after disk suspend and clear" diff --git a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_003_neg.ksh b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_003_neg.ksh index f8dc2b108f0d..14063658e3c5 100755 --- a/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_003_neg.ksh +++ b/tests/zfs-tests/tests/functional/l2arc/persist_l2arc_003_neg.ksh @@ -1,89 +1,88 @@ #!/bin/ksh -p # # CDDL HEADER START # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # CDDL HEADER END # # # Copyright (c) 2020, George Amanakis. All rights reserved. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/l2arc/l2arc.cfg # # DESCRIPTION: # Persistent L2ARC fails as expected when L2ARC_REBUILD_ENABLED = 0 # # STRATEGY: # 1. Set L2ARC_REBUILD_ENABLED = 0 # 2. Create pool with a cache device. # 3. Create a random file in that pool and random read for 10 sec. # 4. Export pool. # 5. Import pool. # 6. 
Check in zpool iostat if the cache device has space allocated. -# 7. Read the file written in (3) and check if l2_hits in -# /proc/spl/kstat/zfs/arcstats increased. +# 7. Read the file written in (3) and check if arcstats.l2_hits increased. # verify_runnable "global" command -v fio > /dev/null || log_unsupported "fio missing" log_assert "Persistent L2ARC fails as expected when L2ARC_REBUILD_ENABLED = 0." function cleanup { if poolexists $TESTPOOL ; then destroy_pool $TESTPOOL fi log_must set_tunable32 L2ARC_REBUILD_ENABLED $rebuild_enabled log_must set_tunable32 L2ARC_NOPREFETCH $noprefetch } log_onexit cleanup # L2ARC_NOPREFETCH is set to 0 to let L2ARC handle prefetches typeset noprefetch=$(get_tunable L2ARC_NOPREFETCH) log_must set_tunable32 L2ARC_NOPREFETCH 0 # disable L2ARC rebuild typeset rebuild_enabled=$(get_tunable L2ARC_REBUILD_ENABLED) log_must set_tunable32 L2ARC_REBUILD_ENABLED 0 typeset fill_mb=800 typeset cache_sz=$(( 2 * $fill_mb )) export FILE_SIZE=$(( floor($fill_mb / $NUMJOBS) ))M log_must truncate -s ${cache_sz}M $VDEV_CACHE log_must zpool create -f $TESTPOOL $VDEV cache $VDEV_CACHE log_must fio $FIO_SCRIPTS/mkfiles.fio log_must fio $FIO_SCRIPTS/random_reads.fio log_must zpool export $TESTPOOL typeset l2_success_start=$(get_arcstat l2_rebuild_success) log_must zpool import -d $VDIR $TESTPOOL log_mustnot test "$(zpool iostat -Hpv $TESTPOOL $VDEV_CACHE | awk '{print $2}')" -gt 80000000 typeset l2_success_end=$(get_arcstat l2_rebuild_success) log_mustnot test $l2_success_end -gt $l2_success_start log_must zpool destroy -f $TESTPOOL log_must set_tunable32 L2ARC_REBUILD_ENABLED $rebuild_enabled log_pass "Persistent L2ARC fails as expected when L2ARC_REBUILD_ENABLED = 0." 
diff --git a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib index 5071830c489a..01e4f2b735fa 100644 --- a/tests/zfs-tests/tests/functional/mmp/mmp.kshlib +++ b/tests/zfs-tests/tests/functional/mmp/mmp.kshlib @@ -1,283 +1,283 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or https://opensource.org/licenses/CDDL-1.0. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2017 by Lawrence Livermore National Security, LLC. # Use is subject to license terms. # . $STF_SUITE/include/libtest.shlib . 
$STF_SUITE/tests/functional/mmp/mmp.cfg function check_pool_import # pool opts token keyword { typeset pool=${1:-$MMP_POOL} typeset opts=$2 typeset token=$3 typeset keyword=$4 zpool import $opts 2>&1 | \ awk -v token="$token:" '($1==token) {print}' | \ grep -iq "$keyword" } function is_pool_imported # pool opts { typeset pool=${1:-$MMP_POOL} typeset opts=$2 check_pool_import "$pool" "$opts" "status" \ "The pool is currently imported" } function wait_pool_imported # pool opts { typeset pool=${1:-$MMP_POOL} typeset opts=$2 while is_pool_imported "$pool" "$opts"; do log_must sleep 5 done } function try_pool_import # pool opts message { typeset pool=${1:-$MMP_POOL} typeset opts=$2 typeset msg=$3 zpool import $opts $pool 2>&1 | grep -i "$msg" } function mmp_set_hostid { typeset hostid=$1 zgenhostid $1 [ $(hostid) = "$hostid" ] } function mmp_clear_hostid { rm -f $HOSTID_FILE } function mmp_pool_create_simple # pool dir { typeset pool=${1:-$MMP_POOL} typeset dir=${2:-$MMP_DIR} log_must mkdir -p $dir log_must rm -f $dir/* log_must truncate -s $MINVDEVSIZE $dir/vdev1 $dir/vdev2 log_must mmp_clear_hostid log_must mmp_set_hostid $HOSTID1 log_must zpool create -f -o cachefile=$MMP_CACHE $pool \ mirror $dir/vdev1 $dir/vdev2 log_must zpool set multihost=on $pool } function mmp_pool_create # pool dir { typeset pool=${1:-$MMP_POOL} typeset dir=${2:-$MMP_DIR} typeset opts="-VVVVV -T120 -M -k0 -f $dir -E -p $pool" mmp_pool_create_simple $pool $dir log_must mv $MMP_CACHE ${MMP_CACHE}.stale log_must zpool export $pool log_must mmp_clear_hostid log_must mmp_set_hostid $HOSTID2 log_note "Starting ztest in the background as hostid $HOSTID1" log_must eval "ZFS_HOSTID=$HOSTID1 ztest $opts >$MMP_ZTEST_LOG 2>&1 &" while ! 
is_pool_imported "$pool" "-d $dir"; do log_must pgrep ztest log_must sleep 5 done } function mmp_pool_destroy # pool dir { typeset pool=${1:-$MMP_POOL} typeset dir=${2:-$MMP_DIR} ZTESTPID=$(pgrep ztest) if [ -n "$ZTESTPID" ]; then log_must kill $ZTESTPID wait $ZTESTPID fi if poolexists $pool; then destroy_pool $pool fi log_must rm -f $dir/* mmp_clear_hostid } function mmp_pool_set_hostid # pool hostid { typeset pool=$1 typeset hostid=$2 log_must mmp_clear_hostid log_must mmp_set_hostid $hostid log_must zpool export $pool log_must zpool import $pool return 0 } function import_no_activity_check # pool opts { typeset pool=$1 typeset opts=$2 typeset max_duration=$((MMP_TEST_DURATION_DEFAULT-1)) SECONDS=0 zpool import $opts $pool typeset rc=$? if [[ $SECONDS -gt $max_duration ]]; then log_fail "ERROR: import_no_activity_check unexpected activity \ check (${SECONDS}s gt $max_duration)" fi return $rc } function import_activity_check # pool opts act_test_duration { typeset pool=$1 typeset opts=$2 typeset min_duration=${3:-$MMP_TEST_DURATION_DEFAULT} SECONDS=0 zpool import $opts $pool typeset rc=$? 
if [[ $SECONDS -le $min_duration ]]; then log_fail "ERROR: import_activity_check expected activity check \ (${SECONDS}s le min_duration $min_duration)" fi return $rc } function clear_mmp_history { log_must set_tunable64 MULTIHOST_HISTORY $MMP_HISTORY_OFF log_must set_tunable64 MULTIHOST_HISTORY $MMP_HISTORY } function count_skipped_mmp_writes # pool duration { typeset pool=$1 typeset -i duration=$2 - typeset hist_path="/proc/spl/kstat/zfs/$pool/multihost" sleep $duration - awk 'BEGIN {count=0}; $NF == "-" {count++}; END {print count};' "$hist_path" + kstat_pool $pool multihost | \ + awk 'BEGIN {count=0}; $NF == "-" {count++}; END {print count};' } function count_mmp_writes # pool duration { typeset pool=$1 typeset -i duration=$2 - typeset hist_path="/proc/spl/kstat/zfs/$pool/multihost" sleep $duration - awk 'BEGIN {count=0}; $NF != "-" {count++}; END {print count};' "$hist_path" + kstat_pool $pool multihost | \ + awk 'BEGIN {count=0}; $NF != "-" {count++}; END {print count};' } function summarize_uberblock_mmp # device { typeset device=$1 zdb -luuuu $device | awk ' BEGIN {write_fail_present=0; write_fail_missing=0; uber_invalid=0;} /Uberblock\[[0-9][0-9]*\]/ {delay=-99; write=-99; fail=-99; total++; if (/invalid/) {uber_invalid++};}; /mmp_fail/ {fail=$3}; /mmp_seq/ {seq=$3}; /mmp_write/ {write=$3}; /mmp_delay/ {delay=$3; if (delay==0) {delay_zero++};}; /mmp_valid/ && delay>0 && write>0 && fail>0 {write_fail_present++}; /mmp_valid/ && delay>0 && (write<=0 || fail<=0) {write_fail_missing++}; /mmp_valid/ && delay>0 && write<=0 {write_missing++}; /mmp_valid/ && delay>0 && fail<=0 {fail_missing++}; /mmp_valid/ && delay>0 && seq>0 {seq_nonzero++}; END { print "total_uberblocks " total; print "delay_zero " delay_zero; print "write_fail_present " write_fail_present; print "write_fail_missing " write_fail_missing; print "write_missing " write_missing; print "fail_missing " fail_missing; print "seq_nonzero " seq_nonzero; print "uberblock_invalid " uber_invalid; }' } function 
count_mmp_write_fail_present # device { typeset device=$1 summarize_uberblock_mmp $device | awk '/write_fail_present/ {print $NF}' } function count_mmp_write_fail_missing # device { typeset device=$1 summarize_uberblock_mmp $device | awk '/write_fail_missing/ {print $NF}' } function verify_mmp_write_fail_present # device { typeset device=$1 count=$(count_mmp_write_fail_present $device) log_note "present count: $count" if [ $count -eq 0 ]; then summarize_uberblock_mmp $device log_note "----- snip -----" zdb -luuuu $device log_note "----- snip -----" log_fail "No Uberblocks contain valid mmp_write and fail values" fi count=$(count_mmp_write_fail_missing $device) log_note "missing count: $count" if [ $count -gt 0 ]; then summarize_uberblock_mmp $device log_note "----- snip -----" zdb -luuuu $device log_note "----- snip -----" log_fail "Uberblocks missing mmp_write or mmp_fail" fi } diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh index 1ac254aa1dab..6f34974770d1 100755 --- a/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh +++ b/tests/zfs-tests/tests/functional/mmp/mmp_write_distribution.ksh @@ -1,92 +1,91 @@ #!/bin/ksh -p # # CDDL HEADER START # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # CDDL HEADER END # # # Copyright (c) 2017 by Lawrence Livermore National Security, LLC. # # DESCRIPTION: # Verify MMP writes are distributed evenly among leaves # # STRATEGY: # 1. Create an asymmetric mirrored pool # 2. Enable multihost and multihost_history # 3. Delay for MMP writes to occur # 4. 
Verify the MMP writes are distributed evenly across leaf vdevs # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/mmp/mmp.cfg . $STF_SUITE/tests/functional/mmp/mmp.kshlib verify_runnable "both" function cleanup { log_must zpool destroy $MMP_POOL log_must rm $MMP_DIR/file.{0..7} log_must rm $MMP_HISTORY_TMP log_must rmdir $MMP_DIR log_must mmp_clear_hostid } log_assert "mmp writes are evenly distributed across leaf vdevs" log_onexit cleanup MMP_HISTORY_TMP=$MMP_DIR/history -MMP_HISTORY=/proc/spl/kstat/zfs/$MMP_POOL/multihost # Step 1 log_must mkdir -p $MMP_DIR log_must truncate -s 128M $MMP_DIR/file.{0..7} log_must zpool create -f $MMP_POOL mirror $MMP_DIR/file.{0..1} mirror $MMP_DIR/file.{2..7} # Step 2 log_must mmp_set_hostid $HOSTID1 log_must zpool set multihost=on $MMP_POOL set_tunable64 MULTIHOST_HISTORY 0 set_tunable64 MULTIHOST_HISTORY 40 # Step 3 # default settings, every leaf written once/second sleep 4 # Step 4 typeset -i min_writes=999 typeset -i max_writes=0 typeset -i write_count # copy to get as close to a consistent view as possible -cp $MMP_HISTORY $MMP_HISTORY_TMP +kstat_pool $MMP_POOL multihost > $MMP_HISTORY_TMP for x in {0..7}; do write_count=$(grep -c file.${x} $MMP_HISTORY_TMP) if [ $write_count -lt $min_writes ]; then min_writes=$write_count fi if [ $write_count -gt $max_writes ]; then max_writes=$write_count fi done log_note "mmp min_writes $min_writes max_writes $max_writes" if [ $min_writes -lt 1 ]; then log_fail "mmp writes were not counted correctly" fi if [ $((max_writes - min_writes)) -gt 1 ]; then log_fail "mmp writes were not evenly distributed across leaf vdevs" fi log_pass "mmp writes were evenly distributed across leaf vdevs" diff --git a/tests/zfs-tests/tests/functional/mmp/mmp_write_slow_disk.ksh b/tests/zfs-tests/tests/functional/mmp/mmp_write_slow_disk.ksh index 8b118684aa7f..e45aedd450d2 100755 --- a/tests/zfs-tests/tests/functional/mmp/mmp_write_slow_disk.ksh +++ 
b/tests/zfs-tests/tests/functional/mmp/mmp_write_slow_disk.ksh
@@ -1,97 +1,98 @@
 #!/bin/ksh -p
 #
 # CDDL HEADER START
 #
 # This file and its contents are supplied under the terms of the
 # Common Development and Distribution License ("CDDL"), version 1.0.
 # You may only use this file in accordance with the terms of version
 # 1.0 of the CDDL.
 #
 # A full copy of the text of the CDDL should have accompanied this
 # source. A copy of the CDDL is also available via the Internet at
 # http://www.illumos.org/license/CDDL.
 #
 # CDDL HEADER END
 #
 
 #
 # Copyright (c) 2024, Klara Inc
 #
 # DESCRIPTION:
 #	Verify that long VDEV probes do not cause MMP checks to suspend pool
 #	Note: without PR-15839 fix, this test will suspend the pool.
 #
 #	A device that is returning unexpected errors will trigger a vdev_probe.
 #	When the device additionally has slow response times, the probe can hold
 #	the spa config lock as a writer for a long period of time such that the
 #	mmp uberblock updates stall when trying to acquire the spa config lock.
 #
 # STRATEGY:
 #	1. Create a pool with multiple leaf vdevs
 #	2. Enable multihost and multihost_history
 #	3. Delay for MMP writes to occur
 #	4. Verify that a long VDEV probe didn't cause MMP check to suspend pool
 #
 
 . $STF_SUITE/include/libtest.shlib
 . $STF_SUITE/tests/functional/mmp/mmp.cfg
 . $STF_SUITE/tests/functional/mmp/mmp.kshlib
 
 verify_runnable "both"
 
 function cleanup
 {
 	log_must zinject -c all
 
 	if [[ $(zpool list -H -o health $MMP_POOL) == "SUSPENDED" ]]; then
 		log_must zpool clear $MMP_POOL
 		zpool get state $MMP_POOL $MMP_DIR/file.3
 		zpool events | grep ".fs.zfs." | grep -v "history_event"
 	fi
 
 	poolexists $MMP_POOL && destroy_pool $MMP_POOL
 	log_must rm -r $MMP_DIR
 	log_must mmp_clear_hostid
 }
 
 log_assert "A long VDEV probe doesn't cause a MMP check suspend"
 
 log_onexit cleanup
 
-MMP_HISTORY_URL=/proc/spl/kstat/zfs/$MMP_POOL/multihost
+MMP_HISTORY_TMP=$MMP_DIR/history
 
 # Create a multiple drive pool
 log_must zpool events -c
 log_must mkdir -p $MMP_DIR
 log_must truncate -s 128M $MMP_DIR/file.{0,1,2,3,4,5}
 log_must zpool create -f $MMP_POOL \
 	mirror $MMP_DIR/file.{0,1,2} \
 	mirror $MMP_DIR/file.{3,4,5}
 
 # Enable MMP
 log_must mmp_set_hostid $HOSTID1
 log_must zpool set multihost=on $MMP_POOL
 clear_mmp_history
 
 # Inject vdev write error along with a delay
 log_must zinject -f 33 -e io -L pad2 -T write -d $MMP_DIR/file.3 $MMP_POOL
 log_must zinject -f 50 -e io -L uber -T write -d $MMP_DIR/file.3 $MMP_POOL
 log_must zinject -D 2000:4 -T write -d $MMP_DIR/file.3 $MMP_POOL
 
 log_must dd if=/dev/urandom of=/$MMP_POOL/data bs=1M count=5
 sleep 10
 sync_pool $MMP_POOL
 
 # Confirm mmp writes to the non-slow disks have taken place
+kstat_pool $MMP_POOL multihost > $MMP_HISTORY_TMP
 for x in {0,1,2,4}; do
-	write_count=$(grep -c file.${x} $MMP_HISTORY_URL)
+	write_count=$(grep -c file.${x} $MMP_HISTORY_TMP)
 	[[ $write_count -gt 0 ]] || log_fail "expecting mmp writes"
 done
 
 # Expect that the pool was not suspended
 log_must check_state $MMP_POOL "" "ONLINE"
 health=$(zpool list -H -o health $MMP_POOL)
 log_note "$MMP_POOL health is $health"
 [[ "$health" == "SUSPENDED" ]] && log_fail "$MMP_POOL $health unexpected"
 
 log_pass "A long VDEV probe doesn't cause a MMP check suspend"
diff --git a/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh b/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh
index 40045a7a96b5..9e93c1784dbf 100755
--- a/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh
+++ b/tests/zfs-tests/tests/functional/mount/umount_unlinked_drain.ksh
@@ -1,118 +1,120 @@
 #!/bin/ksh -p
 #
 # This file and its contents are supplied under the terms of the
 # Common Development and Distribution License ("CDDL"), version 1.0.
 # You may only use this file in accordance with the terms of version
 # 1.0 of the CDDL.
 #
 # A full copy of the text of the CDDL should have accompanied this
 # source. A copy of the CDDL is also available via the Internet at
 # http://www.illumos.org/license/CDDL.
 #
 
 #
 # Copyright 2018 Datto Inc.
 #
 
 . $STF_SUITE/include/libtest.shlib
 
 #
 # DESCRIPTION:
 # Test async unlinked drain to ensure mounting is not held up when there are
 # entries in the unlinked set. We also try to test that the list is able to be
 # filled up and drained at the same time.
 #
 # STRATEGY:
 # 1. Use zfs_unlink_suspend_progress tunable to disable freeing to build up
 #    the unlinked set
 # 2. Make sure mount happens even when there are entries in the unlinked set
 # 3. Drain and build up the unlinked list at the same time to test for races
 #
 
 function cleanup
 {
 	log_must set_tunable32 UNLINK_SUSPEND_PROGRESS $default_unlink_sp
 	for fs in $(seq 1 3); do
 		mounted $TESTDIR.$fs || zfs mount $TESTPOOL/$TESTFS.$fs
 		rm -f $TESTDIR.$fs/file-*
 		zfs set xattr=on $TESTPOOL/$TESTFS.$fs
 	done
 }
 
 function unlinked_size_is
 {
+	typeset -i expect=$1
+	typeset dataset=$2
+
 	MAX_ITERS=5 # iterations to do before we consider reported number stable
 	iters=0
 	last_usize=0
 	while [[ $iters -le $MAX_ITERS ]]; do
-		kstat_file=$(grep -nrwl /proc/spl/kstat/zfs/$2/objset-0x* -e $3)
-		nunlinks=$(awk '/nunlinks/ {print $3}' $kstat_file)
-		nunlinked=$(awk '/nunlinked/ {print $3}' $kstat_file)
+		nunlinks=$(kstat_dataset $dataset nunlinks)
+		nunlinked=$(kstat_dataset $dataset nunlinked)
 		usize=$(($nunlinks - $nunlinked))
 		if [[ $iters == $MAX_ITERS && $usize == $1 ]]; then
 			return 0
 		fi
 		if [[ $usize == $last_usize ]]; then
 			(( iters++ ))
 		else
 			iters=0
 		fi
 		last_usize=$usize
 	done
 	log_note "Unexpected unlinked set size: $last_usize, expected $1"
 	return 1
 }
 
 default_unlink_sp=$(get_tunable UNLINK_SUSPEND_PROGRESS)
 
 log_onexit cleanup
 
 log_assert "Unlinked list drain does not hold up mounting of fs"
 
 for fs in 1 2 3; do
 	set -A xattrs on sa off
 	for xa in ${xattrs[@]}; do
 		# setup fs and ensure all deleted files got into unlinked set
 		log_must mounted $TESTDIR.$fs
 		log_must zfs set xattr=$xa $TESTPOOL/$TESTFS.$fs
 		if [[ $xa == off ]]; then
 			for fn in $(seq 1 175); do
 				log_must mkfile 128k $TESTDIR.$fs/file-$fn
 			done
 		else
 			log_must xattrtest -f 175 -x 3 -r -k -p $TESTDIR.$fs
 		fi
 
 		log_must set_tunable32 UNLINK_SUSPEND_PROGRESS 1
-		log_must unlinked_size_is 0 $TESTPOOL $TESTPOOL/$TESTFS.$fs
+		log_must unlinked_size_is 0 $TESTPOOL/$TESTFS.$fs
 
 		# build up unlinked set
 		for fn in $(seq 1 100); do
 			log_must eval "rm $TESTDIR.$fs/file-$fn &"
 		done
-		log_must unlinked_size_is 100 $TESTPOOL $TESTPOOL/$TESTFS.$fs
+		log_must unlinked_size_is 100 $TESTPOOL/$TESTFS.$fs
 
 		# test that we can mount fs without emptying the unlinked list
 		log_must zfs umount $TESTPOOL/$TESTFS.$fs
 		log_must unmounted $TESTDIR.$fs
 		log_must zfs mount $TESTPOOL/$TESTFS.$fs
 		log_must mounted $TESTDIR.$fs
-		log_must unlinked_size_is 100 $TESTPOOL $TESTPOOL/$TESTFS.$fs
+		log_must unlinked_size_is 100 $TESTPOOL/$TESTFS.$fs
 
 		# confirm we can drain and add to unlinked set at the same time
 		log_must set_tunable32 UNLINK_SUSPEND_PROGRESS 0
 		log_must zfs umount $TESTPOOL/$TESTFS.$fs
 		log_must zfs mount $TESTPOOL/$TESTFS.$fs
 		for fn in $(seq 101 175); do
 			log_must eval "rm $TESTDIR.$fs/file-$fn &"
 		done
-		log_must unlinked_size_is 0 $TESTPOOL $TESTPOOL/$TESTFS.$fs
+		log_must unlinked_size_is 0 $TESTPOOL/$TESTFS.$fs
 	done
 done
 
 log_pass "Confirmed unlinked list drain does not hold up mounting of fs"