diff --git a/.github/workflows/checkstyle.yaml b/.github/workflows/checkstyle.yaml
index 553d5df39796..a102b6e8aa1f 100644
--- a/.github/workflows/checkstyle.yaml
+++ b/.github/workflows/checkstyle.yaml
@@ -1,50 +1,50 @@
 name: checkstyle

 on:
   push:
   pull_request:

 jobs:
   checkstyle:
     runs-on: ubuntu-20.04
     steps:
     - uses: actions/checkout@v2
       with:
         ref: ${{ github.event.pull_request.head.sha }}
     - name: Install dependencies
       run: |
         sudo apt-get update
         sudo apt-get install --yes -qq build-essential autoconf libtool gawk alien fakeroot linux-headers-$(uname -r)
-        sudo apt-get install --yes -qq zlib1g-dev uuid-dev libattr1-dev libblkid-dev libselinux-dev libudev-dev libssl-dev python-dev python-setuptools python-cffi python3 python3-dev python3-setuptools python3-cffi
+        sudo apt-get install --yes -qq zlib1g-dev uuid-dev libattr1-dev libblkid-dev libselinux-dev libudev-dev libssl-dev python3 python3-dev python3-setuptools python3-cffi
         # packages for tests
         sudo apt-get install --yes -qq parted lsscsi ksh attr acl nfs-kernel-server fio
         sudo apt-get install --yes -qq mandoc cppcheck pax-utils devscripts
         sudo -E pip --quiet install flake8
     - name: Prepare
       run: |
         sh ./autogen.sh
         ./configure
         make -j$(nproc)
     - name: Checkstyle
       run: |
         make checkstyle
     - name: Lint
       run: |
         make lint
     - name: CheckABI
       id: CheckABI
       run: |
         sudo docker run -v $(pwd):/source ghcr.io/openzfs/libabigail make checkabi
     - name: StoreABI
       if: failure() && steps.CheckABI.outcome == 'failure'
       run: |
         sudo docker run -v $(pwd):/source ghcr.io/openzfs/libabigail make storeabi
     - name: Prepare artifacts
       if: failure() && steps.CheckABI.outcome == 'failure'
       run: |
         find -name *.abi | tar -cf abi_files.tar -T -
     - uses: actions/upload-artifact@v2
       if: failure() && steps.CheckABI.outcome == 'failure'
       with:
         name: New ABI files (use only if you're sure about interface changes)
         path: abi_files.tar
diff --git a/.github/workflows/zfs-tests-functional.yml b/.github/workflows/zfs-tests-functional.yml
index 2987cdac6d64..19d3f57baec9 100644
--- a/.github/workflows/zfs-tests-functional.yml
+++ b/.github/workflows/zfs-tests-functional.yml
@@ -1,82 +1,81 @@
 name: zfs-tests-functional

 on:
   push:
   pull_request:

 jobs:
   tests-functional-ubuntu:
     strategy:
       fail-fast: false
       matrix:
         os: [18.04, 20.04]
     runs-on: ubuntu-${{ matrix.os }}
     steps:
     - uses: actions/checkout@v2
       with:
         ref: ${{ github.event.pull_request.head.sha }}
     - name: Install dependencies
       run: |
         sudo apt-get update
         sudo apt-get install --yes -qq build-essential autoconf libtool gdb lcov \
           git alien fakeroot wget curl bc fio acl \
           sysstat mdadm lsscsi parted gdebi attr dbench watchdog ksh \
           nfs-kernel-server samba rng-tools xz-utils \
           zlib1g-dev uuid-dev libblkid-dev libselinux-dev \
           xfslibs-dev libattr1-dev libacl1-dev libudev-dev libdevmapper-dev \
           libssl-dev libffi-dev libaio-dev libelf-dev libmount-dev \
-          libpam0g-dev pamtester python-dev python-setuptools python-cffi \
-          python-packaging python3 python3-dev python3-setuptools python3-cffi \
-          libcurl4-openssl-dev python3-packaging
+          libpam0g-dev pamtester libcurl4-openssl-dev \
+          python3 python3-dev python3-setuptools python3-cffi python3-packaging
     - name: Autogen.sh
       run: |
         sh autogen.sh
     - name: Configure
       run: |
         ./configure --enable-debug --enable-debuginfo
     - name: Make
       run: |
         make --no-print-directory -s pkg-utils pkg-kmod
     - name: Install
       run: |
         sudo dpkg -i *.deb
         # Update order of directories to search for modules, otherwise
         # Ubuntu will load kernel-shipped ones.
         sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
         sudo depmod
         sudo modprobe zfs
         # Workaround for cloud-init bug
         # see https://github.com/openzfs/zfs/issues/12644
         FILE=/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
         if [ -r "${FILE}" ]; then
           HASH=$(md5sum "${FILE}" | awk '{ print $1 }')
           if [ "${HASH}" = "121ff0ef1936cd2ef65aec0458a35772" ]; then
             # Just shove a zd* exclusion right above the hotplug hook...
             sudo sed -i -e s/'LABEL="cloudinit_hook"'/'KERNEL=="zd*", GOTO="cloudinit_end"\n&'/ "${FILE}"
             sudo udevadm control --reload-rules
           fi
         fi
         # Workaround to provide additional free space for testing.
         # https://github.com/actions/virtual-environments/issues/2840
         sudo rm -rf /usr/share/dotnet
         sudo rm -rf /opt/ghc
         sudo rm -rf "/usr/local/share/boost"
         sudo rm -rf "$AGENT_TOOLSDIRECTORY"
     - name: Tests
       run: |
         /usr/share/zfs/zfs-tests.sh -vR -s 3G
     - name: Prepare artifacts
       if: failure()
       run: |
         RESULTS_PATH=$(readlink -f /var/tmp/test_results/current)
         sudo dmesg > $RESULTS_PATH/dmesg
         sudo cp /var/log/syslog $RESULTS_PATH/
         sudo chmod +r $RESULTS_PATH/*
         # Replace ':' in dir names, actions/upload-artifact doesn't support it
         for f in $(find /var/tmp/test_results -name '*:*'); do mv "$f" "${f//:/__}"; done
     - uses: actions/upload-artifact@v2
       if: failure()
       with:
         name: Test logs Ubuntu-${{ matrix.os }}
         path: /var/tmp/test_results/20*/
         if-no-files-found: ignore
diff --git a/.github/workflows/zfs-tests-sanity.yml b/.github/workflows/zfs-tests-sanity.yml
index 8cba6d78dde2..2b97fd61822f 100644
--- a/.github/workflows/zfs-tests-sanity.yml
+++ b/.github/workflows/zfs-tests-sanity.yml
@@ -1,78 +1,77 @@
 name: zfs-tests-sanity

 on:
   push:
   pull_request:

 jobs:
   tests:
     runs-on: ubuntu-latest
     steps:
     - uses: actions/checkout@v2
       with:
         ref: ${{ github.event.pull_request.head.sha }}
     - name: Install dependencies
       run: |
         sudo apt-get update
         sudo apt-get install --yes -qq build-essential autoconf libtool gdb lcov \
           git alien fakeroot wget curl bc fio acl \
           sysstat mdadm lsscsi parted gdebi attr dbench watchdog ksh \
           nfs-kernel-server samba rng-tools xz-utils \
           zlib1g-dev uuid-dev libblkid-dev libselinux-dev \
           xfslibs-dev libattr1-dev libacl1-dev libudev-dev libdevmapper-dev \
           libssl-dev libffi-dev libaio-dev libelf-dev libmount-dev \
-          libpam0g-dev pamtester python-dev python-setuptools python-cffi \
-          python-packaging python3 python3-dev python3-setuptools python3-cffi \
-          python3-packaging libcurl4-openssl-dev
+          libpam0g-dev pamtester libcurl4-openssl-dev \
+          python3 python3-dev python3-setuptools python3-cffi python3-packaging
     - name: Autogen.sh
       run: |
         sh autogen.sh
     - name: Configure
       run: |
         ./configure --enable-debug --enable-debuginfo
     - name: Make
       run: |
         make --no-print-directory -s pkg-utils pkg-kmod
     - name: Install
       run: |
         sudo dpkg -i *.deb
         # Update order of directories to search for modules, otherwise
         # Ubuntu will load kernel-shipped ones.
         sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
         sudo depmod
         sudo modprobe zfs
         # Workaround for cloud-init bug
         # see https://github.com/openzfs/zfs/issues/12644
         FILE=/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules
         if [ -r "${FILE}" ]; then
           HASH=$(md5sum "${FILE}" | awk '{ print $1 }')
           if [ "${HASH}" = "121ff0ef1936cd2ef65aec0458a35772" ]; then
             # Just shove a zd* exclusion right above the hotplug hook...
             sudo sed -i -e s/'LABEL="cloudinit_hook"'/'KERNEL=="zd*", GOTO="cloudinit_end"\n&'/ "${FILE}"
             sudo udevadm control --reload-rules
           fi
         fi
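For reference, a sketch of the cloud-init guard used in the Install steps above, transcribed into Python. This is hypothetical (the workflows do it with md5sum and sed): the udev rule is only rewritten when the file is byte-for-byte the known-buggy version, and the zd* exclusion is inserted directly above the hotplug hook label.

    # Hypothetical Python rendering of the md5-guarded udev edit above.
    import hashlib
    FILE = "/lib/udev/rules.d/10-cloud-init-hook-hotplug.rules"
    KNOWN_BAD = "121ff0ef1936cd2ef65aec0458a35772"
    try:
        data = open(FILE, "rb").read()
    except OSError:
        data = None                      # file absent or unreadable: skip
    if data and hashlib.md5(data).hexdigest() == KNOWN_BAD:
        text = data.decode().replace(
            'LABEL="cloudinit_hook"',
            'KERNEL=="zd*", GOTO="cloudinit_end"\nLABEL="cloudinit_hook"', 1)
        with open(FILE, "w") as f:
            f.write(text)                # then: udevadm control --reload-rules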
         # Workaround to provide additional free space for testing.
         # https://github.com/actions/virtual-environments/issues/2840
         sudo rm -rf /usr/share/dotnet
         sudo rm -rf /opt/ghc
         sudo rm -rf "/usr/local/share/boost"
         sudo rm -rf "$AGENT_TOOLSDIRECTORY"
     - name: Tests
       run: |
         /usr/share/zfs/zfs-tests.sh -vR -s 3G -r sanity
     - name: Prepare artifacts
       if: failure()
       run: |
         RESULTS_PATH=$(readlink -f /var/tmp/test_results/current)
         sudo dmesg > $RESULTS_PATH/dmesg
         sudo cp /var/log/syslog $RESULTS_PATH/
         sudo chmod +r $RESULTS_PATH/*
         # Replace ':' in dir names, actions/upload-artifact doesn't support it
         for f in $(find /var/tmp/test_results -name '*:*'); do mv "$f" "${f//:/__}"; done
     - uses: actions/upload-artifact@v2
       if: failure()
       with:
         name: Test logs
         path: /var/tmp/test_results/20*/
         if-no-files-found: ignore
diff --git a/.github/workflows/zloop.yml b/.github/workflows/zloop.yml
index cf81ad4bcafc..5c1b9bd1ce22 100644
--- a/.github/workflows/zloop.yml
+++ b/.github/workflows/zloop.yml
@@ -1,67 +1,66 @@
 name: zloop

 on:
   push:
   pull_request:

 jobs:
   tests:
     runs-on: ubuntu-latest
     env:
       TEST_DIR: /var/tmp/zloop
     steps:
     - uses: actions/checkout@v2
       with:
         ref: ${{ github.event.pull_request.head.sha }}
     - name: Install dependencies
       run: |
         sudo apt-get update
         sudo apt-get install --yes -qq build-essential autoconf libtool gdb \
           git alien fakeroot \
           zlib1g-dev uuid-dev libblkid-dev libselinux-dev \
           xfslibs-dev libattr1-dev libacl1-dev libudev-dev libdevmapper-dev \
           libssl-dev libffi-dev libaio-dev libelf-dev libmount-dev \
           libpam0g-dev \
-          python-dev python-setuptools python-cffi python-packaging \
           python3 python3-dev python3-setuptools python3-cffi python3-packaging
     - name: Autogen.sh
       run: |
         sh autogen.sh
     - name: Configure
       run: |
         ./configure --enable-debug --enable-debuginfo
     - name: Make
       run: |
         make --no-print-directory -s pkg-utils pkg-kmod
     - name: Install
       run: |
         sudo dpkg -i *.deb
         # Update order of directories to search for modules, otherwise
         # Ubuntu will load kernel-shipped ones.
         sudo sed -i.bak 's/updates/extra updates/' /etc/depmod.d/ubuntu.conf
         sudo depmod
         sudo modprobe zfs
     - name: Tests
       run: |
         sudo mkdir -p $TEST_DIR
         # run for 20 minutes to have a total runner time of 30 minutes
         sudo /usr/share/zfs/zloop.sh -t 1200 -l -m1 -- -T 120 -P 60
     - name: Prepare artifacts
       if: failure()
       run: |
         sudo chmod +r -R $TEST_DIR/
     - uses: actions/upload-artifact@v2
       if: failure()
       with:
         name: Logs
         path: |
           /var/tmp/zloop/*/
           !/var/tmp/zloop/*/vdev/
         if-no-files-found: ignore
     - uses: actions/upload-artifact@v2
       if: failure()
       with:
         name: Pool files
         path: |
           /var/tmp/zloop/*/vdev/
         if-no-files-found: ignore
diff --git a/cmd/arc_summary/Makefile.am b/cmd/arc_summary/Makefile.am
index 1a26c2c199f8..f419f07e0eda 100644
--- a/cmd/arc_summary/Makefile.am
+++ b/cmd/arc_summary/Makefile.am
@@ -1,13 +1,8 @@
 bin_SCRIPTS = arc_summary

 CLEANFILES = arc_summary
-EXTRA_DIST = arc_summary2 arc_summary3
-
-if USING_PYTHON_2
-SCRIPT = arc_summary2
-else
+EXTRA_DIST = arc_summary3
 SCRIPT = arc_summary3
-endif

 arc_summary: $(SCRIPT)
 	cp $< $@
diff --git a/cmd/arc_summary/arc_summary2 b/cmd/arc_summary/arc_summary2
deleted file mode 100755
index 3302a802d146..000000000000
--- a/cmd/arc_summary/arc_summary2
+++ /dev/null
@@ -1,1180 +0,0 @@
-#!/usr/bin/env python2
-#
-# $Id: arc_summary.pl,v 388:e27800740aa2 2011-07-08 02:53:29Z jhell $
-#
-# Copyright (c) 2008 Ben Rockwood ,
-# Copyright (c) 2010 Martin Matuska ,
-# Copyright (c) 2010-2011 Jason J. Hellenthal ,
-# All rights reserved.
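A note on the recurring `sed -i.bak 's/updates/extra updates/'` step in the Install phases above: prepending "extra" to the module search order in /etc/depmod.d/ubuntu.conf makes depmod prefer the freshly built modules over the kernel-shipped ones. A hypothetical Python equivalent of that one-liner, for illustration only (the workflows use sed):

    # Hypothetical Python version of the depmod search-order tweak; sed's
    # -i.bak also leaves a backup copy behind, mimicked here with shutil.
    import shutil
    CONF = "/etc/depmod.d/ubuntu.conf"
    shutil.copy(CONF, CONF + ".bak")     # mimic sed's -i.bak backup
    with open(CONF) as f:
        lines = f.readlines()
    with open(CONF, "w") as f:
        f.writelines(l.replace("updates", "extra updates", 1) for l in lines)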
-# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in the -# documentation and/or other materials provided with the distribution. -# -# THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND -# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -# ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE -# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -# SUCH DAMAGE. -# -# If you are having troubles when using this script from cron(8) please try -# adjusting your PATH before reporting problems. -# -# Note some of this code uses older code (eg getopt instead of argparse, -# subprocess.Popen() instead of subprocess.run()) because we need to support -# some very old versions of Python. -# - -"""Print statistics on the ZFS Adjustable Replacement Cache (ARC) - -Provides basic information on the ARC, its efficiency, the L2ARC (if present), -the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. See the -in-source documentation and code at -https://github.com/openzfs/zfs/blob/master/module/zfs/arc.c for details. -""" - -import getopt -import os -import sys -import time -import errno - -from subprocess import Popen, PIPE -from decimal import Decimal as D - - -if sys.platform.startswith('freebsd'): - # Requires py27-sysctl on FreeBSD - import sysctl - - def is_value(ctl): - return ctl.type != sysctl.CTLTYPE_NODE - - def load_kstats(namespace): - """Collect information on a specific subsystem of the ARC""" - - base = 'kstat.zfs.misc.%s.' % namespace - fmt = lambda kstat: (kstat.name, D(kstat.value)) - kstats = sysctl.filter(base) - return [fmt(kstat) for kstat in kstats if is_value(kstat)] - - def load_tunables(): - ctls = sysctl.filter('vfs.zfs') - return dict((ctl.name, ctl.value) for ctl in ctls if is_value(ctl)) - -elif sys.platform.startswith('linux'): - - def load_kstats(namespace): - """Collect information on a specific subsystem of the ARC""" - - kstat = 'kstat.zfs.misc.%s.%%s' % namespace - path = '/proc/spl/kstat/zfs/%s' % namespace - with open(path) as f: - entries = [line.strip().split() for line in f][2:] # Skip header - return [(kstat % name, D(value)) for name, _, value in entries] - - def load_tunables(): - basepath = '/sys/module/zfs/parameters' - tunables = {} - for name in os.listdir(basepath): - if not name: - continue - path = '%s/%s' % (basepath, name) - with open(path) as f: - value = f.read() - tunables[name] = value.strip() - return tunables - - -show_tunable_descriptions = False -alternate_tunable_layout = False - - -def get_Kstat(): - """Collect information on the ZFS subsystem from the /proc virtual - file system. 
The name "kstat" is a holdover from the Solaris utility - of the same name. - """ - - Kstat = {} - Kstat.update(load_kstats('arcstats')) - Kstat.update(load_kstats('zfetchstats')) - Kstat.update(load_kstats('vdev_cache_stats')) - return Kstat - - -def fBytes(b=0): - """Return human-readable representation of a byte value in - powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal - points. Values smaller than one KiB are returned without - decimal points. - """ - - prefixes = [ - [2**80, "YiB"], # yobibytes (yotta) - [2**70, "ZiB"], # zebibytes (zetta) - [2**60, "EiB"], # exbibytes (exa) - [2**50, "PiB"], # pebibytes (peta) - [2**40, "TiB"], # tebibytes (tera) - [2**30, "GiB"], # gibibytes (giga) - [2**20, "MiB"], # mebibytes (mega) - [2**10, "KiB"]] # kibibytes (kilo) - - if b >= 2**10: - - for limit, unit in prefixes: - - if b >= limit: - value = b / limit - break - - result = "%0.2f\t%s" % (value, unit) - - else: - - result = "%d\tBytes" % b - - return result - - -def fHits(hits=0): - """Create a human-readable representation of the number of hits. - The single-letter symbols used are SI to avoid the confusion caused - by the different "short scale" and "long scale" representations in - English, which use the same words for different values. See - https://en.wikipedia.org/wiki/Names_of_large_numbers and - https://physics.nist.gov/cuu/Units/prefixes.html - """ - - numbers = [ - [10**24, 'Y'], # yotta (septillion) - [10**21, 'Z'], # zetta (sextillion) - [10**18, 'E'], # exa (quintrillion) - [10**15, 'P'], # peta (quadrillion) - [10**12, 'T'], # tera (trillion) - [10**9, 'G'], # giga (billion) - [10**6, 'M'], # mega (million) - [10**3, 'k']] # kilo (thousand) - - if hits >= 1000: - - for limit, symbol in numbers: - - if hits >= limit: - value = hits/limit - break - - result = "%0.2f%s" % (value, symbol) - - else: - - result = "%d" % hits - - return result - - -def fPerc(lVal=0, rVal=0, Decimal=2): - """Calculate percentage value and return in human-readable format""" - - if rVal > 0: - return str("%0." + str(Decimal) + "f") % (100 * (lVal / rVal)) + "%" - else: - return str("%0." + str(Decimal) + "f") % 100 + "%" - - -def get_arc_summary(Kstat): - """Collect general data on the ARC""" - - output = {} - memory_throttle_count = Kstat[ - "kstat.zfs.misc.arcstats.memory_throttle_count" - ] - - if memory_throttle_count > 0: - output['health'] = 'THROTTLED' - else: - output['health'] = 'HEALTHY' - - output['memory_throttle_count'] = fHits(memory_throttle_count) - - # ARC Misc. - deleted = Kstat["kstat.zfs.misc.arcstats.deleted"] - mutex_miss = Kstat["kstat.zfs.misc.arcstats.mutex_miss"] - evict_skip = Kstat["kstat.zfs.misc.arcstats.evict_skip"] - evict_l2_cached = Kstat["kstat.zfs.misc.arcstats.evict_l2_cached"] - evict_l2_eligible = Kstat["kstat.zfs.misc.arcstats.evict_l2_eligible"] - evict_l2_eligible_mfu = Kstat["kstat.zfs.misc.arcstats.evict_l2_eligible_mfu"] - evict_l2_eligible_mru = Kstat["kstat.zfs.misc.arcstats.evict_l2_eligible_mru"] - evict_l2_ineligible = Kstat["kstat.zfs.misc.arcstats.evict_l2_ineligible"] - evict_l2_skip = Kstat["kstat.zfs.misc.arcstats.evict_l2_skip"] - - # ARC Misc. 
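Worked examples for the three formatting helpers defined above, with illustrative inputs. The Kstat values they are fed are Decimal, so division is exact even under Python 2. Note that fPerc() returns "100.00%" whenever rVal is 0; the rewritten f_perc() in arc_summary3, later in this patch, returns 'n/a' for that case instead.

    # Illustrative inputs and outputs for fBytes/fHits/fPerc above:
    from decimal import Decimal as D
    fBytes(3 * 2**20)    # -> '3.00\tMiB'
    fBytes(512)          # -> '512\tBytes'
    fHits(D(1234567))    # -> '1.23M'
    fPerc(D(1), D(3))    # -> '33.33%'
    fPerc(D(5), D(0))    # -> '100.00%' (f_perc in arc_summary3: 'n/a')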
- output["arc_misc"] = {} - output["arc_misc"]["deleted"] = fHits(deleted) - output["arc_misc"]["mutex_miss"] = fHits(mutex_miss) - output["arc_misc"]["evict_skips"] = fHits(evict_skip) - output["arc_misc"]["evict_l2_skip"] = fHits(evict_l2_skip) - output["arc_misc"]["evict_l2_cached"] = fBytes(evict_l2_cached) - output["arc_misc"]["evict_l2_eligible"] = fBytes(evict_l2_eligible) - output["arc_misc"]["evict_l2_eligible_mfu"] = { - 'per': fPerc(evict_l2_eligible_mfu, evict_l2_eligible), - 'num': fBytes(evict_l2_eligible_mfu), - } - output["arc_misc"]["evict_l2_eligible_mru"] = { - 'per': fPerc(evict_l2_eligible_mru, evict_l2_eligible), - 'num': fBytes(evict_l2_eligible_mru), - } - output["arc_misc"]["evict_l2_ineligible"] = fBytes(evict_l2_ineligible) - - # ARC Sizing - arc_size = Kstat["kstat.zfs.misc.arcstats.size"] - mru_size = Kstat["kstat.zfs.misc.arcstats.mru_size"] - mfu_size = Kstat["kstat.zfs.misc.arcstats.mfu_size"] - meta_limit = Kstat["kstat.zfs.misc.arcstats.arc_meta_limit"] - meta_size = Kstat["kstat.zfs.misc.arcstats.arc_meta_used"] - dnode_limit = Kstat["kstat.zfs.misc.arcstats.arc_dnode_limit"] - dnode_size = Kstat["kstat.zfs.misc.arcstats.dnode_size"] - target_max_size = Kstat["kstat.zfs.misc.arcstats.c_max"] - target_min_size = Kstat["kstat.zfs.misc.arcstats.c_min"] - target_size = Kstat["kstat.zfs.misc.arcstats.c"] - - target_size_ratio = (target_max_size / target_min_size) - - # ARC Sizing - output['arc_sizing'] = {} - output['arc_sizing']['arc_size'] = { - 'per': fPerc(arc_size, target_max_size), - 'num': fBytes(arc_size), - } - output['arc_sizing']['target_max_size'] = { - 'ratio': target_size_ratio, - 'num': fBytes(target_max_size), - } - output['arc_sizing']['target_min_size'] = { - 'per': fPerc(target_min_size, target_max_size), - 'num': fBytes(target_min_size), - } - output['arc_sizing']['target_size'] = { - 'per': fPerc(target_size, target_max_size), - 'num': fBytes(target_size), - } - output['arc_sizing']['meta_limit'] = { - 'per': fPerc(meta_limit, target_max_size), - 'num': fBytes(meta_limit), - } - output['arc_sizing']['meta_size'] = { - 'per': fPerc(meta_size, meta_limit), - 'num': fBytes(meta_size), - } - output['arc_sizing']['dnode_limit'] = { - 'per': fPerc(dnode_limit, meta_limit), - 'num': fBytes(dnode_limit), - } - output['arc_sizing']['dnode_size'] = { - 'per': fPerc(dnode_size, dnode_limit), - 'num': fBytes(dnode_size), - } - - # ARC Hash Breakdown - output['arc_hash_break'] = {} - output['arc_hash_break']['hash_chain_max'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_chain_max" - ] - output['arc_hash_break']['hash_chains'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_chains" - ] - output['arc_hash_break']['hash_collisions'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_collisions" - ] - output['arc_hash_break']['hash_elements'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_elements" - ] - output['arc_hash_break']['hash_elements_max'] = Kstat[ - "kstat.zfs.misc.arcstats.hash_elements_max" - ] - - output['arc_size_break'] = {} - output['arc_size_break']['recently_used_cache_size'] = { - 'per': fPerc(mru_size, mru_size + mfu_size), - 'num': fBytes(mru_size), - } - output['arc_size_break']['frequently_used_cache_size'] = { - 'per': fPerc(mfu_size, mru_size + mfu_size), - 'num': fBytes(mfu_size), - } - - # ARC Hash Breakdown - hash_chain_max = Kstat["kstat.zfs.misc.arcstats.hash_chain_max"] - hash_chains = Kstat["kstat.zfs.misc.arcstats.hash_chains"] - hash_collisions = Kstat["kstat.zfs.misc.arcstats.hash_collisions"] - hash_elements = 
Kstat["kstat.zfs.misc.arcstats.hash_elements"] - hash_elements_max = Kstat["kstat.zfs.misc.arcstats.hash_elements_max"] - - output['arc_hash_break'] = {} - output['arc_hash_break']['elements_max'] = fHits(hash_elements_max) - output['arc_hash_break']['elements_current'] = { - 'per': fPerc(hash_elements, hash_elements_max), - 'num': fHits(hash_elements), - } - output['arc_hash_break']['collisions'] = fHits(hash_collisions) - output['arc_hash_break']['chain_max'] = fHits(hash_chain_max) - output['arc_hash_break']['chains'] = fHits(hash_chains) - - return output - - -def _arc_summary(Kstat): - """Print information on the ARC""" - - # ARC Sizing - arc = get_arc_summary(Kstat) - - sys.stdout.write("ARC Summary: (%s)\n" % arc['health']) - - sys.stdout.write("\tMemory Throttle Count:\t\t\t%s\n" % - arc['memory_throttle_count']) - sys.stdout.write("\n") - - # ARC Misc. - sys.stdout.write("ARC Misc:\n") - sys.stdout.write("\tDeleted:\t\t\t\t%s\n" % arc['arc_misc']['deleted']) - sys.stdout.write("\tMutex Misses:\t\t\t\t%s\n" % - arc['arc_misc']['mutex_miss']) - sys.stdout.write("\tEviction Skips:\t\t\t\t%s\n" % - arc['arc_misc']['evict_skips']) - sys.stdout.write("\tEviction Skips Due to L2 Writes:\t%s\n" % - arc['arc_misc']['evict_l2_skip']) - sys.stdout.write("\tL2 Cached Evictions:\t\t\t%s\n" % - arc['arc_misc']['evict_l2_cached']) - sys.stdout.write("\tL2 Eligible Evictions:\t\t\t%s\n" % - arc['arc_misc']['evict_l2_eligible']) - sys.stdout.write("\tL2 Eligible MFU Evictions:\t%s\t%s\n" % ( - arc['arc_misc']['evict_l2_eligible_mfu']['per'], - arc['arc_misc']['evict_l2_eligible_mfu']['num'], - ) - ) - sys.stdout.write("\tL2 Eligible MRU Evictions:\t%s\t%s\n" % ( - arc['arc_misc']['evict_l2_eligible_mru']['per'], - arc['arc_misc']['evict_l2_eligible_mru']['num'], - ) - ) - sys.stdout.write("\tL2 Ineligible Evictions:\t\t%s\n" % - arc['arc_misc']['evict_l2_ineligible']) - sys.stdout.write("\n") - - # ARC Sizing - sys.stdout.write("ARC Size:\t\t\t\t%s\t%s\n" % ( - arc['arc_sizing']['arc_size']['per'], - arc['arc_sizing']['arc_size']['num'] - ) - ) - sys.stdout.write("\tTarget Size: (Adaptive)\t\t%s\t%s\n" % ( - arc['arc_sizing']['target_size']['per'], - arc['arc_sizing']['target_size']['num'], - ) - ) - - sys.stdout.write("\tMin Size (Hard Limit):\t\t%s\t%s\n" % ( - arc['arc_sizing']['target_min_size']['per'], - arc['arc_sizing']['target_min_size']['num'], - ) - ) - - sys.stdout.write("\tMax Size (High Water):\t\t%d:1\t%s\n" % ( - arc['arc_sizing']['target_max_size']['ratio'], - arc['arc_sizing']['target_max_size']['num'], - ) - ) - - sys.stdout.write("\nARC Size Breakdown:\n") - sys.stdout.write("\tRecently Used Cache Size:\t%s\t%s\n" % ( - arc['arc_size_break']['recently_used_cache_size']['per'], - arc['arc_size_break']['recently_used_cache_size']['num'], - ) - ) - sys.stdout.write("\tFrequently Used Cache Size:\t%s\t%s\n" % ( - arc['arc_size_break']['frequently_used_cache_size']['per'], - arc['arc_size_break']['frequently_used_cache_size']['num'], - ) - ) - sys.stdout.write("\tMetadata Size (Hard Limit):\t%s\t%s\n" % ( - arc['arc_sizing']['meta_limit']['per'], - arc['arc_sizing']['meta_limit']['num'], - ) - ) - sys.stdout.write("\tMetadata Size:\t\t\t%s\t%s\n" % ( - arc['arc_sizing']['meta_size']['per'], - arc['arc_sizing']['meta_size']['num'], - ) - ) - sys.stdout.write("\tDnode Size (Hard Limit):\t%s\t%s\n" % ( - arc['arc_sizing']['dnode_limit']['per'], - arc['arc_sizing']['dnode_limit']['num'], - ) - ) - sys.stdout.write("\tDnode Size:\t\t\t%s\t%s\n" % ( - 
arc['arc_sizing']['dnode_size']['per'], - arc['arc_sizing']['dnode_size']['num'], - ) - ) - - sys.stdout.write("\n") - - # ARC Hash Breakdown - sys.stdout.write("ARC Hash Breakdown:\n") - sys.stdout.write("\tElements Max:\t\t\t\t%s\n" % - arc['arc_hash_break']['elements_max']) - sys.stdout.write("\tElements Current:\t\t%s\t%s\n" % ( - arc['arc_hash_break']['elements_current']['per'], - arc['arc_hash_break']['elements_current']['num'], - ) - ) - sys.stdout.write("\tCollisions:\t\t\t\t%s\n" % - arc['arc_hash_break']['collisions']) - sys.stdout.write("\tChain Max:\t\t\t\t%s\n" % - arc['arc_hash_break']['chain_max']) - sys.stdout.write("\tChains:\t\t\t\t\t%s\n" % - arc['arc_hash_break']['chains']) - - -def get_arc_efficiency(Kstat): - """Collect information on the efficiency of the ARC""" - - output = {} - - arc_hits = Kstat["kstat.zfs.misc.arcstats.hits"] - arc_misses = Kstat["kstat.zfs.misc.arcstats.misses"] - demand_data_hits = Kstat["kstat.zfs.misc.arcstats.demand_data_hits"] - demand_data_misses = Kstat["kstat.zfs.misc.arcstats.demand_data_misses"] - demand_metadata_hits = Kstat[ - "kstat.zfs.misc.arcstats.demand_metadata_hits" - ] - demand_metadata_misses = Kstat[ - "kstat.zfs.misc.arcstats.demand_metadata_misses" - ] - mfu_ghost_hits = Kstat["kstat.zfs.misc.arcstats.mfu_ghost_hits"] - mfu_hits = Kstat["kstat.zfs.misc.arcstats.mfu_hits"] - mru_ghost_hits = Kstat["kstat.zfs.misc.arcstats.mru_ghost_hits"] - mru_hits = Kstat["kstat.zfs.misc.arcstats.mru_hits"] - prefetch_data_hits = Kstat["kstat.zfs.misc.arcstats.prefetch_data_hits"] - prefetch_data_misses = Kstat[ - "kstat.zfs.misc.arcstats.prefetch_data_misses" - ] - prefetch_metadata_hits = Kstat[ - "kstat.zfs.misc.arcstats.prefetch_metadata_hits" - ] - prefetch_metadata_misses = Kstat[ - "kstat.zfs.misc.arcstats.prefetch_metadata_misses" - ] - - anon_hits = arc_hits - ( - mfu_hits + mru_hits + mfu_ghost_hits + mru_ghost_hits - ) - arc_accesses_total = (arc_hits + arc_misses) - demand_data_total = (demand_data_hits + demand_data_misses) - prefetch_data_total = (prefetch_data_hits + prefetch_data_misses) - real_hits = (mfu_hits + mru_hits) - - output["total_accesses"] = fHits(arc_accesses_total) - output["cache_hit_ratio"] = { - 'per': fPerc(arc_hits, arc_accesses_total), - 'num': fHits(arc_hits), - } - output["cache_miss_ratio"] = { - 'per': fPerc(arc_misses, arc_accesses_total), - 'num': fHits(arc_misses), - } - output["actual_hit_ratio"] = { - 'per': fPerc(real_hits, arc_accesses_total), - 'num': fHits(real_hits), - } - output["data_demand_efficiency"] = { - 'per': fPerc(demand_data_hits, demand_data_total), - 'num': fHits(demand_data_total), - } - - if prefetch_data_total > 0: - output["data_prefetch_efficiency"] = { - 'per': fPerc(prefetch_data_hits, prefetch_data_total), - 'num': fHits(prefetch_data_total), - } - - if anon_hits > 0: - output["cache_hits_by_cache_list"] = {} - output["cache_hits_by_cache_list"]["anonymously_used"] = { - 'per': fPerc(anon_hits, arc_hits), - 'num': fHits(anon_hits), - } - - output["most_recently_used"] = { - 'per': fPerc(mru_hits, arc_hits), - 'num': fHits(mru_hits), - } - output["most_frequently_used"] = { - 'per': fPerc(mfu_hits, arc_hits), - 'num': fHits(mfu_hits), - } - output["most_recently_used_ghost"] = { - 'per': fPerc(mru_ghost_hits, arc_hits), - 'num': fHits(mru_ghost_hits), - } - output["most_frequently_used_ghost"] = { - 'per': fPerc(mfu_ghost_hits, arc_hits), - 'num': fHits(mfu_ghost_hits), - } - - output["cache_hits_by_data_type"] = {} - output["cache_hits_by_data_type"]["demand_data"] = 
{ - 'per': fPerc(demand_data_hits, arc_hits), - 'num': fHits(demand_data_hits), - } - output["cache_hits_by_data_type"]["prefetch_data"] = { - 'per': fPerc(prefetch_data_hits, arc_hits), - 'num': fHits(prefetch_data_hits), - } - output["cache_hits_by_data_type"]["demand_metadata"] = { - 'per': fPerc(demand_metadata_hits, arc_hits), - 'num': fHits(demand_metadata_hits), - } - output["cache_hits_by_data_type"]["prefetch_metadata"] = { - 'per': fPerc(prefetch_metadata_hits, arc_hits), - 'num': fHits(prefetch_metadata_hits), - } - - output["cache_misses_by_data_type"] = {} - output["cache_misses_by_data_type"]["demand_data"] = { - 'per': fPerc(demand_data_misses, arc_misses), - 'num': fHits(demand_data_misses), - } - output["cache_misses_by_data_type"]["prefetch_data"] = { - 'per': fPerc(prefetch_data_misses, arc_misses), - 'num': fHits(prefetch_data_misses), - } - output["cache_misses_by_data_type"]["demand_metadata"] = { - 'per': fPerc(demand_metadata_misses, arc_misses), - 'num': fHits(demand_metadata_misses), - } - output["cache_misses_by_data_type"]["prefetch_metadata"] = { - 'per': fPerc(prefetch_metadata_misses, arc_misses), - 'num': fHits(prefetch_metadata_misses), - } - - return output - - -def _arc_efficiency(Kstat): - """Print information on the efficiency of the ARC""" - - arc = get_arc_efficiency(Kstat) - - sys.stdout.write("ARC Total accesses:\t\t\t\t\t%s\n" % - arc['total_accesses']) - sys.stdout.write("\tCache Hit Ratio:\t\t%s\t%s\n" % ( - arc['cache_hit_ratio']['per'], - arc['cache_hit_ratio']['num'], - ) - ) - sys.stdout.write("\tCache Miss Ratio:\t\t%s\t%s\n" % ( - arc['cache_miss_ratio']['per'], - arc['cache_miss_ratio']['num'], - ) - ) - - sys.stdout.write("\tActual Hit Ratio:\t\t%s\t%s\n" % ( - arc['actual_hit_ratio']['per'], - arc['actual_hit_ratio']['num'], - ) - ) - - sys.stdout.write("\n") - sys.stdout.write("\tData Demand Efficiency:\t\t%s\t%s\n" % ( - arc['data_demand_efficiency']['per'], - arc['data_demand_efficiency']['num'], - ) - ) - - if 'data_prefetch_efficiency' in arc: - sys.stdout.write("\tData Prefetch Efficiency:\t%s\t%s\n" % ( - arc['data_prefetch_efficiency']['per'], - arc['data_prefetch_efficiency']['num'], - ) - ) - sys.stdout.write("\n") - - sys.stdout.write("\tCACHE HITS BY CACHE LIST:\n") - if 'cache_hits_by_cache_list' in arc: - sys.stdout.write("\t Anonymously Used:\t\t%s\t%s\n" % ( - arc['cache_hits_by_cache_list']['anonymously_used']['per'], - arc['cache_hits_by_cache_list']['anonymously_used']['num'], - ) - ) - sys.stdout.write("\t Most Recently Used:\t\t%s\t%s\n" % ( - arc['most_recently_used']['per'], - arc['most_recently_used']['num'], - ) - ) - sys.stdout.write("\t Most Frequently Used:\t\t%s\t%s\n" % ( - arc['most_frequently_used']['per'], - arc['most_frequently_used']['num'], - ) - ) - sys.stdout.write("\t Most Recently Used Ghost:\t%s\t%s\n" % ( - arc['most_recently_used_ghost']['per'], - arc['most_recently_used_ghost']['num'], - ) - ) - sys.stdout.write("\t Most Frequently Used Ghost:\t%s\t%s\n" % ( - arc['most_frequently_used_ghost']['per'], - arc['most_frequently_used_ghost']['num'], - ) - ) - - sys.stdout.write("\n\tCACHE HITS BY DATA TYPE:\n") - sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % ( - arc["cache_hits_by_data_type"]['demand_data']['per'], - arc["cache_hits_by_data_type"]['demand_data']['num'], - ) - ) - sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % ( - arc["cache_hits_by_data_type"]['prefetch_data']['per'], - arc["cache_hits_by_data_type"]['prefetch_data']['num'], - ) - ) - sys.stdout.write("\t Demand 
Metadata:\t\t%s\t%s\n" % ( - arc["cache_hits_by_data_type"]['demand_metadata']['per'], - arc["cache_hits_by_data_type"]['demand_metadata']['num'], - ) - ) - sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % ( - arc["cache_hits_by_data_type"]['prefetch_metadata']['per'], - arc["cache_hits_by_data_type"]['prefetch_metadata']['num'], - ) - ) - - sys.stdout.write("\n\tCACHE MISSES BY DATA TYPE:\n") - sys.stdout.write("\t Demand Data:\t\t\t%s\t%s\n" % ( - arc["cache_misses_by_data_type"]['demand_data']['per'], - arc["cache_misses_by_data_type"]['demand_data']['num'], - ) - ) - sys.stdout.write("\t Prefetch Data:\t\t%s\t%s\n" % ( - arc["cache_misses_by_data_type"]['prefetch_data']['per'], - arc["cache_misses_by_data_type"]['prefetch_data']['num'], - ) - ) - sys.stdout.write("\t Demand Metadata:\t\t%s\t%s\n" % ( - arc["cache_misses_by_data_type"]['demand_metadata']['per'], - arc["cache_misses_by_data_type"]['demand_metadata']['num'], - ) - ) - sys.stdout.write("\t Prefetch Metadata:\t\t%s\t%s\n" % ( - arc["cache_misses_by_data_type"]['prefetch_metadata']['per'], - arc["cache_misses_by_data_type"]['prefetch_metadata']['num'], - ) - ) - - -def get_l2arc_summary(Kstat): - """Collection information on the L2ARC""" - - output = {} - - l2_abort_lowmem = Kstat["kstat.zfs.misc.arcstats.l2_abort_lowmem"] - l2_cksum_bad = Kstat["kstat.zfs.misc.arcstats.l2_cksum_bad"] - l2_evict_lock_retry = Kstat["kstat.zfs.misc.arcstats.l2_evict_lock_retry"] - l2_evict_reading = Kstat["kstat.zfs.misc.arcstats.l2_evict_reading"] - l2_feeds = Kstat["kstat.zfs.misc.arcstats.l2_feeds"] - l2_free_on_write = Kstat["kstat.zfs.misc.arcstats.l2_free_on_write"] - l2_hdr_size = Kstat["kstat.zfs.misc.arcstats.l2_hdr_size"] - l2_hits = Kstat["kstat.zfs.misc.arcstats.l2_hits"] - l2_io_error = Kstat["kstat.zfs.misc.arcstats.l2_io_error"] - l2_misses = Kstat["kstat.zfs.misc.arcstats.l2_misses"] - l2_rw_clash = Kstat["kstat.zfs.misc.arcstats.l2_rw_clash"] - l2_size = Kstat["kstat.zfs.misc.arcstats.l2_size"] - l2_asize = Kstat["kstat.zfs.misc.arcstats.l2_asize"] - l2_writes_done = Kstat["kstat.zfs.misc.arcstats.l2_writes_done"] - l2_writes_error = Kstat["kstat.zfs.misc.arcstats.l2_writes_error"] - l2_writes_sent = Kstat["kstat.zfs.misc.arcstats.l2_writes_sent"] - l2_mfu_asize = Kstat["kstat.zfs.misc.arcstats.l2_mfu_asize"] - l2_mru_asize = Kstat["kstat.zfs.misc.arcstats.l2_mru_asize"] - l2_prefetch_asize = Kstat["kstat.zfs.misc.arcstats.l2_prefetch_asize"] - l2_bufc_data_asize = Kstat["kstat.zfs.misc.arcstats.l2_bufc_data_asize"] - l2_bufc_metadata_asize = Kstat["kstat.zfs.misc.arcstats.l2_bufc_metadata_asize"] - - l2_access_total = (l2_hits + l2_misses) - output['l2_health_count'] = (l2_writes_error + l2_cksum_bad + l2_io_error) - - output['l2_access_total'] = l2_access_total - output['l2_size'] = l2_size - output['l2_asize'] = l2_asize - - if l2_size > 0 and l2_access_total > 0: - - if output['l2_health_count'] > 0: - output["health"] = "DEGRADED" - else: - output["health"] = "HEALTHY" - - output["low_memory_aborts"] = fHits(l2_abort_lowmem) - output["free_on_write"] = fHits(l2_free_on_write) - output["rw_clashes"] = fHits(l2_rw_clash) - output["bad_checksums"] = fHits(l2_cksum_bad) - output["io_errors"] = fHits(l2_io_error) - - output["l2_arc_size"] = {} - output["l2_arc_size"]["adaptive"] = fBytes(l2_size) - output["l2_arc_size"]["actual"] = { - 'per': fPerc(l2_asize, l2_size), - 'num': fBytes(l2_asize) - } - output["l2_arc_size"]["head_size"] = { - 'per': fPerc(l2_hdr_size, l2_size), - 'num': fBytes(l2_hdr_size), - } - 
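How the health flag computed above feeds the report: any write error, bad checksum, or I/O error marks the L2ARC DEGRADED, and the whole section is only printed when the device has a nonzero size and has seen accesses. With illustrative numbers:

    # Illustrative derivation of the L2ARC health flag above:
    l2_writes_error, l2_cksum_bad, l2_io_error = 0, 2, 1
    l2_health_count = l2_writes_error + l2_cksum_bad + l2_io_error
    health = "DEGRADED" if l2_health_count > 0 else "HEALTHY"  # 'DEGRADED'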
output["l2_arc_size"]["mfu_asize"] = { - 'per': fPerc(l2_mfu_asize, l2_asize), - 'num': fBytes(l2_mfu_asize), - } - output["l2_arc_size"]["mru_asize"] = { - 'per': fPerc(l2_mru_asize, l2_asize), - 'num': fBytes(l2_mru_asize), - } - output["l2_arc_size"]["prefetch_asize"] = { - 'per': fPerc(l2_prefetch_asize, l2_asize), - 'num': fBytes(l2_prefetch_asize), - } - output["l2_arc_size"]["bufc_data_asize"] = { - 'per': fPerc(l2_bufc_data_asize, l2_asize), - 'num': fBytes(l2_bufc_data_asize), - } - output["l2_arc_size"]["bufc_metadata_asize"] = { - 'per': fPerc(l2_bufc_metadata_asize, l2_asize), - 'num': fBytes(l2_bufc_metadata_asize), - } - - output["l2_arc_evicts"] = {} - output["l2_arc_evicts"]['lock_retries'] = fHits(l2_evict_lock_retry) - output["l2_arc_evicts"]['reading'] = fHits(l2_evict_reading) - - output['l2_arc_breakdown'] = {} - output['l2_arc_breakdown']['value'] = fHits(l2_access_total) - output['l2_arc_breakdown']['hit_ratio'] = { - 'per': fPerc(l2_hits, l2_access_total), - 'num': fHits(l2_hits), - } - output['l2_arc_breakdown']['miss_ratio'] = { - 'per': fPerc(l2_misses, l2_access_total), - 'num': fHits(l2_misses), - } - output['l2_arc_breakdown']['feeds'] = fHits(l2_feeds) - - output['l2_arc_buffer'] = {} - - output['l2_arc_writes'] = {} - output['l2_writes_done'] = l2_writes_done - output['l2_writes_sent'] = l2_writes_sent - if l2_writes_done != l2_writes_sent: - output['l2_arc_writes']['writes_sent'] = { - 'value': "FAULTED", - 'num': fHits(l2_writes_sent), - } - output['l2_arc_writes']['done_ratio'] = { - 'per': fPerc(l2_writes_done, l2_writes_sent), - 'num': fHits(l2_writes_done), - } - output['l2_arc_writes']['error_ratio'] = { - 'per': fPerc(l2_writes_error, l2_writes_sent), - 'num': fHits(l2_writes_error), - } - else: - output['l2_arc_writes']['writes_sent'] = { - 'per': fPerc(100), - 'num': fHits(l2_writes_sent), - } - - return output - - -def _l2arc_summary(Kstat): - """Print information on the L2ARC""" - - arc = get_l2arc_summary(Kstat) - - if arc['l2_size'] > 0 and arc['l2_access_total'] > 0: - sys.stdout.write("L2 ARC Summary: ") - if arc['l2_health_count'] > 0: - sys.stdout.write("(DEGRADED)\n") - else: - sys.stdout.write("(HEALTHY)\n") - sys.stdout.write("\tLow Memory Aborts:\t\t\t%s\n" % - arc['low_memory_aborts']) - sys.stdout.write("\tFree on Write:\t\t\t\t%s\n" % arc['free_on_write']) - sys.stdout.write("\tR/W Clashes:\t\t\t\t%s\n" % arc['rw_clashes']) - sys.stdout.write("\tBad Checksums:\t\t\t\t%s\n" % arc['bad_checksums']) - sys.stdout.write("\tIO Errors:\t\t\t\t%s\n" % arc['io_errors']) - sys.stdout.write("\n") - - sys.stdout.write("L2 ARC Size: (Adaptive)\t\t\t\t%s\n" % - arc["l2_arc_size"]["adaptive"]) - sys.stdout.write("\tCompressed:\t\t\t%s\t%s\n" % ( - arc["l2_arc_size"]["actual"]["per"], - arc["l2_arc_size"]["actual"]["num"], - ) - ) - sys.stdout.write("\tHeader Size:\t\t\t%s\t%s\n" % ( - arc["l2_arc_size"]["head_size"]["per"], - arc["l2_arc_size"]["head_size"]["num"], - ) - ) - sys.stdout.write("\tMFU Alloc. Size:\t\t%s\t%s\n" % ( - arc["l2_arc_size"]["mfu_asize"]["per"], - arc["l2_arc_size"]["mfu_asize"]["num"], - ) - ) - sys.stdout.write("\tMRU Alloc. Size:\t\t%s\t%s\n" % ( - arc["l2_arc_size"]["mru_asize"]["per"], - arc["l2_arc_size"]["mru_asize"]["num"], - ) - ) - sys.stdout.write("\tPrefetch Alloc. Size:\t\t%s\t%s\n" % ( - arc["l2_arc_size"]["prefetch_asize"]["per"], - arc["l2_arc_size"]["prefetch_asize"]["num"], - ) - ) - sys.stdout.write("\tData (buf content) Alloc. 
Size:\t%s\t%s\n" % ( - arc["l2_arc_size"]["bufc_data_asize"]["per"], - arc["l2_arc_size"]["bufc_data_asize"]["num"], - ) - ) - sys.stdout.write("\tMetadata (buf content) Size:\t%s\t%s\n" % ( - arc["l2_arc_size"]["bufc_metadata_asize"]["per"], - arc["l2_arc_size"]["bufc_metadata_asize"]["num"], - ) - ) - sys.stdout.write("\n") - - if arc["l2_arc_evicts"]['lock_retries'] != '0' or \ - arc["l2_arc_evicts"]["reading"] != '0': - sys.stdout.write("L2 ARC Evictions:\n") - sys.stdout.write("\tLock Retries:\t\t\t\t%s\n" % - arc["l2_arc_evicts"]['lock_retries']) - sys.stdout.write("\tUpon Reading:\t\t\t\t%s\n" % - arc["l2_arc_evicts"]["reading"]) - sys.stdout.write("\n") - - sys.stdout.write("L2 ARC Breakdown:\t\t\t\t%s\n" % - arc['l2_arc_breakdown']['value']) - sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % ( - arc['l2_arc_breakdown']['hit_ratio']['per'], - arc['l2_arc_breakdown']['hit_ratio']['num'], - ) - ) - - sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % ( - arc['l2_arc_breakdown']['miss_ratio']['per'], - arc['l2_arc_breakdown']['miss_ratio']['num'], - ) - ) - - sys.stdout.write("\tFeeds:\t\t\t\t\t%s\n" % - arc['l2_arc_breakdown']['feeds']) - sys.stdout.write("\n") - - sys.stdout.write("L2 ARC Writes:\n") - if arc['l2_writes_done'] != arc['l2_writes_sent']: - sys.stdout.write("\tWrites Sent: (%s)\t\t\t\t%s\n" % ( - arc['l2_arc_writes']['writes_sent']['value'], - arc['l2_arc_writes']['writes_sent']['num'], - ) - ) - sys.stdout.write("\t Done Ratio:\t\t\t%s\t%s\n" % ( - arc['l2_arc_writes']['done_ratio']['per'], - arc['l2_arc_writes']['done_ratio']['num'], - ) - ) - sys.stdout.write("\t Error Ratio:\t\t\t%s\t%s\n" % ( - arc['l2_arc_writes']['error_ratio']['per'], - arc['l2_arc_writes']['error_ratio']['num'], - ) - ) - else: - sys.stdout.write("\tWrites Sent:\t\t\t%s\t%s\n" % ( - arc['l2_arc_writes']['writes_sent']['per'], - arc['l2_arc_writes']['writes_sent']['num'], - ) - ) - - -def get_dmu_summary(Kstat): - """Collect information on the DMU""" - - output = {} - - zfetch_hits = Kstat["kstat.zfs.misc.zfetchstats.hits"] - zfetch_misses = Kstat["kstat.zfs.misc.zfetchstats.misses"] - - zfetch_access_total = (zfetch_hits + zfetch_misses) - output['zfetch_access_total'] = zfetch_access_total - - if zfetch_access_total > 0: - output['dmu'] = {} - output['dmu']['efficiency'] = {} - output['dmu']['efficiency']['value'] = fHits(zfetch_access_total) - output['dmu']['efficiency']['hit_ratio'] = { - 'per': fPerc(zfetch_hits, zfetch_access_total), - 'num': fHits(zfetch_hits), - } - output['dmu']['efficiency']['miss_ratio'] = { - 'per': fPerc(zfetch_misses, zfetch_access_total), - 'num': fHits(zfetch_misses), - } - - return output - - -def _dmu_summary(Kstat): - """Print information on the DMU""" - - arc = get_dmu_summary(Kstat) - - if arc['zfetch_access_total'] > 0: - sys.stdout.write("DMU Prefetch Efficiency:\t\t\t\t\t%s\n" % - arc['dmu']['efficiency']['value']) - sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % ( - arc['dmu']['efficiency']['hit_ratio']['per'], - arc['dmu']['efficiency']['hit_ratio']['num'], - ) - ) - sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % ( - arc['dmu']['efficiency']['miss_ratio']['per'], - arc['dmu']['efficiency']['miss_ratio']['num'], - ) - ) - - sys.stdout.write("\n") - - -def get_vdev_summary(Kstat): - """Collect information on the VDEVs""" - - output = {} - - vdev_cache_delegations = \ - Kstat["kstat.zfs.misc.vdev_cache_stats.delegations"] - vdev_cache_misses = Kstat["kstat.zfs.misc.vdev_cache_stats.misses"] - vdev_cache_hits = Kstat["kstat.zfs.misc.vdev_cache_stats.hits"] 
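A small worked example for the VDEV cache ratios assembled below, using the fPerc() helper defined earlier; the numbers are illustrative, not from a real pool:

    # With 80 hits, 15 misses and 5 delegations, the section below reports
    # 100 total accesses and ratios of 80.00%, 15.00% and 5.00%.
    from decimal import Decimal as D
    hits, misses, delegations = D(80), D(15), D(5)
    total = misses + hits + delegations        # mirrors vdev_cache_total
    assert fPerc(hits, total) == '80.00%'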
- vdev_cache_total = (vdev_cache_misses + vdev_cache_hits + - vdev_cache_delegations) - - output['vdev_cache_total'] = vdev_cache_total - - if vdev_cache_total > 0: - output['summary'] = fHits(vdev_cache_total) - output['hit_ratio'] = { - 'per': fPerc(vdev_cache_hits, vdev_cache_total), - 'num': fHits(vdev_cache_hits), - } - output['miss_ratio'] = { - 'per': fPerc(vdev_cache_misses, vdev_cache_total), - 'num': fHits(vdev_cache_misses), - } - output['delegations'] = { - 'per': fPerc(vdev_cache_delegations, vdev_cache_total), - 'num': fHits(vdev_cache_delegations), - } - - return output - - -def _vdev_summary(Kstat): - """Print information on the VDEVs""" - - arc = get_vdev_summary(Kstat) - - if arc['vdev_cache_total'] > 0: - sys.stdout.write("VDEV Cache Summary:\t\t\t\t%s\n" % arc['summary']) - sys.stdout.write("\tHit Ratio:\t\t\t%s\t%s\n" % ( - arc['hit_ratio']['per'], - arc['hit_ratio']['num'], - )) - sys.stdout.write("\tMiss Ratio:\t\t\t%s\t%s\n" % ( - arc['miss_ratio']['per'], - arc['miss_ratio']['num'], - )) - sys.stdout.write("\tDelegations:\t\t\t%s\t%s\n" % ( - arc['delegations']['per'], - arc['delegations']['num'], - )) - - -def _tunable_summary(Kstat): - """Print information on tunables, including descriptions if requested""" - - global show_tunable_descriptions - global alternate_tunable_layout - - tunables = load_tunables() - descriptions = {} - - if show_tunable_descriptions: - - command = ["/sbin/modinfo", "zfs", "-0"] - - try: - p = Popen(command, stdin=PIPE, stdout=PIPE, - stderr=PIPE, shell=False, close_fds=True) - p.wait() - - # By default, Python 2 returns a string as the first element of the - # tuple from p.communicate(), while Python 3 returns bytes which - # must be decoded first. The better way to do this would be with - # subprocess.run() or at least .check_output(), but this fails on - # CentOS 6 because of its old version of Python 2 - desc = bytes.decode(p.communicate()[0]) - description_list = desc.strip().split('\0') - - if p.returncode == 0: - for tunable in description_list: - if tunable[0:5] == 'parm:': - tunable = tunable[5:].strip() - name, description = tunable.split(':', 1) - if not description: - description = "Description unavailable" - descriptions[name] = description - else: - sys.stderr.write("%s: '%s' exited with code %i\n" % - (sys.argv[0], command[0], p.returncode)) - sys.stderr.write("Tunable descriptions will be disabled.\n") - except OSError as e: - sys.stderr.write("%s: Cannot run '%s': %s\n" % - (sys.argv[0], command[0], e.strerror)) - sys.stderr.write("Tunable descriptions will be disabled.\n") - - sys.stdout.write("ZFS Tunables:\n") - - if alternate_tunable_layout: - fmt = "\t%s=%s\n" - else: - fmt = "\t%-50s%s\n" - - for name in sorted(tunables.keys()): - if show_tunable_descriptions and name in descriptions: - sys.stdout.write("\t# %s\n" % descriptions[name]) - - sys.stdout.write(fmt % (name, tunables[name])) - - -unSub = [ - _arc_summary, - _arc_efficiency, - _l2arc_summary, - _dmu_summary, - _vdev_summary, - _tunable_summary -] - - -def zfs_header(): - """Print title string with date""" - - daydate = time.strftime('%a %b %d %H:%M:%S %Y') - - sys.stdout.write('\n'+'-'*72+'\n') - sys.stdout.write('ZFS Subsystem Report\t\t\t\t%s' % daydate) - sys.stdout.write('\n') - - -def usage(): - """Print usage information""" - - sys.stdout.write("Usage: arc_summary [-h] [-a] [-d] [-p PAGE]\n\n") - sys.stdout.write("\t -h, --help : " - "Print this help message and exit\n") - sys.stdout.write("\t -a, --alternate : " - "Show an alternate sysctl 
layout\n") - sys.stdout.write("\t -d, --description : " - "Show the sysctl descriptions\n") - sys.stdout.write("\t -p PAGE, --page=PAGE : " - "Select a single output page to display,\n") - sys.stdout.write("\t " - "should be an integer between 1 and " + - str(len(unSub)) + "\n\n") - sys.stdout.write("Examples:\n") - sys.stdout.write("\tarc_summary -a\n") - sys.stdout.write("\tarc_summary -p 4\n") - sys.stdout.write("\tarc_summary -ad\n") - sys.stdout.write("\tarc_summary --page=2\n") - - -def main(): - """Main function""" - - global show_tunable_descriptions - global alternate_tunable_layout - - try: - try: - opts, args = getopt.getopt( - sys.argv[1:], - "adp:h", ["alternate", "description", "page=", "help"] - ) - except getopt.error as e: - sys.stderr.write("Error: %s\n" % e.msg) - usage() - sys.exit(1) - - args = {} - for opt, arg in opts: - if opt in ('-a', '--alternate'): - args['a'] = True - if opt in ('-d', '--description'): - args['d'] = True - if opt in ('-p', '--page'): - args['p'] = arg - if opt in ('-h', '--help'): - usage() - sys.exit(0) - - Kstat = get_Kstat() - - alternate_tunable_layout = 'a' in args - show_tunable_descriptions = 'd' in args - - pages = [] - - if 'p' in args: - try: - pages.append(unSub[int(args['p']) - 1]) - except IndexError: - sys.stderr.write('the argument to -p must be between 1 and ' + - str(len(unSub)) + '\n') - sys.exit(1) - else: - pages = unSub - - zfs_header() - for page in pages: - page(Kstat) - sys.stdout.write("\n") - except IOError as ex: - if (ex.errno == errno.EPIPE): - sys.exit(0) - raise - except KeyboardInterrupt: - sys.exit(0) - - -if __name__ == '__main__': - main() diff --git a/cmd/arc_summary/arc_summary3 b/cmd/arc_summary/arc_summary3 index 7b28012ede4d..4f275813d973 100755 --- a/cmd/arc_summary/arc_summary3 +++ b/cmd/arc_summary/arc_summary3 @@ -1,986 +1,978 @@ #!/usr/bin/env python3 # # Copyright (c) 2008 Ben Rockwood , # Copyright (c) 2010 Martin Matuska , # Copyright (c) 2010-2011 Jason J. Hellenthal , # Copyright (c) 2017 Scot W. Stevenson # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions # are met: # # 1. Redistributions of source code must retain the above copyright # notice, this list of conditions and the following disclaimer. # 2. Redistributions in binary form must reproduce the above copyright # notice, this list of conditions and the following disclaimer in the # documentation and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY AUTHOR AND CONTRIBUTORS ``AS IS'' AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE # ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS # OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) # HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT # LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY # OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF # SUCH DAMAGE. """Print statistics on the ZFS ARC Cache and other information Provides basic information on the ARC, its efficiency, the L2ARC (if present), the Data Management Unit (DMU), Virtual Devices (VDEVs), and tunables. 
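Both generations of the script guard against broken pipes (e.g. `arc_summary | head`): the deleted main() above catches IOError and exits cleanly on EPIPE, while arc_summary3 below installs an excepthook that leaves via os._exit() on BrokenPipeError. A minimal standalone sketch of the old-style guard:

    # Old-style EPIPE guard, as in the deleted main() above.
    import errno
    import sys
    try:
        for _ in range(100000):
            sys.stdout.write('lots of output\n')
    except IOError as ex:
        if ex.errno == errno.EPIPE:
            sys.exit(0)      # reader went away; not an error
        raise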
See the in-source documentation and code at https://github.com/openzfs/zfs/blob/master/module/zfs/arc.c for details. The original introduction to arc_summary can be found at http://cuddletech.com/?p=454 """ import argparse import os import subprocess import sys import time import errno # We can't use env -S portably, and we need python3 -u to handle pipes in # the shell abruptly closing the way we want to, so... import io if isinstance(sys.__stderr__.buffer, io.BufferedWriter): os.execv(sys.executable, [sys.executable, "-u"] + sys.argv) DESCRIPTION = 'Print ARC and other statistics for OpenZFS' INDENT = ' '*8 LINE_LENGTH = 72 DATE_FORMAT = '%a %b %d %H:%M:%S %Y' TITLE = 'ZFS Subsystem Report' SECTIONS = 'arc archits dmu l2arc spl tunables vdev zil'.split() SECTION_HELP = 'print info from one section ('+' '.join(SECTIONS)+')' # Tunables and SPL are handled separately because they come from # different sources SECTION_PATHS = {'arc': 'arcstats', 'dmu': 'dmu_tx', 'l2arc': 'arcstats', # L2ARC stuff lives in arcstats 'vdev': 'vdev_cache_stats', 'zfetch': 'zfetchstats', 'zil': 'zil'} parser = argparse.ArgumentParser(description=DESCRIPTION) parser.add_argument('-a', '--alternate', action='store_true', default=False, help='use alternate formatting for tunables and SPL', dest='alt') parser.add_argument('-d', '--description', action='store_true', default=False, help='print descriptions with tunables and SPL', dest='desc') parser.add_argument('-g', '--graph', action='store_true', default=False, help='print graph on ARC use and exit', dest='graph') parser.add_argument('-p', '--page', type=int, dest='page', help='print page by number (DEPRECATED, use "-s")') parser.add_argument('-r', '--raw', action='store_true', default=False, help='dump all available data with minimal formatting', dest='raw') parser.add_argument('-s', '--section', dest='section', help=SECTION_HELP) ARGS = parser.parse_args() if sys.platform.startswith('freebsd'): # Requires py36-sysctl on FreeBSD import sysctl VDEV_CACHE_SIZE = 'vdev.cache_size' def is_value(ctl): return ctl.type != sysctl.CTLTYPE_NODE def namefmt(ctl, base='vfs.zfs.'): # base is removed from the name cut = len(base) return ctl.name[cut:] def load_kstats(section): base = 'kstat.zfs.misc.{section}.'.format(section=section) fmt = lambda kstat: '{name} : {value}'.format(name=namefmt(kstat, base), value=kstat.value) kstats = sysctl.filter(base) return [fmt(kstat) for kstat in kstats if is_value(kstat)] def get_params(base): ctls = sysctl.filter(base) return {namefmt(ctl): str(ctl.value) for ctl in ctls if is_value(ctl)} def get_tunable_params(): return get_params('vfs.zfs') def get_vdev_params(): return get_params('vfs.zfs.vdev') def get_version_impl(request): # FreeBSD reports versions for zpl and spa instead of zfs and spl. 
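A quick illustration of namefmt() above, simplified here to take a plain name string rather than a sysctl ctl object (the real function reads ctl.name):

    # Simplified namefmt(): slice the sysctl prefix off a name.
    def namefmt_str(name, base='vfs.zfs.'):
        return name[len(base):]
    assert namefmt_str('vfs.zfs.arc_max') == 'arc_max'
    assert namefmt_str('kstat.zfs.misc.arcstats.hits',
                       base='kstat.zfs.misc.arcstats.') == 'hits'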
name = {'zfs': 'zpl', 'spl': 'spa'}[request] mib = 'vfs.zfs.version.{}'.format(name) version = sysctl.filter(mib)[0].value return '{} version {}'.format(name, version) def get_descriptions(_request): ctls = sysctl.filter('vfs.zfs') return {namefmt(ctl): ctl.description for ctl in ctls if is_value(ctl)} elif sys.platform.startswith('linux'): KSTAT_PATH = '/proc/spl/kstat/zfs' SPL_PATH = '/sys/module/spl/parameters' TUNABLES_PATH = '/sys/module/zfs/parameters' VDEV_CACHE_SIZE = 'zfs_vdev_cache_size' def load_kstats(section): path = os.path.join(KSTAT_PATH, section) with open(path) as f: return list(f)[2:] # Get rid of header def get_params(basepath): """Collect information on the Solaris Porting Layer (SPL) or the tunables, depending on the PATH given. Does not check if PATH is legal. """ result = {} for name in os.listdir(basepath): path = os.path.join(basepath, name) with open(path) as f: value = f.read() result[name] = value.strip() return result def get_spl_params(): return get_params(SPL_PATH) def get_tunable_params(): return get_params(TUNABLES_PATH) def get_vdev_params(): return get_params(TUNABLES_PATH) def get_version_impl(request): # The original arc_summary called /sbin/modinfo/{spl,zfs} to get # the version information. We switch to /sys/module/{spl,zfs}/version # to make sure we get what is really loaded in the kernel try: with open("/sys/module/{}/version".format(request)) as f: return f.read().strip() except: return "(unknown)" def get_descriptions(request): """Get the descriptions of the Solaris Porting Layer (SPL) or the tunables, return with minimal formatting. """ if request not in ('spl', 'zfs'): print('ERROR: description of "{0}" requested)'.format(request)) sys.exit(1) descs = {} target_prefix = 'parm:' # We would prefer to do this with /sys/modules -- see the discussion at # get_version() -- but there isn't a way to get the descriptions from # there, so we fall back on modinfo command = ["/sbin/modinfo", request, "-0"] - # The recommended way to do this is with subprocess.run(). However, - # some installed versions of Python are < 3.5, so we offer them - # the option of doing it the old way (for now) info = '' try: - if 'run' in dir(subprocess): - info = subprocess.run(command, stdout=subprocess.PIPE, - universal_newlines=True) - raw_output = info.stdout.split('\0') - else: - info = subprocess.check_output(command, - universal_newlines=True) - raw_output = info.split('\0') + info = subprocess.run(command, stdout=subprocess.PIPE, + check=True, universal_newlines=True) + raw_output = info.stdout.split('\0') except subprocess.CalledProcessError: print("Error: Descriptions not available", "(can't access kernel module)") sys.exit(1) for line in raw_output: if not line.startswith(target_prefix): continue line = line[len(target_prefix):].strip() name, raw_desc = line.split(':', 1) desc = raw_desc.rsplit('(', 1)[0] if desc == '': desc = '(No description found)' descs[name.strip()] = desc.strip() return descs def handle_unraisableException(exc_type, exc_value=None, exc_traceback=None, err_msg=None, object=None): handle_Exception(exc_type, object, exc_traceback) def handle_Exception(ex_cls, ex, tb): if ex_cls is KeyboardInterrupt: sys.exit() if ex_cls is BrokenPipeError: # It turns out that while sys.exit() triggers an exception # not handled message on Python 3.8+, os._exit() does not. 
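The modinfo hunk above drops the old Python < 3.5 fallback in favor of a plain subprocess.run() call; with check=True a non-zero exit status raises CalledProcessError, which the surrounding except clause already turns into a "descriptions not available" message. A minimal standalone sketch of the adopted pattern (the script itself exits on failure rather than returning an empty list):

    # Minimal sketch of the subprocess.run() pattern adopted above.
    import subprocess
    try:
        info = subprocess.run(["/sbin/modinfo", "zfs", "-0"],
                              stdout=subprocess.PIPE, check=True,
                              universal_newlines=True)
        raw_output = info.stdout.split('\0')
    except subprocess.CalledProcessError:
        raw_output = []          # descriptions unavailable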
os._exit(0) if ex_cls is OSError: if ex.errno == errno.ENOTCONN: sys.exit() raise ex if hasattr(sys,'unraisablehook'): # Python 3.8+ sys.unraisablehook = handle_unraisableException sys.excepthook = handle_Exception def cleanup_line(single_line): """Format a raw line of data from /proc and isolate the name value part, returning a tuple with each. Currently, this gets rid of the middle '4'. For example "arc_no_grow 4 0" returns the tuple ("arc_no_grow", "0"). """ name, _, value = single_line.split() return name, value def draw_graph(kstats_dict): """Draw a primitive graph representing the basic information on the ARC -- its size and the proportion used by MFU and MRU -- and quit. We use max size of the ARC to calculate how full it is. This is a very rough representation. """ arc_stats = isolate_section('arcstats', kstats_dict) GRAPH_INDENT = ' '*4 GRAPH_WIDTH = 60 arc_size = f_bytes(arc_stats['size']) arc_perc = f_perc(arc_stats['size'], arc_stats['c_max']) mfu_size = f_bytes(arc_stats['mfu_size']) mru_size = f_bytes(arc_stats['mru_size']) meta_limit = f_bytes(arc_stats['arc_meta_limit']) meta_size = f_bytes(arc_stats['arc_meta_used']) dnode_limit = f_bytes(arc_stats['arc_dnode_limit']) dnode_size = f_bytes(arc_stats['dnode_size']) info_form = ('ARC: {0} ({1}) MFU: {2} MRU: {3} META: {4} ({5}) ' 'DNODE {6} ({7})') info_line = info_form.format(arc_size, arc_perc, mfu_size, mru_size, meta_size, meta_limit, dnode_size, dnode_limit) info_spc = ' '*int((GRAPH_WIDTH-len(info_line))/2) info_line = GRAPH_INDENT+info_spc+info_line graph_line = GRAPH_INDENT+'+'+('-'*(GRAPH_WIDTH-2))+'+' mfu_perc = float(int(arc_stats['mfu_size'])/int(arc_stats['c_max'])) mru_perc = float(int(arc_stats['mru_size'])/int(arc_stats['c_max'])) arc_perc = float(int(arc_stats['size'])/int(arc_stats['c_max'])) total_ticks = float(arc_perc)*GRAPH_WIDTH mfu_ticks = mfu_perc*GRAPH_WIDTH mru_ticks = mru_perc*GRAPH_WIDTH other_ticks = total_ticks-(mfu_ticks+mru_ticks) core_form = 'F'*int(mfu_ticks)+'R'*int(mru_ticks)+'O'*int(other_ticks) core_spc = ' '*(GRAPH_WIDTH-(2+len(core_form))) core_line = GRAPH_INDENT+'|'+core_form+core_spc+'|' for line in ('', info_line, graph_line, core_line, graph_line, ''): print(line) def f_bytes(byte_string): """Return human-readable representation of a byte value in powers of 2 (eg "KiB" for "kibibytes", etc) to two decimal points. Values smaller than one KiB are returned without decimal points. Note "bytes" is a reserved keyword. """ prefixes = ([2**80, "YiB"], # yobibytes (yotta) [2**70, "ZiB"], # zebibytes (zetta) [2**60, "EiB"], # exbibytes (exa) [2**50, "PiB"], # pebibytes (peta) [2**40, "TiB"], # tebibytes (tera) [2**30, "GiB"], # gibibytes (giga) [2**20, "MiB"], # mebibytes (mega) [2**10, "KiB"]) # kibibytes (kilo) bites = int(byte_string) if bites >= 2**10: for limit, unit in prefixes: if bites >= limit: value = bites / limit break result = '{0:.1f} {1}'.format(value, unit) else: result = '{0} Bytes'.format(bites) return result def f_hits(hits_string): """Create a human-readable representation of the number of hits. The single-letter symbols used are SI to avoid the confusion caused by the different "short scale" and "long scale" representations in English, which use the same words for different values. 
See
    https://en.wikipedia.org/wiki/Names_of_large_numbers and:
    https://physics.nist.gov/cuu/Units/prefixes.html
    """

    numbers = ([10**24, 'Y'],  # yotta (septillion)
               [10**21, 'Z'],  # zetta (sextillion)
               [10**18, 'E'],  # exa (quintillion)
               [10**15, 'P'],  # peta (quadrillion)
               [10**12, 'T'],  # tera (trillion)
               [10**9, 'G'],   # giga (billion)
               [10**6, 'M'],   # mega (million)
               [10**3, 'k'])   # kilo (thousand)

    hits = int(hits_string)

    if hits >= 1000:
        for limit, symbol in numbers:
            if hits >= limit:
                value = hits/limit
                break
        result = "%0.1f%s" % (value, symbol)
    else:
        result = "%d" % hits

    return result


def f_perc(value1, value2):
    """Calculate percentage and return in human-readable form. If
    rounding produces the result '0.0' though the first number is not
    zero, include a 'less-than' symbol to avoid confusion. Division by
    zero is handled by returning 'n/a'; no exception is raised.
    """
    v1 = float(value1)
    v2 = float(value2)

    try:
        perc = 100 * v1/v2
    except ZeroDivisionError:
        result = 'n/a'
    else:
        result = '{0:0.1f} %'.format(perc)

    if result == '0.0 %' and v1 > 0:
        result = '< 0.1 %'

    return result


def format_raw_line(name, value):
    """For the --raw option for the tunable and SPL outputs, decide on the
    correct formatting based on the --alternate flag.
    """
    if ARGS.alt:
        result = '{0}{1}={2}'.format(INDENT, name, value)
    else:
        # Right-align the value within the line length if it fits,
        # otherwise just separate it from the name by a single space.
        fit = LINE_LENGTH - len(INDENT) - len(name)
        overflow = len(value) + 1
        w = max(fit, overflow)
        result = '{0}{1}{2:>{w}}'.format(INDENT, name, value, w=w)

    return result


def get_kstats():
    """Collect information on the ZFS subsystem. This step does not perform
    any further processing, giving us the option to only work on what is
    actually needed. The name "kstat" is a holdover from the Solaris utility
    of the same name.
    """
    result = {}

    for section in SECTION_PATHS.values():
        if section not in result:
            result[section] = load_kstats(section)

    return result


def get_version(request):
    """Get the version number of ZFS or SPL on this machine for header.
    Returns an error string, but does not raise an error, if we can't
    get the ZFS/SPL version.
    """
    if request not in ('spl', 'zfs'):
        error_msg = '(ERROR: "{0}" requested)'.format(request)
        return error_msg

    return get_version_impl(request)


def print_header():
    """Print the initial heading with date and time as well as info on the
    kernel and ZFS versions. This is not called for the graph.
    """
    # datetime is now recommended over time but we keep the exact formatting
    # from the older version of arc_summary in case there are scripts
    # that expect it in this way
    daydate = time.strftime(DATE_FORMAT)
    spc_date = LINE_LENGTH-len(daydate)
    sys_version = os.uname()

    sys_msg = sys_version.sysname+' '+sys_version.release
    zfs = get_version('zfs')
    spc_zfs = LINE_LENGTH-len(zfs)

    machine_msg = 'Machine: '+sys_version.nodename+' ('+sys_version.machine+')'
    spl = get_version('spl')
    spc_spl = LINE_LENGTH-len(spl)

    print('\n'+('-'*LINE_LENGTH))
    print('{0:<{spc}}{1}'.format(TITLE, daydate, spc=spc_date))
    print('{0:<{spc}}{1}'.format(sys_msg, zfs, spc=spc_zfs))
    print('{0:<{spc}}{1}\n'.format(machine_msg, spl, spc=spc_spl))


def print_raw(kstats_dict):
    """Print all available data from the system in a minimally sorted format.
    This can be used as a source to be piped through 'grep'.
""" sections = sorted(kstats_dict.keys()) for section in sections: print('\n{0}:'.format(section.upper())) lines = sorted(kstats_dict[section]) for line in lines: name, value = cleanup_line(line) print(format_raw_line(name, value)) # Tunables and SPL must be handled separately because they come from a # different source and have descriptions the user might request print() section_spl() section_tunables() def isolate_section(section_name, kstats_dict): """From the complete information on all sections, retrieve only those for one section. """ try: section_data = kstats_dict[section_name] except KeyError: print('ERROR: Data on {0} not available'.format(section_data)) sys.exit(1) section_dict = dict(cleanup_line(l) for l in section_data) return section_dict # Formatted output helper functions def prt_1(text, value): """Print text and one value, no indent""" spc = ' '*(LINE_LENGTH-(len(text)+len(value))) print('{0}{spc}{1}'.format(text, value, spc=spc)) def prt_i1(text, value): """Print text and one value, with indent""" spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(value))) print(INDENT+'{0}{spc}{1}'.format(text, value, spc=spc)) def prt_2(text, value1, value2): """Print text and two values, no indent""" values = '{0:>9} {1:>9}'.format(value1, value2) spc = ' '*(LINE_LENGTH-(len(text)+len(values)+2)) print('{0}{spc} {1}'.format(text, values, spc=spc)) def prt_i2(text, value1, value2): """Print text and two values, with indent""" values = '{0:>9} {1:>9}'.format(value1, value2) spc = ' '*(LINE_LENGTH-(len(INDENT)+len(text)+len(values)+2)) print(INDENT+'{0}{spc} {1}'.format(text, values, spc=spc)) # The section output concentrates on important parameters instead of # being exhaustive (that is what the --raw parameter is for) def section_arc(kstats_dict): """Give basic information on the ARC, MRU and MFU. This is the first and most used section. 
""" arc_stats = isolate_section('arcstats', kstats_dict) throttle = arc_stats['memory_throttle_count'] if throttle == '0': health = 'HEALTHY' else: health = 'THROTTLED' prt_1('ARC status:', health) prt_i1('Memory throttle count:', throttle) print() arc_size = arc_stats['size'] arc_target_size = arc_stats['c'] arc_max = arc_stats['c_max'] arc_min = arc_stats['c_min'] mfu_size = arc_stats['mfu_size'] mru_size = arc_stats['mru_size'] meta_limit = arc_stats['arc_meta_limit'] meta_size = arc_stats['arc_meta_used'] dnode_limit = arc_stats['arc_dnode_limit'] dnode_size = arc_stats['dnode_size'] target_size_ratio = '{0}:1'.format(int(arc_max) // int(arc_min)) prt_2('ARC size (current):', f_perc(arc_size, arc_max), f_bytes(arc_size)) prt_i2('Target size (adaptive):', f_perc(arc_target_size, arc_max), f_bytes(arc_target_size)) prt_i2('Min size (hard limit):', f_perc(arc_min, arc_max), f_bytes(arc_min)) prt_i2('Max size (high water):', target_size_ratio, f_bytes(arc_max)) caches_size = int(mfu_size)+int(mru_size) prt_i2('Most Frequently Used (MFU) cache size:', f_perc(mfu_size, caches_size), f_bytes(mfu_size)) prt_i2('Most Recently Used (MRU) cache size:', f_perc(mru_size, caches_size), f_bytes(mru_size)) prt_i2('Metadata cache size (hard limit):', f_perc(meta_limit, arc_max), f_bytes(meta_limit)) prt_i2('Metadata cache size (current):', f_perc(meta_size, meta_limit), f_bytes(meta_size)) prt_i2('Dnode cache size (hard limit):', f_perc(dnode_limit, meta_limit), f_bytes(dnode_limit)) prt_i2('Dnode cache size (current):', f_perc(dnode_size, dnode_limit), f_bytes(dnode_size)) print() print('ARC hash breakdown:') prt_i1('Elements max:', f_hits(arc_stats['hash_elements_max'])) prt_i2('Elements current:', f_perc(arc_stats['hash_elements'], arc_stats['hash_elements_max']), f_hits(arc_stats['hash_elements'])) prt_i1('Collisions:', f_hits(arc_stats['hash_collisions'])) prt_i1('Chain max:', f_hits(arc_stats['hash_chain_max'])) prt_i1('Chains:', f_hits(arc_stats['hash_chains'])) print() print('ARC misc:') prt_i1('Deleted:', f_hits(arc_stats['deleted'])) prt_i1('Mutex misses:', f_hits(arc_stats['mutex_miss'])) prt_i1('Eviction skips:', f_hits(arc_stats['evict_skip'])) prt_i1('Eviction skips due to L2 writes:', f_hits(arc_stats['evict_l2_skip'])) prt_i1('L2 cached evictions:', f_bytes(arc_stats['evict_l2_cached'])) prt_i1('L2 eligible evictions:', f_bytes(arc_stats['evict_l2_eligible'])) prt_i2('L2 eligible MFU evictions:', f_perc(arc_stats['evict_l2_eligible_mfu'], arc_stats['evict_l2_eligible']), f_bytes(arc_stats['evict_l2_eligible_mfu'])) prt_i2('L2 eligible MRU evictions:', f_perc(arc_stats['evict_l2_eligible_mru'], arc_stats['evict_l2_eligible']), f_bytes(arc_stats['evict_l2_eligible_mru'])) prt_i1('L2 ineligible evictions:', f_bytes(arc_stats['evict_l2_ineligible'])) print() def section_archits(kstats_dict): """Print information on how the caches are accessed ("arc hits"). 
""" arc_stats = isolate_section('arcstats', kstats_dict) all_accesses = int(arc_stats['hits'])+int(arc_stats['misses']) actual_hits = int(arc_stats['mfu_hits'])+int(arc_stats['mru_hits']) prt_1('ARC total accesses (hits + misses):', f_hits(all_accesses)) ta_todo = (('Cache hit ratio:', arc_stats['hits']), ('Cache miss ratio:', arc_stats['misses']), ('Actual hit ratio (MFU + MRU hits):', actual_hits)) for title, value in ta_todo: prt_i2(title, f_perc(value, all_accesses), f_hits(value)) dd_total = int(arc_stats['demand_data_hits']) +\ int(arc_stats['demand_data_misses']) prt_i2('Data demand efficiency:', f_perc(arc_stats['demand_data_hits'], dd_total), f_hits(dd_total)) dp_total = int(arc_stats['prefetch_data_hits']) +\ int(arc_stats['prefetch_data_misses']) prt_i2('Data prefetch efficiency:', f_perc(arc_stats['prefetch_data_hits'], dp_total), f_hits(dp_total)) known_hits = int(arc_stats['mfu_hits']) +\ int(arc_stats['mru_hits']) +\ int(arc_stats['mfu_ghost_hits']) +\ int(arc_stats['mru_ghost_hits']) anon_hits = int(arc_stats['hits'])-known_hits print() print('Cache hits by cache type:') cl_todo = (('Most frequently used (MFU):', arc_stats['mfu_hits']), ('Most recently used (MRU):', arc_stats['mru_hits']), ('Most frequently used (MFU) ghost:', arc_stats['mfu_ghost_hits']), ('Most recently used (MRU) ghost:', arc_stats['mru_ghost_hits'])) for title, value in cl_todo: prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value)) # For some reason, anon_hits can turn negative, which is weird. Until we # have figured out why this happens, we just hide the problem, following # the behavior of the original arc_summary. if anon_hits >= 0: prt_i2('Anonymously used:', f_perc(anon_hits, arc_stats['hits']), f_hits(anon_hits)) print() print('Cache hits by data type:') dt_todo = (('Demand data:', arc_stats['demand_data_hits']), ('Demand prefetch data:', arc_stats['prefetch_data_hits']), ('Demand metadata:', arc_stats['demand_metadata_hits']), ('Demand prefetch metadata:', arc_stats['prefetch_metadata_hits'])) for title, value in dt_todo: prt_i2(title, f_perc(value, arc_stats['hits']), f_hits(value)) print() print('Cache misses by data type:') dm_todo = (('Demand data:', arc_stats['demand_data_misses']), ('Demand prefetch data:', arc_stats['prefetch_data_misses']), ('Demand metadata:', arc_stats['demand_metadata_misses']), ('Demand prefetch metadata:', arc_stats['prefetch_metadata_misses'])) for title, value in dm_todo: prt_i2(title, f_perc(value, arc_stats['misses']), f_hits(value)) print() def section_dmu(kstats_dict): """Collect information on the DMU""" zfetch_stats = isolate_section('zfetchstats', kstats_dict) zfetch_access_total = int(zfetch_stats['hits'])+int(zfetch_stats['misses']) prt_1('DMU prefetch efficiency:', f_hits(zfetch_access_total)) prt_i2('Hit ratio:', f_perc(zfetch_stats['hits'], zfetch_access_total), f_hits(zfetch_stats['hits'])) prt_i2('Miss ratio:', f_perc(zfetch_stats['misses'], zfetch_access_total), f_hits(zfetch_stats['misses'])) print() def section_l2arc(kstats_dict): """Collect information on L2ARC device if present. If not, tell user that we're skipping the section. 
""" # The L2ARC statistics live in the same section as the normal ARC stuff arc_stats = isolate_section('arcstats', kstats_dict) if arc_stats['l2_size'] == '0': print('L2ARC not detected, skipping section\n') return l2_errors = int(arc_stats['l2_writes_error']) +\ int(arc_stats['l2_cksum_bad']) +\ int(arc_stats['l2_io_error']) l2_access_total = int(arc_stats['l2_hits'])+int(arc_stats['l2_misses']) health = 'HEALTHY' if l2_errors > 0: health = 'DEGRADED' prt_1('L2ARC status:', health) l2_todo = (('Low memory aborts:', 'l2_abort_lowmem'), ('Free on write:', 'l2_free_on_write'), ('R/W clashes:', 'l2_rw_clash'), ('Bad checksums:', 'l2_cksum_bad'), ('I/O errors:', 'l2_io_error')) for title, value in l2_todo: prt_i1(title, f_hits(arc_stats[value])) print() prt_1('L2ARC size (adaptive):', f_bytes(arc_stats['l2_size'])) prt_i2('Compressed:', f_perc(arc_stats['l2_asize'], arc_stats['l2_size']), f_bytes(arc_stats['l2_asize'])) prt_i2('Header size:', f_perc(arc_stats['l2_hdr_size'], arc_stats['l2_size']), f_bytes(arc_stats['l2_hdr_size'])) prt_i2('MFU allocated size:', f_perc(arc_stats['l2_mfu_asize'], arc_stats['l2_asize']), f_bytes(arc_stats['l2_mfu_asize'])) prt_i2('MRU allocated size:', f_perc(arc_stats['l2_mru_asize'], arc_stats['l2_asize']), f_bytes(arc_stats['l2_mru_asize'])) prt_i2('Prefetch allocated size:', f_perc(arc_stats['l2_prefetch_asize'], arc_stats['l2_asize']), f_bytes(arc_stats['l2_prefetch_asize'])) prt_i2('Data (buffer content) allocated size:', f_perc(arc_stats['l2_bufc_data_asize'], arc_stats['l2_asize']), f_bytes(arc_stats['l2_bufc_data_asize'])) prt_i2('Metadata (buffer content) allocated size:', f_perc(arc_stats['l2_bufc_metadata_asize'], arc_stats['l2_asize']), f_bytes(arc_stats['l2_bufc_metadata_asize'])) print() prt_1('L2ARC breakdown:', f_hits(l2_access_total)) prt_i2('Hit ratio:', f_perc(arc_stats['l2_hits'], l2_access_total), f_hits(arc_stats['l2_hits'])) prt_i2('Miss ratio:', f_perc(arc_stats['l2_misses'], l2_access_total), f_hits(arc_stats['l2_misses'])) prt_i1('Feeds:', f_hits(arc_stats['l2_feeds'])) print() print('L2ARC writes:') if arc_stats['l2_writes_done'] != arc_stats['l2_writes_sent']: prt_i2('Writes sent:', 'FAULTED', f_hits(arc_stats['l2_writes_sent'])) prt_i2('Done ratio:', f_perc(arc_stats['l2_writes_done'], arc_stats['l2_writes_sent']), f_hits(arc_stats['l2_writes_done'])) prt_i2('Error ratio:', f_perc(arc_stats['l2_writes_error'], arc_stats['l2_writes_sent']), f_hits(arc_stats['l2_writes_error'])) else: prt_i2('Writes sent:', '100 %', f_hits(arc_stats['l2_writes_sent'])) print() print('L2ARC evicts:') prt_i1('Lock retries:', f_hits(arc_stats['l2_evict_lock_retry'])) prt_i1('Upon reading:', f_hits(arc_stats['l2_evict_reading'])) print() def section_spl(*_): """Print the SPL parameters, if requested with alternative format and/or descriptions. This does not use kstats. """ if sys.platform.startswith('freebsd'): # No SPL support in FreeBSD return spls = get_spl_params() keylist = sorted(spls.keys()) print('Solaris Porting Layer (SPL):') if ARGS.desc: descriptions = get_descriptions('spl') for key in keylist: value = spls[key] if ARGS.desc: try: print(INDENT+'#', descriptions[key]) except KeyError: print(INDENT+'# (No description found)') # paranoid print(format_raw_line(key, value)) print() def section_tunables(*_): """Print the tunables, if requested with alternative format and/or descriptions. This does not use kstasts. 
""" tunables = get_tunable_params() keylist = sorted(tunables.keys()) print('Tunables:') if ARGS.desc: descriptions = get_descriptions('zfs') for key in keylist: value = tunables[key] if ARGS.desc: try: print(INDENT+'#', descriptions[key]) except KeyError: print(INDENT+'# (No description found)') # paranoid print(format_raw_line(key, value)) print() def section_vdev(kstats_dict): """Collect information on VDEV caches""" # Currently [Nov 2017] the VDEV cache is disabled, because it is actually # harmful. When this is the case, we just skip the whole entry. See # https://github.com/openzfs/zfs/blob/master/module/zfs/vdev_cache.c # for details tunables = get_vdev_params() if tunables[VDEV_CACHE_SIZE] == '0': print('VDEV cache disabled, skipping section\n') return vdev_stats = isolate_section('vdev_cache_stats', kstats_dict) vdev_cache_total = int(vdev_stats['hits']) +\ int(vdev_stats['misses']) +\ int(vdev_stats['delegations']) prt_1('VDEV cache summary:', f_hits(vdev_cache_total)) prt_i2('Hit ratio:', f_perc(vdev_stats['hits'], vdev_cache_total), f_hits(vdev_stats['hits'])) prt_i2('Miss ratio:', f_perc(vdev_stats['misses'], vdev_cache_total), f_hits(vdev_stats['misses'])) prt_i2('Delegations:', f_perc(vdev_stats['delegations'], vdev_cache_total), f_hits(vdev_stats['delegations'])) print() def section_zil(kstats_dict): """Collect information on the ZFS Intent Log. Some of the information taken from https://github.com/openzfs/zfs/blob/master/include/sys/zil.h """ zil_stats = isolate_section('zil', kstats_dict) prt_1('ZIL committed transactions:', f_hits(zil_stats['zil_itx_count'])) prt_i1('Commit requests:', f_hits(zil_stats['zil_commit_count'])) prt_i1('Flushes to stable storage:', f_hits(zil_stats['zil_commit_writer_count'])) prt_i2('Transactions to SLOG storage pool:', f_bytes(zil_stats['zil_itx_metaslab_slog_bytes']), f_hits(zil_stats['zil_itx_metaslab_slog_count'])) prt_i2('Transactions to non-SLOG storage pool:', f_bytes(zil_stats['zil_itx_metaslab_normal_bytes']), f_hits(zil_stats['zil_itx_metaslab_normal_count'])) print() section_calls = {'arc': section_arc, 'archits': section_archits, 'dmu': section_dmu, 'l2arc': section_l2arc, 'spl': section_spl, 'tunables': section_tunables, 'vdev': section_vdev, 'zil': section_zil} def main(): """Run program. The options to draw a graph and to print all data raw are treated separately because they come with their own call. """ kstats = get_kstats() if ARGS.graph: draw_graph(kstats) sys.exit(0) print_header() if ARGS.raw: print_raw(kstats) elif ARGS.section: try: section_calls[ARGS.section](kstats) except KeyError: print('Error: Section "{0}" unknown'.format(ARGS.section)) sys.exit(1) elif ARGS.page: print('WARNING: Pages are deprecated, please use "--section"\n') pages_to_calls = {1: 'arc', 2: 'archits', 3: 'l2arc', 4: 'dmu', 5: 'vdev', 6: 'tunables'} try: call = pages_to_calls[ARGS.page] except KeyError: print('Error: Page "{0}" not supported'.format(ARGS.page)) sys.exit(1) else: section_calls[call](kstats) else: # If no parameters were given, we print all sections. 
We might want to # change the sequence by hand calls = sorted(section_calls.keys()) for section in calls: section_calls[section](kstats) sys.exit(0) if __name__ == '__main__': main() diff --git a/cmd/arcstat/arcstat.in b/cmd/arcstat/arcstat.in index cd9a803a2414..9327f644f544 100755 --- a/cmd/arcstat/arcstat.in +++ b/cmd/arcstat/arcstat.in @@ -1,554 +1,554 @@ #!/usr/bin/env @PYTHON_SHEBANG@ # # Print out ZFS ARC Statistics exported via kstat(1) # For a definition of fields, or usage, use arcstat -v # # This script was originally a fork of the original arcstat.pl (0.1) # by Neelakanth Nadgir, originally published on his Sun blog on # 09/18/2007 # http://blogs.sun.com/realneel/entry/zfs_arc_statistics # # A new version aimed to improve upon the original by adding features # and fixing bugs as needed. This version was maintained by Mike # Harsch and was hosted in a public open source repository: # http://github.com/mharsch/arcstat # # but has since moved to the illumos-gate repository. # # This Python port was written by John Hixson for FreeNAS, introduced # in commit e2c29f: # https://github.com/freenas/freenas # # and has been improved by many people since. # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License, Version 1.0 only # (the "License"). You may not use this file except in compliance # with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Fields have a fixed width. Every interval, we fill the "v" # hash with its corresponding value (v[field]=value) using calculate(). # @hdr is the array of fields that needs to be printed, so we # just iterate over this array and print the values using our pretty printer. # -# This script must remain compatible with Python 2.6+ and Python 3.4+. +# This script must remain compatible with Python 3.6+. 
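To make the header comment's description concrete, here is a minimal, self-contained sketch of that fill-and-print cycle; the field names, widths, and sample deltas are invented for illustration and are not the script's real cols table:

# A minimal sketch of the fill-and-print cycle described above, using
# hypothetical fields, widths, and deltas rather than the real cols table.
hdr = ["time", "hits", "miss"]              # fields to print, in order
widths = {"time": 8, "hits": 4, "miss": 4}  # fixed column widths

def calculate(deltas, interval):
    """Fill the "v" hash with one value per field for this interval."""
    v = {}
    v["time"] = "12:00:00"
    v["hits"] = deltas["hits"] // interval
    v["miss"] = deltas["misses"] // interval
    return v

v = calculate({"hits": 1500, "misses": 300}, 1)
print("  ".join("%*s" % (widths[col], col) for col in hdr))     # header row
print("  ".join("%*s" % (widths[col], v[col]) for col in hdr))  # value row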
# import sys import time import getopt import re import copy from signal import signal, SIGINT, SIGWINCH, SIG_DFL cols = { # HDR: [Size, Scale, Description] "time": [8, -1, "Time"], "hits": [4, 1000, "ARC reads per second"], "miss": [4, 1000, "ARC misses per second"], "read": [4, 1000, "Total ARC accesses per second"], "hit%": [4, 100, "ARC hit percentage"], "miss%": [5, 100, "ARC miss percentage"], "dhit": [4, 1000, "Demand hits per second"], "dmis": [4, 1000, "Demand misses per second"], "dh%": [3, 100, "Demand hit percentage"], "dm%": [3, 100, "Demand miss percentage"], "phit": [4, 1000, "Prefetch hits per second"], "pmis": [4, 1000, "Prefetch misses per second"], "ph%": [3, 100, "Prefetch hits percentage"], "pm%": [3, 100, "Prefetch miss percentage"], "mhit": [4, 1000, "Metadata hits per second"], "mmis": [4, 1000, "Metadata misses per second"], "mread": [5, 1000, "Metadata accesses per second"], "mh%": [3, 100, "Metadata hit percentage"], "mm%": [3, 100, "Metadata miss percentage"], "arcsz": [5, 1024, "ARC size"], "size": [4, 1024, "ARC size"], "c": [4, 1024, "ARC target size"], "mfu": [4, 1000, "MFU list hits per second"], "mru": [4, 1000, "MRU list hits per second"], "mfug": [4, 1000, "MFU ghost list hits per second"], "mrug": [4, 1000, "MRU ghost list hits per second"], "eskip": [5, 1000, "evict_skip per second"], "el2skip": [7, 1000, "evict skip, due to l2 writes, per second"], "el2cach": [7, 1024, "Size of L2 cached evictions per second"], "el2el": [5, 1024, "Size of L2 eligible evictions per second"], "el2mfu": [6, 1024, "Size of L2 eligible MFU evictions per second"], "el2mru": [6, 1024, "Size of L2 eligible MRU evictions per second"], "el2inel": [7, 1024, "Size of L2 ineligible evictions per second"], "mtxmis": [6, 1000, "mutex_miss per second"], "dread": [5, 1000, "Demand accesses per second"], "pread": [5, 1000, "Prefetch accesses per second"], "l2hits": [6, 1000, "L2ARC hits per second"], "l2miss": [6, 1000, "L2ARC misses per second"], "l2read": [6, 1000, "Total L2ARC accesses per second"], "l2hit%": [6, 100, "L2ARC access hit percentage"], "l2miss%": [7, 100, "L2ARC access miss percentage"], "l2pref": [6, 1024, "L2ARC prefetch allocated size"], "l2mfu": [5, 1024, "L2ARC MFU allocated size"], "l2mru": [5, 1024, "L2ARC MRU allocated size"], "l2data": [6, 1024, "L2ARC data allocated size"], "l2meta": [6, 1024, "L2ARC metadata allocated size"], "l2pref%": [7, 100, "L2ARC prefetch percentage"], "l2mfu%": [6, 100, "L2ARC MFU percentage"], "l2mru%": [6, 100, "L2ARC MRU percentage"], "l2data%": [7, 100, "L2ARC data percentage"], "l2meta%": [7, 100, "L2ARC metadata percentage"], "l2asize": [7, 1024, "Actual (compressed) size of the L2ARC"], "l2size": [6, 1024, "Size of the L2ARC"], "l2bytes": [7, 1024, "Bytes read per second from the L2ARC"], "grow": [4, 1000, "ARC grow disabled"], "need": [4, 1024, "ARC reclaim need"], "free": [4, 1024, "ARC free memory"], "avail": [5, 1024, "ARC available memory"], "waste": [5, 1024, "Wasted memory due to round up to pagesize"], } v = {} hdr = ["time", "read", "miss", "miss%", "dmis", "dm%", "pmis", "pm%", "mmis", "mm%", "size", "c", "avail"] xhdr = ["time", "mfu", "mru", "mfug", "mrug", "eskip", "mtxmis", "dread", "pread", "read"] sint = 1 # Default interval is 1 second count = 1 # Default count is 1 hdr_intr = 20 # Print header every 20 lines of output opfile = None sep = " " # Default separator is 2 spaces version = "0.4" l2exist = False cmd = ("Usage: arcstat [-havxp] [-f fields] [-o file] [-s string] [interval " "[count]]\n") cur = {} d = {} 
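Each entry in the cols table above pairs a display width with a scale factor: 1024 marks byte quantities, 1000 marks counts and rates, and -1 means the value is printed verbatim (the time column). The sketch below is a simplified stand-in for that convention, not the script's actual prettynum(), which also handles widths and rounding:

# Illustrative only: scale a value by its column's scale factor and
# attach a suffix, the way the [size, scale] pairs above are consumed.
def scale_value(num, scale):
    suffix = [' ', 'K', 'M', 'G', 'T']
    index = 0
    while scale != -1 and abs(num) > scale and index < len(suffix) - 1:
        num /= scale
        index += 1
    return "%.1f%s" % (num, suffix[index]) if index else str(num)

print(scale_value(3 * 1024 * 1024, 1024))  # 3 MiB size -> "3.0M"
print(scale_value(1500, 1000))             # 1500 events/s -> "1.5K"
print(scale_value("12:00:00", -1))         # time field passes through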
out = None kstat = None pretty_print = True if sys.platform.startswith('freebsd'): # Requires py-sysctl on FreeBSD import sysctl def kstat_update(): global kstat k = [ctl for ctl in sysctl.filter('kstat.zfs.misc.arcstats') if ctl.type != sysctl.CTLTYPE_NODE] if not k: sys.exit(1) kstat = {} for s in k: if not s: continue name, value = s.name, s.value # Trims 'kstat.zfs.misc.arcstats' from the name kstat[name[24:]] = int(value) elif sys.platform.startswith('linux'): def kstat_update(): global kstat k = [line.strip() for line in open('/proc/spl/kstat/zfs/arcstats')] if not k: sys.exit(1) del k[0:2] kstat = {} for s in k: if not s: continue name, unused, value = s.split() kstat[name] = int(value) def detailed_usage(): sys.stderr.write("%s\n" % cmd) sys.stderr.write("Field definitions are as follows:\n") for key in cols: sys.stderr.write("%11s : %s\n" % (key, cols[key][2])) sys.stderr.write("\n") sys.exit(0) def usage(): sys.stderr.write("%s\n" % cmd) sys.stderr.write("\t -h : Print this help message\n") sys.stderr.write("\t -a : Print all possible stats\n") sys.stderr.write("\t -v : List all possible field headers and definitions" "\n") sys.stderr.write("\t -x : Print extended stats\n") sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n") sys.stderr.write("\t -o : Redirect output to the specified file\n") sys.stderr.write("\t -s : Override default field separator with custom " "character or string\n") sys.stderr.write("\t -p : Disable auto-scaling of numerical fields\n") sys.stderr.write("\nExamples:\n") sys.stderr.write("\tarcstat -o /tmp/a.log 2 10\n") sys.stderr.write("\tarcstat -s \",\" -o /tmp/a.log 2 10\n") sys.stderr.write("\tarcstat -v\n") sys.stderr.write("\tarcstat -f time,hit%,dh%,ph%,mh% 1\n") sys.stderr.write("\n") sys.exit(1) def snap_stats(): global cur global kstat prev = copy.deepcopy(cur) kstat_update() cur = kstat for key in cur: if re.match(key, "class"): continue if key in prev: d[key] = cur[key] - prev[key] else: d[key] = cur[key] def prettynum(sz, scale, num=0): suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'] index = 0 save = 0 # Special case for date field if scale == -1: return "%s" % num # Rounding error, return 0 elif 0 < num < 1: num = 0 while abs(num) > scale and index < 5: save = num num = num / scale index += 1 if index == 0: return "%*d" % (sz, num) if abs(save / scale) < 10: return "%*.1f%s" % (sz - 1, num, suffix[index]) else: return "%*d%s" % (sz - 1, num, suffix[index]) def print_values(): global hdr global sep global v global pretty_print if pretty_print: fmt = lambda col: prettynum(cols[col][0], cols[col][1], v[col]) else: fmt = lambda col: v[col] sys.stdout.write(sep.join(fmt(col) for col in hdr)) sys.stdout.write("\n") sys.stdout.flush() def print_header(): global hdr global sep global pretty_print if pretty_print: fmt = lambda col: "%*s" % (cols[col][0], col) else: fmt = lambda col: col sys.stdout.write(sep.join(fmt(col) for col in hdr)) sys.stdout.write("\n") def get_terminal_lines(): try: import fcntl import termios import struct data = fcntl.ioctl(sys.stdout.fileno(), termios.TIOCGWINSZ, '1234') sz = struct.unpack('hh', data) return sz[0] except Exception: pass def update_hdr_intr(): global hdr_intr lines = get_terminal_lines() if lines and lines > 3: hdr_intr = lines - 3 def resize_handler(signum, frame): update_hdr_intr() def init(): global sint global count global hdr global xhdr global opfile global sep global out global l2exist global pretty_print desired_cols = None aflag = False xflag = False hflag = False vflag = False i 
= 1 try: opts, args = getopt.getopt( sys.argv[1:], "axo:hvs:f:p", [ "all", "extended", "outfile", "help", "verbose", "separator", "columns", "parsable" ] ) except getopt.error as msg: sys.stderr.write("Error: %s\n" % str(msg)) usage() opts = None for opt, arg in opts: if opt in ('-a', '--all'): aflag = True if opt in ('-x', '--extended'): xflag = True if opt in ('-o', '--outfile'): opfile = arg i += 1 if opt in ('-h', '--help'): hflag = True if opt in ('-v', '--verbose'): vflag = True if opt in ('-s', '--separator'): sep = arg i += 1 if opt in ('-f', '--columns'): desired_cols = arg i += 1 if opt in ('-p', '--parsable'): pretty_print = False i += 1 argv = sys.argv[i:] sint = int(argv[0]) if argv else sint count = int(argv[1]) if len(argv) > 1 else (0 if len(argv) > 0 else 1) if hflag or (xflag and desired_cols): usage() if vflag: detailed_usage() if xflag: hdr = xhdr update_hdr_intr() # check if L2ARC exists snap_stats() l2_size = cur.get("l2_size") if l2_size: l2exist = True if desired_cols: hdr = desired_cols.split(",") invalid = [] incompat = [] for ele in hdr: if ele not in cols: invalid.append(ele) elif not l2exist and ele.startswith("l2"): sys.stdout.write("No L2ARC Here\n%s\n" % ele) incompat.append(ele) if len(invalid) > 0: sys.stderr.write("Invalid column definition! -- %s\n" % invalid) usage() if len(incompat) > 0: sys.stderr.write("Incompatible field specified! -- %s\n" % incompat) usage() if aflag: if l2exist: hdr = cols.keys() else: hdr = [col for col in cols.keys() if not col.startswith("l2")] if opfile: try: out = open(opfile, "w") sys.stdout = out except IOError: sys.stderr.write("Cannot open %s for writing\n" % opfile) sys.exit(1) def calculate(): global d global v global l2exist v = dict() v["time"] = time.strftime("%H:%M:%S", time.localtime()) v["hits"] = d["hits"] // sint v["miss"] = d["misses"] // sint v["read"] = v["hits"] + v["miss"] v["hit%"] = 100 * v["hits"] // v["read"] if v["read"] > 0 else 0 v["miss%"] = 100 - v["hit%"] if v["read"] > 0 else 0 v["dhit"] = (d["demand_data_hits"] + d["demand_metadata_hits"]) // sint v["dmis"] = (d["demand_data_misses"] + d["demand_metadata_misses"]) // sint v["dread"] = v["dhit"] + v["dmis"] v["dh%"] = 100 * v["dhit"] // v["dread"] if v["dread"] > 0 else 0 v["dm%"] = 100 - v["dh%"] if v["dread"] > 0 else 0 v["phit"] = (d["prefetch_data_hits"] + d["prefetch_metadata_hits"]) // sint v["pmis"] = (d["prefetch_data_misses"] + d["prefetch_metadata_misses"]) // sint v["pread"] = v["phit"] + v["pmis"] v["ph%"] = 100 * v["phit"] // v["pread"] if v["pread"] > 0 else 0 v["pm%"] = 100 - v["ph%"] if v["pread"] > 0 else 0 v["mhit"] = (d["prefetch_metadata_hits"] + d["demand_metadata_hits"]) // sint v["mmis"] = (d["prefetch_metadata_misses"] + d["demand_metadata_misses"]) // sint v["mread"] = v["mhit"] + v["mmis"] v["mh%"] = 100 * v["mhit"] // v["mread"] if v["mread"] > 0 else 0 v["mm%"] = 100 - v["mh%"] if v["mread"] > 0 else 0 v["arcsz"] = cur["size"] v["size"] = cur["size"] v["c"] = cur["c"] v["mfu"] = d["mfu_hits"] // sint v["mru"] = d["mru_hits"] // sint v["mrug"] = d["mru_ghost_hits"] // sint v["mfug"] = d["mfu_ghost_hits"] // sint v["eskip"] = d["evict_skip"] // sint v["el2skip"] = d["evict_l2_skip"] // sint v["el2cach"] = d["evict_l2_cached"] // sint v["el2el"] = d["evict_l2_eligible"] // sint v["el2mfu"] = d["evict_l2_eligible_mfu"] // sint v["el2mru"] = d["evict_l2_eligible_mru"] // sint v["el2inel"] = d["evict_l2_ineligible"] // sint v["mtxmis"] = d["mutex_miss"] // sint if l2exist: v["l2hits"] = d["l2_hits"] // sint v["l2miss"] = 
d["l2_misses"] // sint v["l2read"] = v["l2hits"] + v["l2miss"] v["l2hit%"] = 100 * v["l2hits"] // v["l2read"] if v["l2read"] > 0 else 0 v["l2miss%"] = 100 - v["l2hit%"] if v["l2read"] > 0 else 0 v["l2asize"] = cur["l2_asize"] v["l2size"] = cur["l2_size"] v["l2bytes"] = d["l2_read_bytes"] // sint v["l2pref"] = cur["l2_prefetch_asize"] v["l2mfu"] = cur["l2_mfu_asize"] v["l2mru"] = cur["l2_mru_asize"] v["l2data"] = cur["l2_bufc_data_asize"] v["l2meta"] = cur["l2_bufc_metadata_asize"] v["l2pref%"] = 100 * v["l2pref"] // v["l2asize"] v["l2mfu%"] = 100 * v["l2mfu"] // v["l2asize"] v["l2mru%"] = 100 * v["l2mru"] // v["l2asize"] v["l2data%"] = 100 * v["l2data"] // v["l2asize"] v["l2meta%"] = 100 * v["l2meta"] // v["l2asize"] v["grow"] = 0 if cur["arc_no_grow"] else 1 v["need"] = cur["arc_need_free"] v["free"] = cur["memory_free_bytes"] v["avail"] = cur["memory_available_bytes"] v["waste"] = cur["abd_chunk_waste_size"] def main(): global sint global count global hdr_intr i = 0 count_flag = 0 init() if count > 0: count_flag = 1 signal(SIGINT, SIG_DFL) signal(SIGWINCH, resize_handler) while True: if i == 0: print_header() snap_stats() calculate() print_values() if count_flag == 1: if count <= 1: break count -= 1 i = 0 if i >= hdr_intr else i + 1 time.sleep(sint) if out: out.close() if __name__ == '__main__': main() diff --git a/cmd/dbufstat/dbufstat.in b/cmd/dbufstat/dbufstat.in index 82250353f5eb..b716a0c9749b 100755 --- a/cmd/dbufstat/dbufstat.in +++ b/cmd/dbufstat/dbufstat.in @@ -1,684 +1,684 @@ #!/usr/bin/env @PYTHON_SHEBANG@ # # Print out statistics for all cached dmu buffers. This information # is available through the dbufs kstat and may be post-processed as # needed by the script. # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License, Version 1.0 only # (the "License"). You may not use this file except in compliance # with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. # # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # Copyright (C) 2013 Lawrence Livermore National Security, LLC. # Produced at Lawrence Livermore National Laboratory (cf, DISCLAIMER). # -# This script must remain compatible with Python 2.6+ and Python 3.4+. +# This script must remain compatible with and Python 3.6+. 
# import sys import getopt import errno import re bhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize"] bxhdr = ["pool", "objset", "object", "level", "blkid", "offset", "dbsize", "meta", "state", "dbholds", "dbc", "list", "atype", "flags", "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize", "l2_comp", "aholds", "dtype", "btype", "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"] bincompat = ["cached", "direct", "indirect", "bonus", "spill"] dhdr = ["pool", "objset", "object", "dtype", "cached"] dxhdr = ["pool", "objset", "object", "dtype", "btype", "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize", "cached", "direct", "indirect", "bonus", "spill"] dincompat = ["level", "blkid", "offset", "dbsize", "meta", "state", "dbholds", "dbc", "list", "atype", "flags", "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize", "l2_comp", "aholds"] thdr = ["pool", "objset", "dtype", "cached"] txhdr = ["pool", "objset", "dtype", "cached", "direct", "indirect", "bonus", "spill"] tincompat = ["object", "level", "blkid", "offset", "dbsize", "meta", "state", "dbc", "dbholds", "list", "atype", "flags", "count", "asize", "access", "mru", "gmru", "mfu", "gmfu", "l2", "l2_dattr", "l2_asize", "l2_comp", "aholds", "btype", "data_bs", "meta_bs", "bsize", "lvls", "dholds", "blocks", "dsize"] cols = { # hdr: [size, scale, description] "pool": [15, -1, "pool name"], "objset": [6, -1, "dataset identification number"], "object": [10, -1, "object number"], "level": [5, -1, "indirection level of buffer"], "blkid": [8, -1, "block number of buffer"], "offset": [12, 1024, "offset in object of buffer"], "dbsize": [7, 1024, "size of buffer"], "meta": [4, -1, "is this buffer metadata?"], "state": [5, -1, "state of buffer (read, cached, etc)"], "dbholds": [7, 1000, "number of holds on buffer"], "dbc": [3, -1, "in dbuf cache"], "list": [4, -1, "which ARC list contains this buffer"], "atype": [7, -1, "ARC header type (data or metadata)"], "flags": [9, -1, "ARC read flags"], "count": [5, -1, "ARC data count"], "asize": [7, 1024, "size of this ARC buffer"], "access": [10, -1, "time this ARC buffer was last accessed"], "mru": [5, 1000, "hits while on the ARC's MRU list"], "gmru": [5, 1000, "hits while on the ARC's MRU ghost list"], "mfu": [5, 1000, "hits while on the ARC's MFU list"], "gmfu": [5, 1000, "hits while on the ARC's MFU ghost list"], "l2": [5, 1000, "hits while on the L2ARC"], "l2_dattr": [8, -1, "L2ARC disk address/offset"], "l2_asize": [8, 1024, "L2ARC alloc'd size (depending on compression)"], "l2_comp": [21, -1, "L2ARC compression algorithm for buffer"], "aholds": [6, 1000, "number of holds on this ARC buffer"], "dtype": [27, -1, "dnode type"], "btype": [27, -1, "bonus buffer type"], "data_bs": [7, 1024, "data block size"], "meta_bs": [7, 1024, "metadata block size"], "bsize": [6, 1024, "bonus buffer size"], "lvls": [6, -1, "number of indirection levels"], "dholds": [6, 1000, "number of holds on dnode"], "blocks": [8, 1000, "number of allocated blocks"], "dsize": [12, 1024, "size of dnode"], "cached": [6, 1024, "bytes cached for all blocks"], "direct": [6, 1024, "bytes cached for direct blocks"], "indirect": [8, 1024, "bytes cached for indirect blocks"], "bonus": [5, 1024, "bytes cached for bonus buffer"], "spill": [5, 1024, "bytes cached for spill block"], } hdr = None xhdr = None sep = " " # Default separator is 2 spaces cmd = ("Usage: dbufstat [-bdhnrtvx] [-i file] [-f fields] [-o file] " "[-s 
string] [-F filter]\n") raw = 0 if sys.platform.startswith("freebsd"): import io # Requires py-sysctl on FreeBSD import sysctl def default_ifile(): dbufs = sysctl.filter("kstat.zfs.misc.dbufs")[0].value sys.stdin = io.StringIO(dbufs) return "-" elif sys.platform.startswith("linux"): def default_ifile(): return "/proc/spl/kstat/zfs/dbufs" def print_incompat_helper(incompat): cnt = 0 for key in sorted(incompat): if cnt == 0: sys.stderr.write("\t") elif cnt > 8: sys.stderr.write(",\n\t") cnt = 0 else: sys.stderr.write(", ") sys.stderr.write("%s" % key) cnt += 1 sys.stderr.write("\n\n") def detailed_usage(): sys.stderr.write("%s\n" % cmd) sys.stderr.write("Field definitions incompatible with '-b' option:\n") print_incompat_helper(bincompat) sys.stderr.write("Field definitions incompatible with '-d' option:\n") print_incompat_helper(dincompat) sys.stderr.write("Field definitions incompatible with '-t' option:\n") print_incompat_helper(tincompat) sys.stderr.write("Field definitions are as follows:\n") for key in sorted(cols.keys()): sys.stderr.write("%11s : %s\n" % (key, cols[key][2])) sys.stderr.write("\n") sys.exit(0) def usage(): sys.stderr.write("%s\n" % cmd) sys.stderr.write("\t -b : Print table of information for each dbuf\n") sys.stderr.write("\t -d : Print table of information for each dnode\n") sys.stderr.write("\t -h : Print this help message\n") sys.stderr.write("\t -n : Exclude header from output\n") sys.stderr.write("\t -r : Print raw values\n") sys.stderr.write("\t -t : Print table of information for each dnode type" "\n") sys.stderr.write("\t -v : List all possible field headers and definitions" "\n") sys.stderr.write("\t -x : Print extended stats\n") sys.stderr.write("\t -i : Redirect input from the specified file\n") sys.stderr.write("\t -f : Specify specific fields to print (see -v)\n") sys.stderr.write("\t -o : Redirect output to the specified file\n") sys.stderr.write("\t -s : Override default field separator with custom " "character or string\n") sys.stderr.write("\t -F : Filter output by value or regex\n") sys.stderr.write("\nExamples:\n") sys.stderr.write("\tdbufstat -d -o /tmp/d.log\n") sys.stderr.write("\tdbufstat -t -s \",\" -o /tmp/t.log\n") sys.stderr.write("\tdbufstat -v\n") sys.stderr.write("\tdbufstat -d -f pool,object,objset,dsize,cached\n") sys.stderr.write("\tdbufstat -bx -F dbc=1,objset=54,pool=testpool\n") sys.stderr.write("\n") sys.exit(1) def prettynum(sz, scale, num=0): global raw suffix = [' ', 'K', 'M', 'G', 'T', 'P', 'E', 'Z'] index = 0 save = 0 if raw or scale == -1: return "%*s" % (sz, num) # Rounding error, return 0 elif 0 < num < 1: num = 0 while num > scale and index < 5: save = num num = num / scale index += 1 if index == 0: return "%*d" % (sz, num) if (save / scale) < 10: return "%*.1f%s" % (sz - 1, num, suffix[index]) else: return "%*d%s" % (sz - 1, num, suffix[index]) def print_values(v): global hdr global sep try: for col in hdr: sys.stdout.write("%s%s" % ( prettynum(cols[col][0], cols[col][1], v[col]), sep)) sys.stdout.write("\n") except IOError as e: if e.errno == errno.EPIPE: sys.exit(1) def print_header(): global hdr global sep try: for col in hdr: sys.stdout.write("%*s%s" % (cols[col][0], col, sep)) sys.stdout.write("\n") except IOError as e: if e.errno == errno.EPIPE: sys.exit(1) def get_typestring(t): ot_strings = [ "DMU_OT_NONE", # general: "DMU_OT_OBJECT_DIRECTORY", "DMU_OT_OBJECT_ARRAY", "DMU_OT_PACKED_NVLIST", "DMU_OT_PACKED_NVLIST_SIZE", "DMU_OT_BPOBJ", "DMU_OT_BPOBJ_HDR", # spa: "DMU_OT_SPACE_MAP_HEADER", "DMU_OT_SPACE_MAP", # zil: 
"DMU_OT_INTENT_LOG", # dmu: "DMU_OT_DNODE", "DMU_OT_OBJSET", # dsl: "DMU_OT_DSL_DIR", "DMU_OT_DSL_DIR_CHILD_MAP", "DMU_OT_DSL_DS_SNAP_MAP", "DMU_OT_DSL_PROPS", "DMU_OT_DSL_DATASET", # zpl: "DMU_OT_ZNODE", "DMU_OT_OLDACL", "DMU_OT_PLAIN_FILE_CONTENTS", "DMU_OT_DIRECTORY_CONTENTS", "DMU_OT_MASTER_NODE", "DMU_OT_UNLINKED_SET", # zvol: "DMU_OT_ZVOL", "DMU_OT_ZVOL_PROP", # other; for testing only! "DMU_OT_PLAIN_OTHER", "DMU_OT_UINT64_OTHER", "DMU_OT_ZAP_OTHER", # new object types: "DMU_OT_ERROR_LOG", "DMU_OT_SPA_HISTORY", "DMU_OT_SPA_HISTORY_OFFSETS", "DMU_OT_POOL_PROPS", "DMU_OT_DSL_PERMS", "DMU_OT_ACL", "DMU_OT_SYSACL", "DMU_OT_FUID", "DMU_OT_FUID_SIZE", "DMU_OT_NEXT_CLONES", "DMU_OT_SCAN_QUEUE", "DMU_OT_USERGROUP_USED", "DMU_OT_USERGROUP_QUOTA", "DMU_OT_USERREFS", "DMU_OT_DDT_ZAP", "DMU_OT_DDT_STATS", "DMU_OT_SA", "DMU_OT_SA_MASTER_NODE", "DMU_OT_SA_ATTR_REGISTRATION", "DMU_OT_SA_ATTR_LAYOUTS", "DMU_OT_SCAN_XLATE", "DMU_OT_DEDUP", "DMU_OT_DEADLIST", "DMU_OT_DEADLIST_HDR", "DMU_OT_DSL_CLONES", "DMU_OT_BPOBJ_SUBOBJ"] otn_strings = { 0x80: "DMU_OTN_UINT8_DATA", 0xc0: "DMU_OTN_UINT8_METADATA", 0x81: "DMU_OTN_UINT16_DATA", 0xc1: "DMU_OTN_UINT16_METADATA", 0x82: "DMU_OTN_UINT32_DATA", 0xc2: "DMU_OTN_UINT32_METADATA", 0x83: "DMU_OTN_UINT64_DATA", 0xc3: "DMU_OTN_UINT64_METADATA", 0x84: "DMU_OTN_ZAP_DATA", 0xc4: "DMU_OTN_ZAP_METADATA", 0xa0: "DMU_OTN_UINT8_ENC_DATA", 0xe0: "DMU_OTN_UINT8_ENC_METADATA", 0xa1: "DMU_OTN_UINT16_ENC_DATA", 0xe1: "DMU_OTN_UINT16_ENC_METADATA", 0xa2: "DMU_OTN_UINT32_ENC_DATA", 0xe2: "DMU_OTN_UINT32_ENC_METADATA", 0xa3: "DMU_OTN_UINT64_ENC_DATA", 0xe3: "DMU_OTN_UINT64_ENC_METADATA", 0xa4: "DMU_OTN_ZAP_ENC_DATA", 0xe4: "DMU_OTN_ZAP_ENC_METADATA"} # If "-rr" option is used, don't convert to string representation if raw > 1: return "%i" % t try: if t < len(ot_strings): return ot_strings[t] else: return otn_strings[t] except (IndexError, KeyError): return "(UNKNOWN)" def get_compstring(c): comp_strings = ["ZIO_COMPRESS_INHERIT", "ZIO_COMPRESS_ON", "ZIO_COMPRESS_OFF", "ZIO_COMPRESS_LZJB", "ZIO_COMPRESS_EMPTY", "ZIO_COMPRESS_GZIP_1", "ZIO_COMPRESS_GZIP_2", "ZIO_COMPRESS_GZIP_3", "ZIO_COMPRESS_GZIP_4", "ZIO_COMPRESS_GZIP_5", "ZIO_COMPRESS_GZIP_6", "ZIO_COMPRESS_GZIP_7", "ZIO_COMPRESS_GZIP_8", "ZIO_COMPRESS_GZIP_9", "ZIO_COMPRESS_ZLE", "ZIO_COMPRESS_LZ4", "ZIO_COMPRESS_ZSTD", "ZIO_COMPRESS_FUNCTION"] # If "-rr" option is used, don't convert to string representation if raw > 1: return "%i" % c try: return comp_strings[c] except IndexError: return "%i" % c def parse_line(line, labels): global hdr new = dict() val = None for col in hdr: # These are "special" fields computed in the update_dict # function, prevent KeyError exception on labels[col] for these. 
if col not in ['bonus', 'cached', 'direct', 'indirect', 'spill']: val = line[labels[col]] if col in ['pool', 'flags']: new[col] = str(val) elif col in ['dtype', 'btype']: new[col] = get_typestring(int(val)) elif col in ['l2_comp']: new[col] = get_compstring(int(val)) else: new[col] = int(val) return new def update_dict(d, k, line, labels): pool = line[labels['pool']] objset = line[labels['objset']] key = line[labels[k]] dbsize = int(line[labels['dbsize']]) blkid = int(line[labels['blkid']]) level = int(line[labels['level']]) if pool not in d: d[pool] = dict() if objset not in d[pool]: d[pool][objset] = dict() if key not in d[pool][objset]: d[pool][objset][key] = parse_line(line, labels) d[pool][objset][key]['bonus'] = 0 d[pool][objset][key]['cached'] = 0 d[pool][objset][key]['direct'] = 0 d[pool][objset][key]['indirect'] = 0 d[pool][objset][key]['spill'] = 0 d[pool][objset][key]['cached'] += dbsize if blkid == -1: d[pool][objset][key]['bonus'] += dbsize elif blkid == -2: d[pool][objset][key]['spill'] += dbsize else: if level == 0: d[pool][objset][key]['direct'] += dbsize else: d[pool][objset][key]['indirect'] += dbsize return d def skip_line(vals, filters): ''' Determines if a line should be skipped during printing based on a set of filters ''' if len(filters) == 0: return False for key in vals: if key in filters: val = prettynum(cols[key][0], cols[key][1], vals[key]).strip() # we want a full match here if re.match("(?:" + filters[key] + r")\Z", val) is None: return True return False def print_dict(d, filters, noheader): if not noheader: print_header() for pool in list(d.keys()): for objset in list(d[pool].keys()): for v in list(d[pool][objset].values()): if not skip_line(v, filters): print_values(v) def dnodes_build_dict(filehandle): labels = dict() dnodes = dict() # First 3 lines are header information, skip the first two for i in range(2): next(filehandle) # The third line contains the labels and index locations for i, v in enumerate(next(filehandle).split()): labels[v] = i # The rest of the file is buffer information for line in filehandle: update_dict(dnodes, 'object', line.split(), labels) return dnodes def types_build_dict(filehandle): labels = dict() types = dict() # First 3 lines are header information, skip the first two for i in range(2): next(filehandle) # The third line contains the labels and index locations for i, v in enumerate(next(filehandle).split()): labels[v] = i # The rest of the file is buffer information for line in filehandle: update_dict(types, 'dtype', line.split(), labels) return types def buffers_print_all(filehandle, filters, noheader): labels = dict() # First 3 lines are header information, skip the first two for i in range(2): next(filehandle) # The third line contains the labels and index locations for i, v in enumerate(next(filehandle).split()): labels[v] = i if not noheader: print_header() # The rest of the file is buffer information for line in filehandle: vals = parse_line(line.split(), labels) if not skip_line(vals, filters): print_values(vals) def main(): global hdr global sep global raw desired_cols = None bflag = False dflag = False hflag = False ifile = None ofile = None tflag = False vflag = False xflag = False nflag = False filters = dict() try: opts, args = getopt.getopt( sys.argv[1:], "bdf:hi:o:rs:tvxF:n", [ "buffers", "dnodes", "columns", "help", "infile", "outfile", "separator", "types", "verbose", "extended", "filter" ] ) except getopt.error: usage() opts = None for opt, arg in opts: if opt in ('-b', '--buffers'): bflag = True if opt in 
('-d', '--dnodes'):
            dflag = True
        if opt in ('-f', '--columns'):
            desired_cols = arg
        if opt in ('-h', '--help'):
            hflag = True
        if opt in ('-i', '--infile'):
            ifile = arg
        if opt in ('-o', '--outfile'):
            ofile = arg
        if opt in ('-r', '--raw'):
            raw += 1
        if opt in ('-s', '--separator'):
            sep = arg
        if opt in ('-t', '--types'):
            tflag = True
        if opt in ('-v', '--verbose'):
            vflag = True
        if opt in ('-x', '--extended'):
            xflag = True
        if opt in ('-n', '--noheader'):
            nflag = True
        if opt in ('-F', '--filter'):
            fils = [x.strip() for x in arg.split(",")]

            for fil in fils:
                f = [x.strip() for x in fil.split("=")]

                if len(f) != 2:
                    sys.stderr.write("Invalid filter '%s'.\n" % fil)
                    sys.exit(1)

                if f[0] not in cols:
                    sys.stderr.write("Invalid field '%s' in filter.\n" % f[0])
                    sys.exit(1)

                if f[0] in filters:
                    sys.stderr.write("Field '%s' specified multiple times in "
                                     "filter.\n" % f[0])
                    sys.exit(1)

                try:
                    re.compile("(?:" + f[1] + r")\Z")
                except re.error:
                    sys.stderr.write("Invalid regex for field '%s' in "
                                     "filter.\n" % f[0])
                    sys.exit(1)

                filters[f[0]] = f[1]

    if hflag or (xflag and desired_cols):
        usage()

    if vflag:
        detailed_usage()

    # Ensure at most only one of b, d, or t flags are set
    if (bflag and dflag) or (bflag and tflag) or (dflag and tflag):
        usage()

    if bflag:
        hdr = bxhdr if xflag else bhdr
    elif tflag:
        hdr = txhdr if xflag else thdr
    else:
        # Even if dflag is False, it's the default if none set
        dflag = True
        hdr = dxhdr if xflag else dhdr

    if desired_cols:
        hdr = desired_cols.split(",")

        invalid = []
        incompat = []
        for ele in hdr:
            if ele not in cols:
                invalid.append(ele)
            elif ((bflag and bincompat and ele in bincompat) or
                  (dflag and dincompat and ele in dincompat) or
                  (tflag and tincompat and ele in tincompat)):
                incompat.append(ele)

        if len(invalid) > 0:
            sys.stderr.write("Invalid column definition! -- %s\n" % invalid)
            usage()

        if len(incompat) > 0:
            sys.stderr.write("Incompatible field specified! -- %s\n" % incompat)
            usage()

    if ofile:
        try:
            tmp = open(ofile, "w")
            sys.stdout = tmp
        except IOError:
            sys.stderr.write("Cannot open %s for writing\n" % ofile)
            sys.exit(1)

    if not ifile:
        ifile = default_ifile()

    if ifile != "-":
        try:
            tmp = open(ifile, "r")
            sys.stdin = tmp
        except IOError:
            sys.stderr.write("Cannot open %s for reading\n" % ifile)
            sys.exit(1)

    if bflag:
        buffers_print_all(sys.stdin, filters, nflag)

    if dflag:
        print_dict(dnodes_build_dict(sys.stdin), filters, nflag)

    if tflag:
        print_dict(types_build_dict(sys.stdin), filters, nflag)


if __name__ == '__main__':
    main()
diff --git a/config/always-python.m4 b/config/always-python.m4
index 5f47df424c27..5a2008124f72 100644
--- a/config/always-python.m4
+++ b/config/always-python.m4
@@ -1,70 +1,57 @@
dnl #
dnl # The majority of the python scripts are written to be compatible
-dnl # with Python 2.6 and Python 3.4. Therefore, they may be installed
-dnl # and used with either interpreter. This option is intended to
+dnl # with Python 3.6. This option is intended
dnl # to provide a method to specify the default system version, and
dnl # set the PYTHON environment variable accordingly.
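The version gate this macro enforces at configure time, expressed in Python itself, is simply the following (a sketch for illustration, not part of the build system):

# Runtime equivalent of the configure-time check below.
import sys

if sys.version_info < (3, 6):
    sys.exit("Python >= 3.6 is required")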
dnl # AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYTHON], [ AC_ARG_WITH([python], AS_HELP_STRING([--with-python[=VERSION]], [default system python version @<:@default=check@:>@]), [with_python=$withval], [with_python=check]) AS_CASE([$with_python], - [check], [AC_CHECK_PROGS([PYTHON], [python3 python2], [:])], - [2*], [PYTHON="python${with_python}"], - [*python2*], [PYTHON="${with_python}"], + [check], [AC_CHECK_PROGS([PYTHON], [python3], [:])], [3*], [PYTHON="python${with_python}"], [*python3*], [PYTHON="${with_python}"], [no], [PYTHON=":"], [AC_MSG_ERROR([Unknown --with-python value '$with_python'])] ) dnl # - dnl # Minimum supported Python versions for utilities: - dnl # Python 2.6 or Python 3.4 + dnl # Minimum supported Python versions for utilities: Python 3.6 dnl # AM_PATH_PYTHON([], [], [:]) AS_IF([test -z "$PYTHON_VERSION"], [ PYTHON_VERSION=$(echo ${PYTHON##*/} | tr -cd 0-9.) ]) PYTHON_MINOR=${PYTHON_VERSION#*\.} AS_CASE([$PYTHON_VERSION], - [2.*], [ - AS_IF([test $PYTHON_MINOR -lt 6], - [AC_MSG_ERROR("Python >= 2.6 is required")]) - ], [3.*], [ - AS_IF([test $PYTHON_MINOR -lt 4], - [AC_MSG_ERROR("Python >= 3.4 is required")]) + AS_IF([test $PYTHON_MINOR -lt 6], + [AC_MSG_ERROR("Python >= 3.6 is required")]) ], [:|2|3], [], [PYTHON_VERSION=3] ) AM_CONDITIONAL([USING_PYTHON], [test "$PYTHON" != :]) - AM_CONDITIONAL([USING_PYTHON_2], [test "x${PYTHON_VERSION%%\.*}" = x2]) - AM_CONDITIONAL([USING_PYTHON_3], [test "x${PYTHON_VERSION%%\.*}" = x3]) - - AM_COND_IF([USING_PYTHON_2], - [AC_SUBST([PYTHON_SHEBANG], [python2])], - [AC_SUBST([PYTHON_SHEBANG], [python3])]) + AC_SUBST([PYTHON_SHEBANG], [python3]) dnl # dnl # Request that packages be built for a specific Python version. dnl # AS_IF([test "x$with_python" != xcheck], [ PYTHON_PKG_VERSION=$(echo $PYTHON_VERSION | tr -d .) DEFINE_PYTHON_PKG_VERSION='--define "__use_python_pkg_version '${PYTHON_PKG_VERSION}'"' DEFINE_PYTHON_VERSION='--define "__use_python '${PYTHON}'"' ], [ DEFINE_PYTHON_VERSION='' DEFINE_PYTHON_PKG_VERSION='' ]) AC_SUBST(DEFINE_PYTHON_VERSION) AC_SUBST(DEFINE_PYTHON_PKG_VERSION) ]) diff --git a/config/always-pyzfs.m4 b/config/always-pyzfs.m4 index 00e5d0e2cbbd..996a2a6e2401 100644 --- a/config/always-pyzfs.m4 +++ b/config/always-pyzfs.m4 @@ -1,120 +1,119 @@ dnl # dnl # ZFS_AC_PYTHON_MODULE(module_name, [action-if-true], [action-if-false]) dnl # dnl # Checks for Python module. Freely inspired by AX_PYTHON_MODULE dnl # https://www.gnu.org/software/autoconf-archive/ax_python_module.html dnl # Required by ZFS_AC_CONFIG_ALWAYS_PYZFS. dnl # AC_DEFUN([ZFS_AC_PYTHON_MODULE], [ PYTHON_NAME=${PYTHON##*/} AC_MSG_CHECKING([for $PYTHON_NAME module: $1]) AS_IF([$PYTHON -c "import $1" 2>/dev/null], [ AC_MSG_RESULT(yes) m4_ifvaln([$2], [$2]) ], [ AC_MSG_RESULT(no) m4_ifvaln([$3], [$3]) ]) ]) dnl # -dnl # Determines if pyzfs can be built, requires Python 2.7 or later. +dnl # Determines if pyzfs can be built, requires Python 3.6 or later. dnl # AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [ AC_ARG_ENABLE([pyzfs], AS_HELP_STRING([--enable-pyzfs], [install libzfs_core python bindings @<:@default=check@:>@]), [enable_pyzfs=$enableval], [enable_pyzfs=check]) dnl # dnl # Packages for pyzfs specifically enabled/disabled. 
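ZFS_AC_PYTHON_MODULE above decides whether a module is present by asking the selected interpreter to import it and checking the exit status. The same probe can be sketched as a small Python helper; the interpreter path here is only an example:

# The module probe, reduced to a helper: success of "import <module>"
# in the chosen interpreter decides the result (paths are examples).
import subprocess

def has_module(python, module):
    res = subprocess.run([python, "-c", "import " + module],
                         stdout=subprocess.DEVNULL,
                         stderr=subprocess.DEVNULL)
    return res.returncode == 0

print(has_module("/usr/bin/python3", "cffi"))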
dnl # AS_IF([test "x$enable_pyzfs" != xcheck], [ AS_IF([test "x$enable_pyzfs" = xyes], [ DEFINE_PYZFS='--with pyzfs' ], [ DEFINE_PYZFS='--without pyzfs' ]) ], [ AS_IF([test "$PYTHON" != :], [ DEFINE_PYZFS='' ], [ enable_pyzfs=no DEFINE_PYZFS='--without pyzfs' ]) ]) AC_SUBST(DEFINE_PYZFS) dnl # dnl # Python "packaging" (or, failing that, "distlib") module is required to build and install pyzfs dnl # AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ ZFS_AC_PYTHON_MODULE([packaging], [], [ ZFS_AC_PYTHON_MODULE([distlib], [], [ AS_IF([test "x$enable_pyzfs" = xyes], [ AC_MSG_ERROR("Python $PYTHON_VERSION packaging and distlib modules are not installed") ], [test "x$enable_pyzfs" != xno], [ enable_pyzfs=no ]) ]) ]) ]) dnl # - dnl # Require python-devel libraries + dnl # Require python3-devel libraries dnl # AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ AS_CASE([$PYTHON_VERSION], - [3.*], [PYTHON_REQUIRED_VERSION=">= '3.4.0'"], - [2.*], [PYTHON_REQUIRED_VERSION=">= '2.7.0'"], + [3.*], [PYTHON_REQUIRED_VERSION=">= '3.6.0'"], [AC_MSG_ERROR("Python $PYTHON_VERSION unknown")] ) AX_PYTHON_DEVEL([$PYTHON_REQUIRED_VERSION], [ AS_IF([test "x$enable_pyzfs" = xyes], [ AC_MSG_ERROR("Python $PYTHON_REQUIRED_VERSION development library is not installed") ], [test "x$enable_pyzfs" != xno], [ enable_pyzfs=no ]) ]) ]) dnl # dnl # Python "setuptools" module is required to build and install pyzfs dnl # AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ ZFS_AC_PYTHON_MODULE([setuptools], [], [ AS_IF([test "x$enable_pyzfs" = xyes], [ AC_MSG_ERROR("Python $PYTHON_VERSION setuptools is not installed") ], [test "x$enable_pyzfs" != xno], [ enable_pyzfs=no ]) ]) ]) dnl # dnl # Python "cffi" module is required to run pyzfs dnl # AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ ZFS_AC_PYTHON_MODULE([cffi], [], [ AS_IF([test "x$enable_pyzfs" = xyes], [ AC_MSG_ERROR("Python $PYTHON_VERSION cffi is not installed") ], [test "x$enable_pyzfs" != xno], [ enable_pyzfs=no ]) ]) ]) dnl # dnl # Set enable_pyzfs to 'yes' if every check passed dnl # AS_IF([test "x$enable_pyzfs" = xcheck], [enable_pyzfs=yes]) AM_CONDITIONAL([PYZFS_ENABLED], [test "x$enable_pyzfs" = xyes]) AC_SUBST([PYZFS_ENABLED], [$enable_pyzfs]) AC_SUBST(pythonsitedir, [$PYTHON_SITE_PKG]) AC_MSG_CHECKING([whether to enable pyzfs: ]) AC_MSG_RESULT($enable_pyzfs) ]) diff --git a/contrib/pyzfs/setup.py.in b/contrib/pyzfs/setup.py.in index bd8ffc728fa6..934b3189ebe1 100644 --- a/contrib/pyzfs/setup.py.in +++ b/contrib/pyzfs/setup.py.in @@ -1,61 +1,60 @@ # # Copyright 2015 ClusterHQ # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. 
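Taken together, the checks above enable pyzfs only when the packaging (or distlib), setuptools, and cffi modules are importable by the chosen interpreter. A standalone sanity check of the same prerequisites might look like this (a sketch, not part of configure):

# Standalone check of the same pyzfs prerequisites configure probes for.
import importlib

for mod in ("packaging", "setuptools", "cffi"):
    try:
        importlib.import_module(mod)
        print(mod, "found")
    except ImportError:
        print(mod, "missing -- pyzfs would be disabled")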
# from __future__ import absolute_import, division, print_function from setuptools import setup, find_packages setup( name="pyzfs", version="@VERSION@", description="Wrapper for libzfs_core", author="ClusterHQ", author_email="support@clusterhq.com", url="http://pyzfs.readthedocs.org", license="Apache License, Version 2.0", classifiers=[ "Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: Apache Software License", - "Programming Language :: Python :: 2", - "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", - "Programming Language :: Python :: 3.4", - "Programming Language :: Python :: 3.5", "Programming Language :: Python :: 3.6", "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", "Topic :: System :: Filesystems", "Topic :: Software Development :: Libraries", ], keywords=[ "ZFS", "OpenZFS", "libzfs_core", ], packages=find_packages(), include_package_data=True, install_requires=[ "cffi", ], setup_requires=[ "cffi", ], - python_requires='>=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,<4', + python_requires='>=3.6,<4', zip_safe=False, test_suite="libzfs_core.test", ) # vim: softtabstop=4 tabstop=4 expandtab shiftwidth=4 diff --git a/rpm/generic/zfs.spec.in b/rpm/generic/zfs.spec.in index 25beadce7c1f..9ee36b20491e 100644 --- a/rpm/generic/zfs.spec.in +++ b/rpm/generic/zfs.spec.in @@ -1,569 +1,550 @@ %global _sbindir /sbin %global _libdir /%{_lib} # Set the default udev directory based on distribution. %if %{undefined _udevdir} -%if 0%{?fedora} >= 17 || 0%{?rhel} >= 7 || 0%{?centos} >= 7 +%if 0%{?fedora}%{?rhel}%{?centos} %global _udevdir %{_prefix}/lib/udev %else %global _udevdir /lib/udev %endif %endif # Set the default udevrule directory based on distribution. %if %{undefined _udevruledir} -%if 0%{?fedora} >= 17 || 0%{?rhel} >= 7 || 0%{?centos} >= 7 +%if 0%{?fedora}%{?rhel}%{?centos} %global _udevruledir %{_prefix}/lib/udev/rules.d %else %global _udevruledir /lib/udev/rules.d %endif %endif # Set the default dracut directory based on distribution. 
%if %{undefined _dracutdir} -%if 0%{?fedora} >= 17 || 0%{?rhel} >= 7 || 0%{?centos} >= 7 +%if 0%{?fedora}%{?rhel}%{?centos} %global _dracutdir %{_prefix}/lib/dracut %else %global _dracutdir %{_prefix}/share/dracut %endif %endif %if %{undefined _initconfdir} %global _initconfdir /etc/sysconfig %endif %if %{undefined _unitdir} %global _unitdir %{_prefix}/lib/systemd/system %endif %if %{undefined _presetdir} %global _presetdir %{_prefix}/lib/systemd/system-preset %endif %if %{undefined _modulesloaddir} %global _modulesloaddir %{_prefix}/lib/modules-load.d %endif %if %{undefined _systemdgeneratordir} %global _systemdgeneratordir %{_prefix}/lib/systemd/system-generators %endif %if %{undefined _pkgconfigdir} %global _pkgconfigdir %{_prefix}/%{_lib}/pkgconfig %endif %bcond_with debug %bcond_with debuginfo %bcond_with asan %bcond_with systemd %bcond_with pam +%bcond_without pyzfs # Generic enable switch for systemd %if %{with systemd} %define _systemd 1 %endif -# RHEL >= 7 comes with systemd -%if 0%{?rhel} >= 7 +# Distros below support systemd +%if 0%{?rhel}%{?fedora}%{?centos}%{?suse_version} %define _systemd 1 %endif -# Fedora >= 15 comes with systemd, but only >= 18 has -# the proper macros -%if 0%{?fedora} >= 18 -%define _systemd 1 -%endif - -# opensuse >= 12.1 comes with systemd, but only >= 13.1 -# has the proper macros -%if 0%{?suse_version} >= 1310 -%define _systemd 1 -%endif - -# When not specified default to distribution provided version. This -# is normally Python 3, but for RHEL <= 7 only Python 2 is provided. +# When not specified default to distribution provided version. %if %{undefined __use_python} -%if 0%{?rhel} && 0%{?rhel} <= 7 -%define __python /usr/bin/python2 -%define __python_pkg_version 2 -%define __python_cffi_pkg python-cffi -%define __python_setuptools_pkg python-setuptools -%else %define __python /usr/bin/python3 %define __python_pkg_version 3 -%define __python_cffi_pkg python3-cffi -%define __python_setuptools_pkg python3-setuptools -%endif %else %define __python %{__use_python} %define __python_pkg_version %{__use_python_pkg_version} -%define __python_cffi_pkg python%{__python_pkg_version}-cffi -%define __python_setuptools_pkg python%{__python_pkg_version}-setuptools %endif %define __python_sitelib %(%{__python} -Esc "from distutils.sysconfig import get_python_lib; print(get_python_lib())") -# By default python-pyzfs is enabled, with the exception of -# RHEL 6 which by default uses Python 2.6 which is too old. -%if 0%{?rhel} == 6 -%bcond_with pyzfs -%else -%bcond_without pyzfs -%endif - Name: @PACKAGE@ Version: @VERSION@ Release: @RELEASE@%{?dist} Summary: Commands to control the kernel modules and libraries Group: System Environment/Kernel License: @ZFS_META_LICENSE@ URL: https://github.com/openzfs/zfs Source0: %{name}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) Requires: libzpool5 = %{version} Requires: libnvpair3 = %{version} Requires: libuutil3 = %{version} Requires: libzfs5 = %{version} Requires: %{name}-kmod = %{version} Provides: %{name}-kmod-common = %{version} Obsoletes: spl # zfs-fuse provides the same commands and man pages that OpenZFS does. # Renaming those on either side would conflict with all available documentation. 
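# The %__python_sitelib macro above shells out to distutils to locate the
# site-packages directory. distutils was removed in Python 3.12, so a
# sysconfig-based probe is the closest modern equivalent (a sketch, not
# part of the spec):
#
#     import sysconfig
#     try:
#         from distutils.sysconfig import get_python_lib  # what the macro runs
#         print(get_python_lib())
#     except ImportError:
#         print(sysconfig.get_path('purelib'))  # near-equivalent on 3.12+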
Conflicts: zfs-fuse -%if 0%{?rhel}%{?fedora}%{?suse_version} +%if 0%{?rhel}%{?centos}%{?fedora}%{?suse_version} BuildRequires: gcc, make BuildRequires: zlib-devel BuildRequires: libuuid-devel BuildRequires: libblkid-devel BuildRequires: libudev-devel BuildRequires: libattr-devel BuildRequires: openssl-devel # We don't directly use it, but if this isn't installed, rpmbuild as root can # crash+corrupt rpmdb # See issue #12071 BuildRequires: ncompress -%if 0%{?fedora} >= 28 || 0%{?rhel} >= 8 || 0%{?centos} >= 8 +%if 0%{?fedora} || 0%{?rhel} >= 8 || 0%{?centos} >= 8 BuildRequires: libtirpc-devel %endif Requires: openssl %if 0%{?_systemd} BuildRequires: systemd %endif %endif %if 0%{?_systemd} Requires(post): systemd Requires(preun): systemd Requires(postun): systemd %endif # The zpool iostat/status -c scripts call some utilities like lsblk and iostat Requires: util-linux Requires: sysstat %description This package contains the core ZFS command line utilities. %package -n libzpool5 Summary: Native ZFS pool library for Linux Group: System Environment/Kernel Obsoletes: libzpool2 Obsoletes: libzpool4 %description -n libzpool5 This package contains the zpool library, which provides support for managing zpools %if %{defined ldconfig_scriptlets} %ldconfig_scriptlets -n libzpool5 %else %post -n libzpool5 -p /sbin/ldconfig %postun -n libzpool5 -p /sbin/ldconfig %endif %package -n libnvpair3 Summary: Solaris name-value library for Linux Group: System Environment/Kernel Obsoletes: libnvpair1 %description -n libnvpair3 This package contains routines for packing and unpacking name-value pairs. This functionality is used to portably transport data across process boundaries, between kernel and user space, and can be used to write self describing data structures on disk. %if %{defined ldconfig_scriptlets} %ldconfig_scriptlets -n libnvpair3 %else %post -n libnvpair3 -p /sbin/ldconfig %postun -n libnvpair3 -p /sbin/ldconfig %endif %package -n libuutil3 Summary: Solaris userland utility library for Linux Group: System Environment/Kernel Obsoletes: libuutil1 %description -n libuutil3 This library provides a variety of compatibility functions for OpenZFS: * libspl: The Solaris Porting Layer userland library, which provides APIs that make it possible to run Solaris user code in a Linux environment with relatively minimal modification. * libavl: The Adelson-Velskii Landis balanced binary tree manipulation library. * libefi: The Extensible Firmware Interface library for GUID disk partitioning. * libshare: NFS, SMB, and iSCSI service integration for ZFS. %if %{defined ldconfig_scriptlets} %ldconfig_scriptlets -n libuutil3 %else %post -n libuutil3 -p /sbin/ldconfig %postun -n libuutil3 -p /sbin/ldconfig %endif # The library version is encoded in the package name. When updating the # version information it is important to add an obsoletes line below for # the previous version of the package. 
%package -n libzfs5 Summary: Native ZFS filesystem library for Linux Group: System Environment/Kernel Obsoletes: libzfs2 Obsoletes: libzfs4 %description -n libzfs5 This package provides support for managing ZFS filesystems %if %{defined ldconfig_scriptlets} %ldconfig_scriptlets -n libzfs5 %else %post -n libzfs5 -p /sbin/ldconfig %postun -n libzfs5 -p /sbin/ldconfig %endif %package -n libzfs5-devel Summary: Development headers Group: System Environment/Kernel Requires: libzfs5 = %{version} Requires: libzpool5 = %{version} Requires: libnvpair3 = %{version} Requires: libuutil3 = %{version} Provides: libzpool5-devel Provides: libnvpair3-devel Provides: libuutil3-devel Obsoletes: zfs-devel Obsoletes: libzfs2-devel Obsoletes: libzfs4-devel %description -n libzfs5-devel This package contains the header files needed for building additional applications against the ZFS libraries. %package test Summary: Test infrastructure Group: System Environment/Kernel Requires: %{name}%{?_isa} = %{version}-%{release} Requires: parted Requires: lsscsi Requires: mdadm Requires: bc Requires: ksh Requires: fio Requires: acl Requires: sudo Requires: sysstat Requires: libaio Requires: python%{__python_pkg_version} -%if 0%{?rhel}%{?fedora}%{?suse_version} +%if 0%{?rhel}%{?centos}%{?fedora}%{?suse_version} BuildRequires: libaio-devel %endif AutoReqProv: no %description test This package contains test infrastructure and support scripts for validating the file system. %package dracut Summary: Dracut module Group: System Environment/Kernel BuildArch: noarch Requires: %{name} >= %{version} Requires: dracut Requires: /usr/bin/awk Requires: grep %description dracut This package contains a dracut module used to construct an initramfs image which is ZFS aware. %if %{with pyzfs} +# Enforce `python36-` package prefix for CentOS 7 +# since dependencies come from EPEL and are named this way %package -n python%{__python_pkg_version}-pyzfs Summary: Python %{python_version} wrapper for libzfs_core Group: Development/Languages/Python License: Apache-2.0 BuildArch: noarch Requires: libzfs5 = %{version} Requires: libnvpair3 = %{version} Requires: libffi Requires: python%{__python_pkg_version} -Requires: %{__python_cffi_pkg} -%if 0%{?rhel}%{?fedora}%{?suse_version} -%if 0%{?rhel} >= 8 || 0%{?centos} >= 8 || 0%{?fedora} >= 28 -BuildRequires: python3-packaging +%if 0%{?centos} == 7 +Requires: python36-cffi %else -BuildRequires: python-packaging +Requires: python%{__python_pkg_version}-cffi %endif + +%if 0%{?rhel}%{?centos}%{?fedora}%{?suse_version} +%if 0%{?centos} == 7 +BuildRequires: python36-packaging +BuildRequires: python36-devel +BuildRequires: python36-cffi +BuildRequires: python36-setuptools +%else +BuildRequires: python%{__python_pkg_version}-packaging BuildRequires: python%{__python_pkg_version}-devel -BuildRequires: %{__python_cffi_pkg} -BuildRequires: %{__python_setuptools_pkg} +BuildRequires: python%{__python_pkg_version}-cffi +BuildRequires: python%{__python_pkg_version}-setuptools +%endif + BuildRequires: libffi-devel %endif %description -n python%{__python_pkg_version}-pyzfs This package provides a Python wrapper for the libzfs_core C library. %endif %if 0%{?_initramfs} %package initramfs Summary: Initramfs module Group: System Environment/Kernel Requires: %{name}%{?_isa} = %{version}-%{release} Requires: %{name} = %{version}-%{release} Requires: initramfs-tools %description initramfs This package contains an initramfs module used to construct an initramfs image which is ZFS aware.
%endif %prep %if %{with debug} %define debug --enable-debug %else %define debug --disable-debug %endif %if %{with debuginfo} %define debuginfo --enable-debuginfo %else %define debuginfo --disable-debuginfo %endif %if %{with asan} %define asan --enable-asan %else %define asan --disable-asan %endif %if 0%{?_systemd} %define systemd --enable-systemd --with-systemdunitdir=%{_unitdir} --with-systemdpresetdir=%{_presetdir} --with-systemdmodulesloaddir=%{_modulesloaddir} --with-systemdgeneratordir=%{_systemdgeneratordir} --disable-sysvinit %define systemd_svcs zfs-import-cache.service zfs-import-scan.service zfs-mount.service zfs-share.service zfs-zed.service zfs.target zfs-import.target zfs-volume-wait.service zfs-volumes.target %else %define systemd --enable-sysvinit --disable-systemd %endif %if %{with pyzfs} %define pyzfs --enable-pyzfs %else %define pyzfs --disable-pyzfs %endif %if %{with pam} %define pam --enable-pam %else %define pam --disable-pam %endif %setup -q %build %configure \ --with-config=user \ --with-udevdir=%{_udevdir} \ --with-udevruledir=%{_udevruledir} \ --with-dracutdir=%{_dracutdir} \ --with-pamconfigsdir=%{_datadir}/pam-configs \ --with-pammoduledir=%{_libdir}/security \ --with-python=%{__python} \ --with-pkgconfigdir=%{_pkgconfigdir} \ --disable-static \ %{debug} \ %{debuginfo} \ %{asan} \ %{systemd} \ %{pam} \ %{pyzfs} make %{?_smp_mflags} %install %{__rm} -rf $RPM_BUILD_ROOT make install DESTDIR=%{?buildroot} find %{?buildroot}%{_libdir} -name '*.la' -exec rm -f {} \; %if 0%{!?__brp_mangle_shebangs:1} find %{?buildroot}%{_bindir} \ \( -name arc_summary -or -name arcstat -or -name dbufstat \) \ -exec %{__sed} -i 's|^#!.*|#!%{__python}|' {} \; find %{?buildroot}%{_datadir} \ \( -name test-runner.py -or -name zts-report.py \) \ -exec %{__sed} -i 's|^#!.*|#!%{__python}|' {} \; %endif %post %if 0%{?_systemd} %if 0%{?systemd_post:1} %systemd_post %{systemd_svcs} %else if [ "$1" = "1" -o "$1" = "install" ] ; then # Initial installation systemctl preset %{systemd_svcs} >/dev/null || true fi %endif %else if [ -x /sbin/chkconfig ]; then /sbin/chkconfig --add zfs-import /sbin/chkconfig --add zfs-load-key /sbin/chkconfig --add zfs-mount /sbin/chkconfig --add zfs-share /sbin/chkconfig --add zfs-zed fi %endif exit 0 # On RHEL/CentOS 7 the static nodes aren't refreshed by default after # installing a package. This is the default behavior for Fedora. 
%posttrans %if 0%{?rhel} == 7 || 0%{?centos} == 7 systemctl restart kmod-static-nodes systemctl restart systemd-tmpfiles-setup-dev udevadm trigger %endif %preun %if 0%{?_systemd} %if 0%{?systemd_preun:1} %systemd_preun %{systemd_svcs} %else if [ "$1" = "0" -o "$1" = "remove" ] ; then # Package removal, not upgrade systemctl --no-reload disable %{systemd_svcs} >/dev/null || true systemctl stop %{systemd_svcs} >/dev/null || true fi %endif %else if [ "$1" = "0" -o "$1" = "remove" ] && [ -x /sbin/chkconfig ]; then /sbin/chkconfig --del zfs-import /sbin/chkconfig --del zfs-load-key /sbin/chkconfig --del zfs-mount /sbin/chkconfig --del zfs-share /sbin/chkconfig --del zfs-zed fi %endif exit 0 %postun %if 0%{?_systemd} %if 0%{?systemd_postun:1} %systemd_postun %{systemd_svcs} %else systemctl --system daemon-reload >/dev/null || true %endif %endif %files # Core utilities %{_sbindir}/* %{_bindir}/raidz_test %{_sbindir}/zgenhostid %{_bindir}/zvol_wait -# Optional Python 2/3 scripts +# Optional Python 3 scripts %{_bindir}/arc_summary %{_bindir}/arcstat %{_bindir}/dbufstat # Man pages %{_mandir}/man1/* %{_mandir}/man4/* %{_mandir}/man5/* %{_mandir}/man7/* %{_mandir}/man8/* # Configuration files and scripts %{_libexecdir}/%{name} %{_udevdir}/vdev_id %{_udevdir}/zvol_id %{_udevdir}/rules.d/* %{_datadir}/%{name}/compatibility.d %if ! 0%{?_systemd} || 0%{?_initramfs} # Files needed for sysvinit and initramfs-tools %{_sysconfdir}/%{name}/zfs-functions %config(noreplace) %{_initconfdir}/zfs %else %exclude %{_sysconfdir}/%{name}/zfs-functions %exclude %{_initconfdir}/zfs %endif %if 0%{?_systemd} %{_unitdir}/* %{_presetdir}/* %{_modulesloaddir}/* %{_systemdgeneratordir}/* %else %config(noreplace) %{_sysconfdir}/init.d/* %endif %config(noreplace) %{_sysconfdir}/%{name}/zed.d/* %config(noreplace) %{_sysconfdir}/%{name}/zpool.d/* %config(noreplace) %{_sysconfdir}/%{name}/vdev_id.conf.*.example %attr(440, root, root) %config(noreplace) %{_sysconfdir}/sudoers.d/* %if %{with pam} %{_libdir}/security/* %{_datadir}/pam-configs/* %endif %files -n libzpool5 %{_libdir}/libzpool.so.* %files -n libnvpair3 %{_libdir}/libnvpair.so.* %files -n libuutil3 %{_libdir}/libuutil.so.* %files -n libzfs5 %{_libdir}/libzfs*.so.* %files -n libzfs5-devel %{_pkgconfigdir}/libzfs.pc %{_pkgconfigdir}/libzfsbootenv.pc %{_pkgconfigdir}/libzfs_core.pc %{_libdir}/*.so %{_includedir}/* %doc AUTHORS COPYRIGHT LICENSE NOTICE README.md %files test %{_datadir}/%{name}/zfs-tests %{_datadir}/%{name}/test-runner %{_datadir}/%{name}/runfiles %{_datadir}/%{name}/*.sh %files dracut %doc contrib/dracut/README.dracut.markdown %{_dracutdir}/modules.d/* %if %{with pyzfs} %files -n python%{__python_pkg_version}-pyzfs %doc contrib/pyzfs/README %doc contrib/pyzfs/LICENSE %defattr(-,root,root,-) %{__python_sitelib}/libzfs_core/* %{__python_sitelib}/pyzfs* %endif %if 0%{?_initramfs} %files initramfs %doc contrib/initramfs/README.initramfs.markdown /usr/share/initramfs-tools/* %else # Since we're not building the initramfs package, # ignore those files. %exclude /usr/share/initramfs-tools %endif diff --git a/tests/test-runner/bin/test-runner.py.in b/tests/test-runner/bin/test-runner.py.in index d32e05c45392..304494083c75 100755 --- a/tests/test-runner/bin/test-runner.py.in +++ b/tests/test-runner/bin/test-runner.py.in @@ -1,1107 +1,1099 @@ #!/usr/bin/env @PYTHON_SHEBANG@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. 
# You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2012, 2018 by Delphix. All rights reserved. # Copyright (c) 2019 Datto Inc. # -# This script must remain compatible with Python 2.6+ and Python 3.4+. +# This script must remain compatible with Python 3.6+. # -# some python 2.7 system don't have a configparser shim -try: - import configparser -except ImportError: - import ConfigParser as configparser - import os import sys import ctypes import re +import configparser from datetime import datetime from optparse import OptionParser from pwd import getpwnam from pwd import getpwuid from select import select from subprocess import PIPE from subprocess import Popen from threading import Timer -from time import time +from time import time, CLOCK_MONOTONIC_RAW BASEDIR = '/var/tmp/test_results' TESTDIR = '/usr/share/zfs/' KILL = 'kill' TRUE = 'true' SUDO = 'sudo' LOG_FILE = 'LOG_FILE' LOG_OUT = 'LOG_OUT' LOG_ERR = 'LOG_ERR' LOG_FILE_OBJ = None -# some python 2.7 system don't have a concept of monotonic time -CLOCK_MONOTONIC_RAW = 4 # see - class timespec(ctypes.Structure): _fields_ = [ ('tv_sec', ctypes.c_long), ('tv_nsec', ctypes.c_long) ] librt = ctypes.CDLL('librt.so.1', use_errno=True) clock_gettime = librt.clock_gettime clock_gettime.argtypes = [ctypes.c_int, ctypes.POINTER(timespec)] def monotonic_time(): t = timespec() if clock_gettime(CLOCK_MONOTONIC_RAW, ctypes.pointer(t)) != 0: errno_ = ctypes.get_errno() raise OSError(errno_, os.strerror(errno_)) return t.tv_sec + t.tv_nsec * 1e-9 class Result(object): total = 0 runresults = {'PASS': 0, 'FAIL': 0, 'SKIP': 0, 'KILLED': 0, 'RERAN': 0} def __init__(self): self.starttime = None self.returncode = None self.runtime = '' self.stdout = [] self.stderr = [] self.result = '' def done(self, proc, killed, reran): """ Finalize the results of this Cmd. """ Result.total += 1 m, s = divmod(monotonic_time() - self.starttime, 60) self.runtime = '%02d:%02d' % (m, s) self.returncode = proc.returncode if reran is True: Result.runresults['RERAN'] += 1 if killed: self.result = 'KILLED' Result.runresults['KILLED'] += 1 elif self.returncode == 0: self.result = 'PASS' Result.runresults['PASS'] += 1 elif self.returncode == 4: self.result = 'SKIP' Result.runresults['SKIP'] += 1 elif self.returncode != 0: self.result = 'FAIL' Result.runresults['FAIL'] += 1 class Output(object): """ This class is a slightly modified version of the 'Stream' class found here: http://goo.gl/aSGfv """ def __init__(self, stream): self.stream = stream self._buf = b'' self.lines = [] def fileno(self): return self.stream.fileno() def read(self, drain=0): """ Read from the file descriptor. If 'drain' set, read until EOF. """ while self._read() is not None: if not drain: break def _read(self): """ Read up to 4k of data from this output stream. Collect the output up to the last newline, and append it to any leftover data from a previous call. The lines are stored as a (timestamp, data) tuple for easy sorting/merging later. 
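        The split rule in isolation (illustrative bytes): everything up to
        the last newline is emitted, and the partial tail is buffered for
        the next call:

            data = b'prev' + b' one\ntwo\npart'
            complete, rest = data.rsplit(b'\n', 1)
            # complete == b'prev one\ntwo', rest == b'part'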
""" fd = self.fileno() buf = os.read(fd, 4096) if not buf: return None if b'\n' not in buf: self._buf += buf return [] buf = self._buf + buf tmp, rest = buf.rsplit(b'\n', 1) self._buf = rest now = datetime.now() rows = tmp.split(b'\n') self.lines += [(now, r) for r in rows] class Cmd(object): verified_users = [] def __init__(self, pathname, identifier=None, outputdir=None, timeout=None, user=None, tags=None): self.pathname = pathname self.identifier = identifier self.outputdir = outputdir or 'BASEDIR' """ The timeout for tests is measured in wall-clock time """ self.timeout = timeout self.user = user or '' self.killed = False self.reran = None self.result = Result() if self.timeout is None: self.timeout = 60 def __str__(self): return '''\ Pathname: %s Identifier: %s Outputdir: %s Timeout: %d User: %s ''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user) def kill_cmd(self, proc, keyboard_interrupt=False): """ Kill a running command due to timeout, or ^C from the keyboard. If sudo is required, this user was verified previously. """ self.killed = True do_sudo = len(self.user) != 0 signal = '-TERM' cmd = [SUDO, KILL, signal, str(proc.pid)] if not do_sudo: del cmd[0] try: kp = Popen(cmd) kp.wait() except Exception: pass """ If this is not a user-initiated kill and the test has not been reran before we consider if the test needs to be reran: If the test has spent some time hibernating and didn't run the whole length of time before being timed out we will rerun the test. """ if keyboard_interrupt is False and self.reran is None: runtime = monotonic_time() - self.result.starttime if int(self.timeout) > runtime: self.killed = False self.reran = False self.run(False) self.reran = True def update_cmd_privs(self, cmd, user): """ If a user has been specified to run this Cmd and we're not already running as that user, prepend the appropriate sudo command to run as that user. """ me = getpwuid(os.getuid()) if not user or user is me: if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK): cmd += '.ksh' if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK): cmd += '.sh' return cmd if not os.path.isfile(cmd): if os.path.isfile(cmd+'.ksh') and os.access(cmd+'.ksh', os.X_OK): cmd += '.ksh' if os.path.isfile(cmd+'.sh') and os.access(cmd+'.sh', os.X_OK): cmd += '.sh' ret = '%s -E -u %s %s' % (SUDO, user, cmd) return ret.split(' ') def collect_output(self, proc): """ Read from stdout/stderr as data becomes available, until the process is no longer running. Return the lines from the stdout and stderr Output objects. """ out = Output(proc.stdout) err = Output(proc.stderr) res = [] while proc.returncode is None: proc.poll() res = select([out, err], [], [], .1) for fd in res[0]: fd.read() for fd in res[0]: fd.read(drain=1) return out.lines, err.lines def run(self, dryrun): """ This is the main function that runs each individual test. Determine whether or not the command requires sudo, and modify it if needed. Run the command, and update the result object. 
""" if dryrun is True: print(self) return privcmd = self.update_cmd_privs(self.pathname, self.user) try: old = os.umask(0) if not os.path.isdir(self.outputdir): os.makedirs(self.outputdir, mode=0o777) os.umask(old) except OSError as e: fail('%s' % e) self.result.starttime = monotonic_time() proc = Popen(privcmd, stdout=PIPE, stderr=PIPE) # Allow a special timeout value of 0 to mean infinity if int(self.timeout) == 0: self.timeout = sys.maxsize t = Timer(int(self.timeout), self.kill_cmd, [proc]) try: t.start() self.result.stdout, self.result.stderr = self.collect_output(proc) except KeyboardInterrupt: self.kill_cmd(proc, True) fail('\nRun terminated at user request.') finally: t.cancel() if self.reran is not False: self.result.done(proc, self.killed, self.reran) def skip(self): """ Initialize enough of the test result that we can log a skipped command. """ Result.total += 1 Result.runresults['SKIP'] += 1 self.result.stdout = self.result.stderr = [] self.result.starttime = monotonic_time() m, s = divmod(monotonic_time() - self.result.starttime, 60) self.result.runtime = '%02d:%02d' % (m, s) self.result.result = 'SKIP' def log(self, options, suppress_console=False): """ This function is responsible for writing all output. This includes the console output, the logfile of all results (with timestamped merged stdout and stderr), and for each test, the unmodified stdout/stderr/merged in its own file. """ logname = getpwuid(os.getuid()).pw_name rer = '' if self.reran is True: rer = ' (RERAN)' user = ' (run as %s)' % (self.user if len(self.user) else logname) if self.identifier: msga = 'Test (%s): %s%s ' % (self.identifier, self.pathname, user) else: msga = 'Test: %s%s ' % (self.pathname, user) msgb = '[%s] [%s]%s\n' % (self.result.runtime, self.result.result, rer) pad = ' ' * (80 - (len(msga) + len(msgb))) result_line = msga + pad + msgb # The result line is always written to the log file. If -q was # specified only failures are written to the console, otherwise # the result line is written to the console. The console output # may be suppressed by calling log() with suppress_console=True. 
write_log(bytearray(result_line, encoding='utf-8'), LOG_FILE) if not suppress_console: if not options.quiet: write_log(result_line, LOG_OUT) elif options.quiet and self.result.result != 'PASS': write_log(result_line, LOG_OUT) lines = sorted(self.result.stdout + self.result.stderr, key=lambda x: x[0]) # Write timestamped output (stdout and stderr) to the logfile for dt, line in lines: timestamp = bytearray(dt.strftime("%H:%M:%S.%f ")[:11], encoding='utf-8') write_log(b'%s %s\n' % (timestamp, line), LOG_FILE) # Write the separate stdout/stderr/merged files, if the data exists if len(self.result.stdout): with open(os.path.join(self.outputdir, 'stdout'), 'wb') as out: for _, line in self.result.stdout: os.write(out.fileno(), b'%s\n' % line) if len(self.result.stderr): with open(os.path.join(self.outputdir, 'stderr'), 'wb') as err: for _, line in self.result.stderr: os.write(err.fileno(), b'%s\n' % line) if len(self.result.stdout) and len(self.result.stderr): with open(os.path.join(self.outputdir, 'merged'), 'wb') as merged: for _, line in lines: os.write(merged.fileno(), b'%s\n' % line) class Test(Cmd): props = ['outputdir', 'timeout', 'user', 'pre', 'pre_user', 'post', 'post_user', 'failsafe', 'failsafe_user', 'tags'] def __init__(self, pathname, pre=None, pre_user=None, post=None, post_user=None, failsafe=None, failsafe_user=None, tags=None, **kwargs): super(Test, self).__init__(pathname, **kwargs) self.pre = pre or '' self.pre_user = pre_user or '' self.post = post or '' self.post_user = post_user or '' self.failsafe = failsafe or '' self.failsafe_user = failsafe_user or '' self.tags = tags or [] def __str__(self): post_user = pre_user = failsafe_user = '' if len(self.pre_user): pre_user = ' (as %s)' % (self.pre_user) if len(self.post_user): post_user = ' (as %s)' % (self.post_user) if len(self.failsafe_user): failsafe_user = ' (as %s)' % (self.failsafe_user) return '''\ Pathname: %s Identifier: %s Outputdir: %s Timeout: %d User: %s Pre: %s%s Post: %s%s Failsafe: %s%s Tags: %s ''' % (self.pathname, self.identifier, self.outputdir, self.timeout, self.user, self.pre, pre_user, self.post, post_user, self.failsafe, failsafe_user, self.tags) def verify(self): """ Check the pre/post/failsafe scripts, user and Test. Omit the Test from this run if there are any problems. """ files = [self.pre, self.pathname, self.post, self.failsafe] users = [self.pre_user, self.user, self.post_user, self.failsafe_user] for f in [f for f in files if len(f)]: if not verify_file(f): write_log("Warning: Test '%s' not added to this run because" " it failed verification.\n" % f, LOG_ERR) return False for user in [user for user in users if len(user)]: if not verify_user(user): write_log("Not adding Test '%s' to this run.\n" % self.pathname, LOG_ERR) return False return True def run(self, options): """ Create Cmd instances for the pre/post/failsafe scripts. If the pre script doesn't pass, skip this Test. Run the post script regardless. If the Test is killed, also run the failsafe script. 
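        The ordering, reduced to a sketch (hook names are illustrative):

            def run_with_hooks(pre, test, failsafe, post):
                ok = pre() if pre else True
                result = test() if ok else 'SKIP'
                if result == 'KILLED' and failsafe:
                    failsafe()
                if post:
                    post()          # post runs regardless
                return result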
""" odir = os.path.join(self.outputdir, os.path.basename(self.pre)) pretest = Cmd(self.pre, identifier=self.identifier, outputdir=odir, timeout=self.timeout, user=self.pre_user) test = Cmd(self.pathname, identifier=self.identifier, outputdir=self.outputdir, timeout=self.timeout, user=self.user) odir = os.path.join(self.outputdir, os.path.basename(self.failsafe)) failsafe = Cmd(self.failsafe, identifier=self.identifier, outputdir=odir, timeout=self.timeout, user=self.failsafe_user) odir = os.path.join(self.outputdir, os.path.basename(self.post)) posttest = Cmd(self.post, identifier=self.identifier, outputdir=odir, timeout=self.timeout, user=self.post_user) cont = True if len(pretest.pathname): pretest.run(options.dryrun) cont = pretest.result.result == 'PASS' pretest.log(options) if cont: test.run(options.dryrun) if test.result.result == 'KILLED' and len(failsafe.pathname): failsafe.run(options.dryrun) failsafe.log(options, suppress_console=True) else: test.skip() test.log(options) if len(posttest.pathname): posttest.run(options.dryrun) posttest.log(options) class TestGroup(Test): props = Test.props + ['tests'] def __init__(self, pathname, tests=None, **kwargs): super(TestGroup, self).__init__(pathname, **kwargs) self.tests = tests or [] def __str__(self): post_user = pre_user = failsafe_user = '' if len(self.pre_user): pre_user = ' (as %s)' % (self.pre_user) if len(self.post_user): post_user = ' (as %s)' % (self.post_user) if len(self.failsafe_user): failsafe_user = ' (as %s)' % (self.failsafe_user) return '''\ Pathname: %s Identifier: %s Outputdir: %s Tests: %s Timeout: %s User: %s Pre: %s%s Post: %s%s Failsafe: %s%s Tags: %s ''' % (self.pathname, self.identifier, self.outputdir, self.tests, self.timeout, self.user, self.pre, pre_user, self.post, post_user, self.failsafe, failsafe_user, self.tags) def filter(self, keeplist): self.tests = [x for x in self.tests if x in keeplist] def verify(self): """ Check the pre/post/failsafe scripts, user and tests in this TestGroup. Omit the TestGroup entirely, or simply delete the relevant tests in the group, if that's all that's required. """ # If the pre/post/failsafe scripts are relative pathnames, convert to # absolute, so they stand a chance of passing verification. if len(self.pre) and not os.path.isabs(self.pre): self.pre = os.path.join(self.pathname, self.pre) if len(self.post) and not os.path.isabs(self.post): self.post = os.path.join(self.pathname, self.post) if len(self.failsafe) and not os.path.isabs(self.failsafe): self.post = os.path.join(self.pathname, self.post) auxfiles = [self.pre, self.post, self.failsafe] users = [self.pre_user, self.user, self.post_user, self.failsafe_user] for f in [f for f in auxfiles if len(f)]: if f != self.failsafe and self.pathname != os.path.dirname(f): write_log("Warning: TestGroup '%s' not added to this run. " "Auxiliary script '%s' exists in a different " "directory.\n" % (self.pathname, f), LOG_ERR) return False if not verify_file(f): write_log("Warning: TestGroup '%s' not added to this run. " "Auxiliary script '%s' failed verification.\n" % (self.pathname, f), LOG_ERR) return False for user in [user for user in users if len(user)]: if not verify_user(user): write_log("Not adding TestGroup '%s' to this run.\n" % self.pathname, LOG_ERR) return False # If one of the tests is invalid, delete it, log it, and drive on. 
for test in self.tests: if not verify_file(os.path.join(self.pathname, test)): del self.tests[self.tests.index(test)] write_log("Warning: Test '%s' removed from TestGroup '%s' " "because it failed verification.\n" % (test, self.pathname), LOG_ERR) return len(self.tests) != 0 def run(self, options): """ Create Cmd instances for the pre/post/failsafe scripts. If the pre script doesn't pass, skip all the tests in this TestGroup. Run the post script regardless. Run the failsafe script when a test is killed. """ # tags assigned to this test group also include the test names if options.tags and not set(self.tags).intersection(set(options.tags)): return odir = os.path.join(self.outputdir, os.path.basename(self.pre)) pretest = Cmd(self.pre, outputdir=odir, timeout=self.timeout, user=self.pre_user, identifier=self.identifier) odir = os.path.join(self.outputdir, os.path.basename(self.post)) posttest = Cmd(self.post, outputdir=odir, timeout=self.timeout, user=self.post_user, identifier=self.identifier) cont = True if len(pretest.pathname): pretest.run(options.dryrun) cont = pretest.result.result == 'PASS' pretest.log(options) for fname in self.tests: odir = os.path.join(self.outputdir, fname) test = Cmd(os.path.join(self.pathname, fname), outputdir=odir, timeout=self.timeout, user=self.user, identifier=self.identifier) odir = os.path.join(odir, os.path.basename(self.failsafe)) failsafe = Cmd(self.failsafe, outputdir=odir, timeout=self.timeout, user=self.failsafe_user, identifier=self.identifier) if cont: test.run(options.dryrun) if test.result.result == 'KILLED' and len(failsafe.pathname): failsafe.run(options.dryrun) failsafe.log(options, suppress_console=True) else: test.skip() test.log(options) if len(posttest.pathname): posttest.run(options.dryrun) posttest.log(options) class TestRun(object): props = ['quiet', 'outputdir'] def __init__(self, options): self.tests = {} self.testgroups = {} self.starttime = time() self.timestamp = datetime.now().strftime('%Y%m%dT%H%M%S') self.outputdir = os.path.join(options.outputdir, self.timestamp) self.setup_logging(options) self.defaults = [ ('outputdir', BASEDIR), ('quiet', False), ('timeout', 60), ('user', ''), ('pre', ''), ('pre_user', ''), ('post', ''), ('post_user', ''), ('failsafe', ''), ('failsafe_user', ''), ('tags', []) ] def __str__(self): s = 'TestRun:\n outputdir: %s\n' % self.outputdir s += 'TESTS:\n' for key in sorted(self.tests.keys()): s += '%s%s' % (self.tests[key].__str__(), '\n') s += 'TESTGROUPS:\n' for key in sorted(self.testgroups.keys()): s += '%s%s' % (self.testgroups[key].__str__(), '\n') return s def addtest(self, pathname, options): """ Create a new Test, and apply any properties that were passed in from the command line. If it passes verification, add it to the TestRun. """ test = Test(pathname) for prop in Test.props: setattr(test, prop, getattr(options, prop)) if test.verify(): self.tests[pathname] = test def addtestgroup(self, dirname, filenames, options): """ Create a new TestGroup, and apply any properties that were passed in from the command line. If it passes verification, add it to the TestRun. 
""" if dirname not in self.testgroups: testgroup = TestGroup(dirname) for prop in Test.props: setattr(testgroup, prop, getattr(options, prop)) # Prevent pre/post/failsafe scripts from running as regular tests for f in [testgroup.pre, testgroup.post, testgroup.failsafe]: if f in filenames: del filenames[filenames.index(f)] self.testgroups[dirname] = testgroup self.testgroups[dirname].tests = sorted(filenames) testgroup.verify() def filter(self, keeplist): for group in list(self.testgroups.keys()): if group not in keeplist: del self.testgroups[group] continue g = self.testgroups[group] if g.pre and os.path.basename(g.pre) in keeplist[group]: continue g.filter(keeplist[group]) for test in list(self.tests.keys()): directory, base = os.path.split(test) if directory not in keeplist or base not in keeplist[directory]: del self.tests[test] def read(self, options): """ Read in the specified runfiles, and apply the TestRun properties listed in the 'DEFAULT' section to our TestRun. Then read each section, and apply the appropriate properties to the Test or TestGroup. Properties from individual sections override those set in the 'DEFAULT' section. If the Test or TestGroup passes verification, add it to the TestRun. """ config = configparser.RawConfigParser() parsed = config.read(options.runfiles) failed = options.runfiles - set(parsed) if len(failed): files = ' '.join(sorted(failed)) fail("Couldn't read config files: %s" % files) for opt in TestRun.props: if config.has_option('DEFAULT', opt): setattr(self, opt, config.get('DEFAULT', opt)) self.outputdir = os.path.join(self.outputdir, self.timestamp) testdir = options.testdir for section in config.sections(): if 'tests' in config.options(section): parts = section.split(':', 1) sectiondir = parts[0] identifier = parts[1] if len(parts) == 2 else None if os.path.isdir(sectiondir): pathname = sectiondir elif os.path.isdir(os.path.join(testdir, sectiondir)): pathname = os.path.join(testdir, sectiondir) else: pathname = sectiondir testgroup = TestGroup(os.path.abspath(pathname), identifier=identifier) for prop in TestGroup.props: for sect in ['DEFAULT', section]: if config.has_option(sect, prop): if prop == 'tags': setattr(testgroup, prop, eval(config.get(sect, prop))) elif prop == 'failsafe': failsafe = config.get(sect, prop) setattr(testgroup, prop, os.path.join(testdir, failsafe)) else: setattr(testgroup, prop, config.get(sect, prop)) # Repopulate tests using eval to convert the string to a list testgroup.tests = eval(config.get(section, 'tests')) if testgroup.verify(): self.testgroups[section] = testgroup else: test = Test(section) for prop in Test.props: for sect in ['DEFAULT', section]: if config.has_option(sect, prop): if prop == 'failsafe': failsafe = config.get(sect, prop) setattr(test, prop, os.path.join(testdir, failsafe)) else: setattr(test, prop, config.get(sect, prop)) if test.verify(): self.tests[section] = test def write(self, options): """ Create a configuration file for editing and later use. The 'DEFAULT' section of the config file is created from the properties that were specified on the command line. Tests are simply added as sections that inherit everything from the 'DEFAULT' section. TestGroups are the same, except they get an option including all the tests to run in that directory. 
""" defaults = dict([(prop, getattr(options, prop)) for prop, _ in self.defaults]) config = configparser.RawConfigParser(defaults) for test in sorted(self.tests.keys()): config.add_section(test) for prop in Test.props: if prop not in self.props: config.set(test, prop, getattr(self.tests[test], prop)) for testgroup in sorted(self.testgroups.keys()): config.add_section(testgroup) config.set(testgroup, 'tests', self.testgroups[testgroup].tests) for prop in TestGroup.props: if prop not in self.props: config.set(testgroup, prop, getattr(self.testgroups[testgroup], prop)) try: with open(options.template, 'w') as f: return config.write(f) except IOError: fail('Could not open \'%s\' for writing.' % options.template) def complete_outputdirs(self): """ Collect all the pathnames for Tests, and TestGroups. Work backwards one pathname component at a time, to create a unique directory name in which to deposit test output. Tests will be able to write output files directly in the newly modified outputdir. TestGroups will be able to create one subdirectory per test in the outputdir, and are guaranteed uniqueness because a group can only contain files in one directory. Pre and post tests will create a directory rooted at the outputdir of the Test or TestGroup in question for their output. Failsafe scripts will create a directory rooted at the outputdir of each Test for their output. """ done = False components = 0 tmp_dict = dict(list(self.tests.items()) + list(self.testgroups.items())) total = len(tmp_dict) base = self.outputdir while not done: paths = [] components -= 1 for testfile in list(tmp_dict.keys()): uniq = '/'.join(testfile.split('/')[components:]).lstrip('/') if uniq not in paths: paths.append(uniq) tmp_dict[testfile].outputdir = os.path.join(base, uniq) else: break done = total == len(paths) def setup_logging(self, options): """ This function creates the output directory and gets a file object for the logfile. This function must be called before write_log() can be used. """ if options.dryrun is True: return global LOG_FILE_OBJ if not options.template: try: old = os.umask(0) os.makedirs(self.outputdir, mode=0o777) os.umask(old) filename = os.path.join(self.outputdir, 'log') LOG_FILE_OBJ = open(filename, buffering=0, mode='wb') except OSError as e: fail('%s' % e) def run(self, options): """ Walk through all the Tests and TestGroups, calling run(). 
""" try: os.chdir(self.outputdir) except OSError: fail('Could not change to directory %s' % self.outputdir) # make a symlink to the output for the currently running test logsymlink = os.path.join(self.outputdir, '../current') if os.path.islink(logsymlink): os.unlink(logsymlink) if not os.path.exists(logsymlink): os.symlink(self.outputdir, logsymlink) else: write_log('Could not make a symlink to directory %s\n' % self.outputdir, LOG_ERR) iteration = 0 while iteration < options.iterations: for test in sorted(self.tests.keys()): self.tests[test].run(options) for testgroup in sorted(self.testgroups.keys()): self.testgroups[testgroup].run(options) iteration += 1 def summary(self): if Result.total == 0: return 2 print('\nResults Summary') for key in list(Result.runresults.keys()): if Result.runresults[key] != 0: print('%s\t% 4d' % (key, Result.runresults[key])) m, s = divmod(time() - self.starttime, 60) h, m = divmod(m, 60) print('\nRunning Time:\t%02d:%02d:%02d' % (h, m, s)) print('Percent passed:\t%.1f%%' % ((float(Result.runresults['PASS']) / float(Result.total)) * 100)) print('Log directory:\t%s' % self.outputdir) if Result.runresults['FAIL'] > 0: return 1 if Result.runresults['KILLED'] > 0: return 1 if Result.runresults['RERAN'] > 0: return 3 return 0 def write_log(msg, target): """ Write the provided message to standard out, standard error or the logfile. If specifying LOG_FILE, then `msg` must be a bytes like object. This way we can still handle output from tests that may be in unexpected encodings. """ if target == LOG_OUT: os.write(sys.stdout.fileno(), bytearray(msg, encoding='utf-8')) elif target == LOG_ERR: os.write(sys.stderr.fileno(), bytearray(msg, encoding='utf-8')) elif target == LOG_FILE: os.write(LOG_FILE_OBJ.fileno(), msg) else: fail('log_msg called with unknown target "%s"' % target) def verify_file(pathname): """ Verify that the supplied pathname is an executable regular file. """ if os.path.isdir(pathname) or os.path.islink(pathname): return False for ext in '', '.ksh', '.sh': script_path = pathname + ext if os.path.isfile(script_path) and os.access(script_path, os.X_OK): return True return False def verify_user(user): """ Verify that the specified user exists on this system, and can execute sudo without being prompted for a password. """ testcmd = [SUDO, '-n', '-u', user, TRUE] if user in Cmd.verified_users: return True try: getpwnam(user) except KeyError: write_log("Warning: user '%s' does not exist.\n" % user, LOG_ERR) return False p = Popen(testcmd) p.wait() if p.returncode != 0: write_log("Warning: user '%s' cannot use passwordless sudo.\n" % user, LOG_ERR) return False else: Cmd.verified_users.append(user) return True def find_tests(testrun, options): """ For the given list of pathnames, add files as Tests. For directories, if do_groups is True, add the directory as a TestGroup. If False, recursively search for executable files. 
""" for p in sorted(options.pathnames): if os.path.isdir(p): for dirname, _, filenames in os.walk(p): if options.do_groups: testrun.addtestgroup(dirname, filenames, options) else: for f in sorted(filenames): testrun.addtest(os.path.join(dirname, f), options) else: testrun.addtest(p, options) def filter_tests(testrun, options): try: fh = open(options.logfile, "r") except Exception as e: fail('%s' % e) failed = {} while True: line = fh.readline() if not line: break m = re.match(r'Test: .*(tests/.*)/(\S+).*\[FAIL\]', line) if not m: continue group, test = m.group(1, 2) try: failed[group].append(test) except KeyError: failed[group] = [test] fh.close() testrun.filter(failed) def fail(retstr, ret=1): print('%s: %s' % (sys.argv[0], retstr)) exit(ret) def options_cb(option, opt_str, value, parser): path_options = ['outputdir', 'template', 'testdir', 'logfile'] if opt_str in parser.rargs: fail('%s may only be specified once.' % opt_str) if option.dest == 'runfiles': parser.values.cmd = 'rdconfig' value = set(os.path.abspath(p) for p in value.split(',')) if option.dest == 'tags': value = [x.strip() for x in value.split(',')] if option.dest in path_options: setattr(parser.values, option.dest, os.path.abspath(value)) else: setattr(parser.values, option.dest, value) def parse_args(): parser = OptionParser() parser.add_option('-c', action='callback', callback=options_cb, type='string', dest='runfiles', metavar='runfiles', help='Specify tests to run via config files.') parser.add_option('-d', action='store_true', default=False, dest='dryrun', help='Dry run. Print tests, but take no other action.') parser.add_option('-l', action='callback', callback=options_cb, default=None, dest='logfile', metavar='logfile', type='string', help='Read logfile and re-run tests which failed.') parser.add_option('-g', action='store_true', default=False, dest='do_groups', help='Make directories TestGroups.') parser.add_option('-o', action='callback', callback=options_cb, default=BASEDIR, dest='outputdir', type='string', metavar='outputdir', help='Specify an output directory.') parser.add_option('-i', action='callback', callback=options_cb, default=TESTDIR, dest='testdir', type='string', metavar='testdir', help='Specify a test directory.') parser.add_option('-p', action='callback', callback=options_cb, default='', dest='pre', metavar='script', type='string', help='Specify a pre script.') parser.add_option('-P', action='callback', callback=options_cb, default='', dest='post', metavar='script', type='string', help='Specify a post script.') parser.add_option('-q', action='store_true', default=False, dest='quiet', help='Silence on the console during a test run.') parser.add_option('-s', action='callback', callback=options_cb, default='', dest='failsafe', metavar='script', type='string', help='Specify a failsafe script.') parser.add_option('-S', action='callback', callback=options_cb, default='', dest='failsafe_user', metavar='failsafe_user', type='string', help='Specify a user to execute the failsafe script.') parser.add_option('-t', action='callback', callback=options_cb, default=60, dest='timeout', metavar='seconds', type='int', help='Timeout (in seconds) for an individual test.') parser.add_option('-u', action='callback', callback=options_cb, default='', dest='user', metavar='user', type='string', help='Specify a different user name to run as.') parser.add_option('-w', action='callback', callback=options_cb, default=None, dest='template', metavar='template', type='string', help='Create a new config file.') 
parser.add_option('-x', action='callback', callback=options_cb, default='', dest='pre_user', metavar='pre_user', type='string', help='Specify a user to execute the pre script.') parser.add_option('-X', action='callback', callback=options_cb, default='', dest='post_user', metavar='post_user', type='string', help='Specify a user to execute the post script.') parser.add_option('-T', action='callback', callback=options_cb, default='', dest='tags', metavar='tags', type='string', help='Specify tags to execute specific test groups.') parser.add_option('-I', action='callback', callback=options_cb, default=1, dest='iterations', metavar='iterations', type='int', help='Number of times to run the test run.') (options, pathnames) = parser.parse_args() if options.runfiles and len(pathnames): fail('Extraneous arguments.') options.pathnames = [os.path.abspath(path) for path in pathnames] return options def main(): options = parse_args() testrun = TestRun(options) if options.runfiles: testrun.read(options) else: find_tests(testrun, options) if options.logfile: filter_tests(testrun, options) if options.template: testrun.write(options) exit(0) testrun.complete_outputdirs() testrun.run(options) exit(testrun.summary()) if __name__ == '__main__': main() diff --git a/tests/test-runner/bin/zts-report.py.in b/tests/test-runner/bin/zts-report.py.in index 410c2cb4fcdc..9f00ec11b437 100755 --- a/tests/test-runner/bin/zts-report.py.in +++ b/tests/test-runner/bin/zts-report.py.in @@ -1,474 +1,474 @@ #!/usr/bin/env @PYTHON_SHEBANG@ # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright (c) 2017 by Delphix. All rights reserved. # Copyright (c) 2018 by Lawrence Livermore National Security, LLC. # -# This script must remain compatible with Python 2.6+ and Python 3.4+. +# This script must remain compatible with Python 3.6+. # import os import re import sys import argparse # # This script parses the stdout of zfstest, which has this format: # # Test: /path/to/testa (run as root) [00:00] [PASS] # Test: /path/to/testb (run as jkennedy) [00:00] [PASS] # Test: /path/to/testc (run as root) [00:00] [FAIL] # [...many more results...] # # Results Summary # FAIL 22 # SKIP 32 # PASS 1156 # # Running Time: 02:50:31 # Percent passed: 95.5% # Log directory: /var/tmp/test_results/20180615T205926 # # # Common generic reasons for a test or test group to be skipped. # # Some test cases are known to fail in ways which are not harmful or dangerous. # In these cases simply mark the test as a known failure until it can be # updated and the issue resolved. Note that it's preferable to open a unique # issue on the GitHub issue tracker for each test case failure. # known_reason = 'Known issue' # # Some tests require that a test user be able to execute the zfs utilities. # This may not be possible when testing in-tree due to the default permissions # on the user's home directory. When testing this can be resolved by granting # group read access. 
# # chmod 0750 $HOME # exec_reason = 'Test user execute permissions required for utilities' # -# Some tests require a minimum python version of 3.5 and will be skipped when +# Some tests require a minimum python version of 3.6 and will be skipped when # the default system version is too old. There may also be tests which require -# additional python modules be installed, for example python-cffi is required +# additional python modules be installed, for example python3-cffi is required # by the pyzfs tests. # -python_reason = 'Python v3.5 or newer required' -python_deps_reason = 'Python modules missing: python-cffi' +python_reason = 'Python v3.6 or newer required' +python_deps_reason = 'Python modules missing: python3-cffi' # # Some tests require the O_TMPFILE flag which was first introduced in the # 3.11 kernel. # tmpfile_reason = 'Kernel O_TMPFILE support required' # # Some tests require the statx(2) system call on Linux which was first # introduced in the 4.11 kernel. # statx_reason = 'Kernel statx(2) system call required on Linux' # # Some tests require that the NFS client and server utilities be installed. # share_reason = 'NFS client and server utilities required' # # Some tests require that the lsattr utility support the project id feature. # project_id_reason = 'lsattr with set/show project ID required' # # Some tests require that the kernel support user namespaces. # user_ns_reason = 'Kernel user namespace support required' # # Some rewind tests can fail since nothing guarantees that old MOS blocks # are not overwritten. Snapshots protect datasets and data files but not # the MOS. Reasonable efforts are made in the test case to increase the # odds that some txgs will have their MOS data left untouched, but it is # never a sure thing. # rewind_reason = 'Arbitrary pool rewind is not guaranteed' # # Some tests may be structured in a way that relies on exact knowledge # of how much free space is available in a pool. These tests cannot be # made completely reliable because the internal details of how free space # is managed are not exposed to user space. # enospc_reason = 'Exact free space reporting is not guaranteed' # # Some tests require a minimum version of the fio benchmark utility. # Older distributions such as CentOS 6.x only provide fio-2.0.13. # fio_reason = 'Fio v2.3 or newer required' # # Some tests require that the DISKS provided support the discard operation. # Normally this is not an issue because loop back devices are used for DISKS # and they support discard (TRIM/UNMAP). # trim_reason = 'DISKS must support discard (TRIM/UNMAP)' # # Some tests on FreeBSD require the fspacectl(2) system call and the # truncate(1) utility supporting the -d option. The system call was first # introduced in FreeBSD version 1400032. # fspacectl_reason = 'fspacectl(2) and truncate -d support required' # # Some tests are not applicable to a platform or need to be updated to operate # in the manner required by the platform. Any tests which are skipped for this # reason will be suppressed in the final analysis output. # na_reason = "Not applicable" # # Some test cases don't have all requirements to run on GitHub Actions CI. # ci_reason = 'CI runner doesn\'t have all requirements' summary = { 'total': float(0), 'passed': float(0), 'logfile': "Could not determine logfile location." } # # These tests are known to fail, thus we use this list to prevent these # failures from failing the job as a whole; only unexpected failures # bubble up to cause this script to exit with a non-zero exit status.
# # Format: { 'test-name': ['expected result', 'issue-number | reason'] } # # For each known failure it is recommended to link to a GitHub issue by # setting the reason to the issue number. Alternatively, one of the generic # reasons listed above can be used. # known = { 'casenorm/mixed_none_lookup_ci': ['FAIL', '7633'], 'casenorm/mixed_formd_lookup_ci': ['FAIL', '7633'], 'cli_root/zfs_unshare/zfs_unshare_002_pos': ['SKIP', na_reason], 'cli_root/zfs_unshare/zfs_unshare_006_pos': ['SKIP', na_reason], 'cli_root/zpool_import/import_rewind_device_replaced': ['FAIL', rewind_reason], 'cli_user/misc/zfs_share_001_neg': ['SKIP', na_reason], 'cli_user/misc/zfs_unshare_001_neg': ['SKIP', na_reason], 'privilege/setup': ['SKIP', na_reason], 'refreserv/refreserv_004_pos': ['FAIL', known_reason], 'rootpool/setup': ['SKIP', na_reason], 'rsend/rsend_008_pos': ['SKIP', '6066'], 'vdev_zaps/vdev_zaps_007_pos': ['FAIL', known_reason], } if sys.platform.startswith('freebsd'): known.update({ 'cli_root/zpool_wait/zpool_wait_trim_basic': ['SKIP', trim_reason], 'cli_root/zpool_wait/zpool_wait_trim_cancel': ['SKIP', trim_reason], 'cli_root/zpool_wait/zpool_wait_trim_flag': ['SKIP', trim_reason], 'link_count/link_count_001': ['SKIP', na_reason], }) elif sys.platform.startswith('linux'): known.update({ 'casenorm/mixed_formd_lookup': ['FAIL', '7633'], 'casenorm/mixed_formd_delete': ['FAIL', '7633'], 'casenorm/sensitive_formd_lookup': ['FAIL', '7633'], 'casenorm/sensitive_formd_delete': ['FAIL', '7633'], 'removal/removal_with_zdb': ['SKIP', known_reason], }) # # These tests may occasionally fail or be skipped. We want their failures # to be reported but only unexpected failures should bubble up to cause # this script to exit with a non-zero exit status. # # Format: { 'test-name': ['expected result', 'issue-number | reason'] } # # For each known failure it is recommended to link to a GitHub issue by # setting the reason to the issue number. Alternatively, one of the generic # reasons listed above can be used.
# maybe = { 'chattr/setup': ['SKIP', exec_reason], 'crtime/crtime_001_pos': ['SKIP', statx_reason], 'cli_root/zdb/zdb_006_pos': ['FAIL', known_reason], 'cli_root/zfs_destroy/zfs_destroy_dev_removal_condense': ['FAIL', known_reason], 'cli_root/zfs_get/zfs_get_004_pos': ['FAIL', known_reason], 'cli_root/zfs_get/zfs_get_009_pos': ['SKIP', '5479'], 'cli_root/zfs_rollback/zfs_rollback_001_pos': ['FAIL', known_reason], 'cli_root/zfs_rollback/zfs_rollback_002_pos': ['FAIL', known_reason], 'cli_root/zfs_share/setup': ['SKIP', share_reason], 'cli_root/zfs_snapshot/zfs_snapshot_002_neg': ['FAIL', known_reason], 'cli_root/zfs_unshare/setup': ['SKIP', share_reason], 'cli_root/zpool_add/zpool_add_004_pos': ['FAIL', known_reason], 'cli_root/zpool_destroy/zpool_destroy_001_pos': ['SKIP', '6145'], 'cli_root/zpool_import/import_rewind_config_changed': ['FAIL', rewind_reason], 'cli_root/zpool_import/zpool_import_missing_003_pos': ['SKIP', '6839'], 'cli_root/zpool_initialize/zpool_initialize_import_export': ['FAIL', '11948'], 'cli_root/zpool_labelclear/zpool_labelclear_removed': ['FAIL', known_reason], 'cli_root/zpool_trim/setup': ['SKIP', trim_reason], 'cli_root/zpool_upgrade/zpool_upgrade_004_pos': ['FAIL', '6141'], 'delegate/setup': ['SKIP', exec_reason], 'fallocate/fallocate_punch-hole': ['SKIP', fspacectl_reason], 'history/history_004_pos': ['FAIL', '7026'], 'history/history_005_neg': ['FAIL', '6680'], 'history/history_006_neg': ['FAIL', '5657'], 'history/history_008_pos': ['FAIL', known_reason], 'history/history_010_pos': ['SKIP', exec_reason], 'io/mmap': ['SKIP', fio_reason], 'largest_pool/largest_pool_001_pos': ['FAIL', known_reason], 'mmp/mmp_on_uberblocks': ['FAIL', known_reason], 'pyzfs/pyzfs_unittest': ['SKIP', python_deps_reason], 'no_space/enospc_002_pos': ['FAIL', enospc_reason], 'pool_checkpoint/checkpoint_discard_busy': ['FAIL', '11946'], 'projectquota/setup': ['SKIP', exec_reason], 'redundancy/redundancy_004_neg': ['FAIL', '7290'], 'redundancy/redundancy_draid_spare3': ['SKIP', known_reason], 'removal/removal_condense_export': ['FAIL', known_reason], 'reservation/reservation_008_pos': ['FAIL', '7741'], 'reservation/reservation_018_pos': ['FAIL', '5642'], 'snapshot/clone_001_pos': ['FAIL', known_reason], 'snapshot/snapshot_009_pos': ['FAIL', '7961'], 'snapshot/snapshot_010_pos': ['FAIL', '7961'], 'snapused/snapused_004_pos': ['FAIL', '5513'], 'tmpfile/setup': ['SKIP', tmpfile_reason], 'threadsappend/threadsappend_001_pos': ['FAIL', '6136'], 'trim/setup': ['SKIP', trim_reason], 'upgrade/upgrade_projectquota_001_pos': ['SKIP', project_id_reason], 'user_namespace/setup': ['SKIP', user_ns_reason], 'userquota/setup': ['SKIP', exec_reason], 'vdev_zaps/vdev_zaps_004_pos': ['FAIL', '6935'], 'zvol/zvol_ENOSPC/zvol_ENOSPC_001_pos': ['FAIL', '5848'], 'pam/setup': ['SKIP', "pamtester might be not available"], } if sys.platform.startswith('freebsd'): maybe.update({ 'cli_root/zfs_copies/zfs_copies_002_pos': ['FAIL', known_reason], 'cli_root/zfs_inherit/zfs_inherit_001_neg': ['FAIL', known_reason], 'cli_root/zfs_receive/receive-o-x_props_override': ['FAIL', known_reason], 'cli_root/zfs_share/zfs_share_011_pos': ['FAIL', known_reason], 'cli_root/zfs_share/zfs_share_concurrent_shares': ['FAIL', known_reason], 'cli_root/zpool_import/zpool_import_012_pos': ['FAIL', known_reason], 'delegate/zfs_allow_003_pos': ['FAIL', known_reason], 'inheritance/inherit_001_pos': ['FAIL', '11829'], 'resilver/resilver_restart_001': ['FAIL', known_reason], 'pool_checkpoint/checkpoint_big_rewind': ['FAIL', '12622'], 
        'pool_checkpoint/checkpoint_indirect': ['FAIL', '12623'],
    })
elif sys.platform.startswith('linux'):
    maybe.update({
        'cli_root/zfs_rename/zfs_rename_002_pos': ['FAIL', known_reason],
        'cli_root/zpool_expand/zpool_expand_001_pos': ['FAIL', known_reason],
        'cli_root/zpool_expand/zpool_expand_005_pos': ['FAIL', known_reason],
        'cli_root/zpool_reopen/zpool_reopen_003_pos': ['FAIL', known_reason],
        'fault/auto_spare_shared': ['FAIL', '11889'],
        'io/io_uring': ['SKIP', 'io_uring support required'],
        'limits/filesystem_limit': ['SKIP', known_reason],
        'limits/snapshot_limit': ['SKIP', known_reason],
        'mmp/mmp_active_import': ['FAIL', known_reason],
        'mmp/mmp_exported_import': ['FAIL', known_reason],
        'mmp/mmp_inactive_import': ['FAIL', known_reason],
        'zvol/zvol_misc/zvol_misc_snapdev': ['FAIL', '12621'],
        'zvol/zvol_misc/zvol_misc_volmode': ['FAIL', known_reason],
    })

# Not all GitHub Actions runners have the scsi_debug module, so we may skip
# some tests which use it.
if os.environ.get('CI') == 'true':
    known.update({
        'cli_root/zpool_expand/zpool_expand_001_pos': ['SKIP', ci_reason],
        'cli_root/zpool_expand/zpool_expand_003_neg': ['SKIP', ci_reason],
        'cli_root/zpool_expand/zpool_expand_005_pos': ['SKIP', ci_reason],
        'cli_root/zpool_reopen/setup': ['SKIP', ci_reason],
        'cli_root/zpool_reopen/zpool_reopen_001_pos': ['SKIP', ci_reason],
        'cli_root/zpool_reopen/zpool_reopen_002_pos': ['SKIP', ci_reason],
        'cli_root/zpool_reopen/zpool_reopen_003_pos': ['SKIP', ci_reason],
        'cli_root/zpool_reopen/zpool_reopen_004_pos': ['SKIP', ci_reason],
        'cli_root/zpool_reopen/zpool_reopen_005_pos': ['SKIP', ci_reason],
        'cli_root/zpool_reopen/zpool_reopen_006_neg': ['SKIP', ci_reason],
        'cli_root/zpool_reopen/zpool_reopen_007_pos': ['SKIP', ci_reason],
        'cli_root/zpool_split/zpool_split_wholedisk': ['SKIP', ci_reason],
        'fault/auto_offline_001_pos': ['SKIP', ci_reason],
        'fault/auto_online_001_pos': ['SKIP', ci_reason],
        'fault/auto_online_002_pos': ['SKIP', ci_reason],
        'fault/auto_replace_001_pos': ['SKIP', ci_reason],
        'fault/auto_spare_ashift': ['SKIP', ci_reason],
        'fault/auto_spare_shared': ['SKIP', ci_reason],
        'procfs/pool_state': ['SKIP', ci_reason],
    })

    maybe.update({
        'events/events_002_pos': ['FAIL', '11546'],
    })

def usage(s):
    print(s)
    sys.exit(1)

def process_results(pathname):
    try:
        f = open(pathname)
    except IOError as e:
        print('Error opening file: %s' % e)
        sys.exit(1)

    prefix = '/zfs-tests/tests/functional/'
    pattern = \
        r'^Test(?:\s+\(\S+\))?:' + \
        r'\s*\S*%s(\S+)\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]' \
        % prefix
    pattern_log = r'^\s*Log directory:\s*(\S*)'

    d = {}
    for line in f.readlines():
        m = re.match(pattern, line)
        if m and len(m.groups()) == 4:
            summary['total'] += 1
            if m.group(4) == "PASS":
                summary['passed'] += 1
            d[m.group(1)] = m.group(4)
            continue

        m = re.match(pattern_log, line)
        if m:
            summary['logfile'] = m.group(1)

    return d
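For illustration only (not in the patch), the pattern above can be exercised against a hypothetical ZTS log line; group 1 is the test name relative to the functional-test prefix and group 4 is the result:

    import re
    prefix = '/zfs-tests/tests/functional/'
    pattern = (r'^Test(?:\s+\(\S+\))?:'
               r'\s*\S*%s(\S+)\s*\(run as (\S+)\)\s*\[(\S+)\]\s*\[(\S+)\]'
               % prefix)
    line = ('Test: /usr/share/zfs/zfs-tests/tests/functional/'
            'casenorm/setup (run as root) [00:01] [PASS]')
    m = re.match(pattern, line)
    print(m.group(1), m.group(2), m.group(4))  # casenorm/setup root PASS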
class ListMaybesAction(argparse.Action):
    def __init__(self,
                 option_strings,
                 dest="SUPPRESS",
                 default="SUPPRESS",
                 help="list flaky tests and exit"):
        super(ListMaybesAction, self).__init__(
            option_strings=option_strings,
            dest=dest,
            default=default,
            nargs=0,
            help=help)

    def __call__(self, parser, namespace, values, option_string=None):
        for test in maybe:
            print(test)
        sys.exit(0)

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Analyze ZTS logs')
    parser.add_argument('logfile')
    parser.add_argument('--list-maybes', action=ListMaybesAction)
    parser.add_argument('--no-maybes', action='store_false', dest='maybes')
    args = parser.parse_args()

    results = process_results(args.logfile)

    if summary['total'] == 0:
        print("\n\nNo test results were found.")
        print("Log directory: %s" % summary['logfile'])
        sys.exit(0)

    expected = []
    unexpected = []
    all_maybes = True

    for test in list(results.keys()):
        if results[test] == "PASS":
            continue

        setup = test.replace(os.path.basename(test), "setup")
        if results[test] == "SKIP" and test != setup:
            if setup in known and known[setup][0] == "SKIP":
                continue
            if setup in maybe and maybe[setup][0] == "SKIP":
                continue

        if (test in known and results[test] in known[test][0]):
            expected.append(test)
        elif test in maybe and results[test] in maybe[test][0]:
            if results[test] == 'SKIP' or args.maybes:
                expected.append(test)
            elif not args.maybes:
                unexpected.append(test)
        else:
            unexpected.append(test)
            all_maybes = False

    print("\nTests with results other than PASS that are expected:")
    for test in sorted(expected):
        issue_url = 'https://github.com/openzfs/zfs/issues/'

        # Include the reason why the result is expected, given the following:
        # 1. Suppress test results which set the "Not applicable" reason.
        # 2. Numerical reasons are assumed to be GitHub issue numbers.
        # 3. When an entire test group is skipped only report the setup reason.
        if test in known:
            if known[test][1] == na_reason:
                continue
            elif known[test][1].isdigit():
                expect = issue_url + known[test][1]
            else:
                expect = known[test][1]
        elif test in maybe:
            if maybe[test][1].isdigit():
                expect = issue_url + maybe[test][1]
            else:
                expect = maybe[test][1]
        elif setup in known and known[setup][0] == "SKIP" and setup != test:
            continue
        elif setup in maybe and maybe[setup][0] == "SKIP" and setup != test:
            continue
        else:
            expect = "UNKNOWN REASON"
        print("    %s %s (%s)" % (results[test], test, expect))

    print("\nTests with result of PASS that are unexpected:")
    for test in sorted(known.keys()):
        # We probably should not be silently ignoring the case
        # where "test" is not in "results".
        if test not in results or results[test] != "PASS":
            continue
        print("    %s %s (expected %s)" % (results[test], test,
                                           known[test][0]))

    print("\nTests with results other than PASS that are unexpected:")
    for test in sorted(unexpected):
        expect = "PASS" if test not in known else known[test][0]
        print("    %s %s (expected %s)" % (results[test], test, expect))

    if len(unexpected) == 0:
        sys.exit(0)
    elif not args.maybes and all_maybes:
        sys.exit(2)
    else:
        sys.exit(1)
diff --git a/tests/zfs-tests/include/commands.cfg b/tests/zfs-tests/include/commands.cfg
index e479f2c01f15..ca7628855d66 100644
--- a/tests/zfs-tests/include/commands.cfg
+++ b/tests/zfs-tests/include/commands.cfg
@@ -1,225 +1,223 @@
#
# Copyright (c) 2016, 2019 by Delphix. All rights reserved.
# These variables are used by zfs-tests.sh to constrain which utilities
# may be used by the suite. The suite will create a directory which is
# the only element of $PATH and create symlinks from that dir to the
# binaries listed below.
#
# Please keep the contents of each variable sorted for ease of reading
# and maintenance.
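A rough sketch (mine, not from the patch) of the constrained-PATH mechanism described above: resolve each allowed utility and symlink it into a staging directory that becomes the sole $PATH entry. The names and directory are hypothetical:

    import os
    import shutil

    allowed = ['awk', 'cat', 'sed']    # stand-in for the lists below
    bindir = '/var/tmp/zts-path'       # hypothetical staging directory
    os.makedirs(bindir, exist_ok=True)
    for name in allowed:
        src = shutil.which(name)       # resolve the real binary
        if src:
            os.symlink(src, os.path.join(bindir, name))
    os.environ['PATH'] = bindir        # the suite now sees only these tools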
#
export SYSTEM_FILES_COMMON='arp awk base64 basename bc bunzip2 bzcat cat
    chgrp chmod chown cksum cmp cp cpio cut date dd df diff dirname dmesg
    du echo egrep env expr false file find fio getconf getent getfacl grep
    gunzip gzip head hostname id iostat kill ksh ln logname ls mkdir mknod
    mktemp mount mv net od openssl pamtester pax pgrep ping pkill printenv
    printf ps pwd
-    python
-    python2
    python3
    quotaon readlink rm rmdir scp script sed seq setfacl sh sleep sort ssh
    stat strings su sudo sum swapoff swapon sync tail tar tee timeout touch
    tr true truncate umask umount uname uniq uuidgen vmstat wait wc which
    xargs'

export SYSTEM_FILES_FREEBSD='chflags compress diskinfo dumpon fsck
    getextattr gpart jail jexec jls lsextattr md5 mdconfig mkfifo newfs pw
    rmextattr setextattr sha256 showmount swapctl sysctl uncompress'

export SYSTEM_FILES_LINUX='attr bash blkid blockdev chattr dmidecode
    exportfs fallocate fdisk free getfattr groupadd groupdel groupmod
    hostid losetup lsattr lsblk lscpu lsmod lsscsi md5sum mkswap modprobe
    mpstat nproc parted perf setenforce setfattr sha256sum udevadm useradd
    userdel usermod'

export ZFS_FILES='zdb zfs zhack zinject zpool ztest raidz_test arc_summary
    arcstat dbufstat mount.zfs zed zgenhostid zstream zfs_ids_to_path
    zpool_influxdb'

export ZFSTEST_FILES='badsend btree_test chg_usr_exec devname2devid
    dir_rd_update draid file_check file_trunc file_write get_diff
    getversion largest_file libzfs_input_check mkbusy mkfile mkfiles
    mktree mmap_exec mmap_libaio mmap_seek mmapwrite nvlist_to_lua
    randfree_file randwritecomp readmmap rename_dir rm_lnkcnt_zero_file
    send_doall threadsappend user_ns_exec xattrtest stride_dd'
diff --git a/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh b/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh
index 3788543b0b2f..b0265c5ee4a1 100755
--- a/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh
+++ b/tests/zfs-tests/tests/functional/cli_root/zfs_program/zfs_program_json.ksh
@@ -1,148 +1,134 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet
# at http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#

#
# Copyright (c) 2018 Datto Inc.
# Copyright (c) 2019 by Delphix. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
#     Channel programs output valid JSON.
#
# STRATEGY:
# 1. Compare JSON output formatting for a channel program to template
# 2. Using bad command line option (-Z) gives correct error output
#

verify_runnable "both"

function cleanup
{
    log_must zfs destroy $TESTDS
    return 0
}
log_onexit cleanup

log_assert "Channel programs output valid JSON"

TESTDS="$TESTPOOL/zcp-json"
log_must zfs create $TESTDS

TESTZCP="/$TESTDS/zfs_rlist.zcp"
cat > "$TESTZCP" << EOF
    succeeded = {}
    failed = {}

    function list_recursive(root, prop)
        for child in zfs.list.children(root) do
            list_recursive(child, prop)
        end
        val, src = zfs.get_prop(root, prop)
        if (val == nil) then
            failed[root] = val
        else
            succeeded[root] = val
        end
    end

    args = ...

    argv = args["argv"]

    list_recursive(argv[1], argv[2])

    results = {}
    results["succeeded"] = succeeded
    results["failed"] = failed

    return results
EOF
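An aside, not part of the patch: the hunk below can drop the json.tool version sniffing because python3 -m json.tool has accepted --sort-keys since Python 3.5. The normalization the test pipes through can be reproduced in-process like this (the sample JSON is made up):

    import json
    # Key order in raw `zfs program -j` output is not guaranteed, so both
    # sides are normalized; sort_keys=True with a 4-space indent mirrors
    # `python3 -m json.tool --sort-keys`.
    raw = '{"return": {"succeeded": {"pool/zcp-json": 131072}, "failed": {}}}'
    print(json.dumps(json.loads(raw), indent=4, sort_keys=True))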
# 1. Compare JSON output formatting for a channel program to template
typeset -a pos_cmds=("recordsize" "type")
typeset -a pos_cmds_out=(
"{
    \"return\": {
        \"failed\": {},
        \"succeeded\": {
            \"$TESTDS\": 131072
        }
    }
}"
"{
    \"return\": {
        \"failed\": {},
        \"succeeded\": {
            \"$TESTDS\": \"filesystem\"
        }
    }
}")
-#
-# N.B. json.tool is needed to guarantee consistent ordering of fields,
-# sed is needed to trim trailing space in CentOS 6's json.tool output
-#
-# As of Python 3.5 the behavior of json.tool changed to keep the order
-# the same as the input and the --sort-keys option was added. Detect when
-# --sort-keys is supported and apply the option to ensure the expected order.
-#
-if python -m json.tool --sort-keys <<< "{}"; then
-    JSON_TOOL_CMD="python -m json.tool --sort-keys"
-else
-    JSON_TOOL_CMD="python -m json.tool"
-fi
-
typeset -i cnt=0
typeset cmd
for cmd in ${pos_cmds[@]}; do
    log_must zfs program $TESTPOOL $TESTZCP $TESTDS $cmd 2>&1
    log_must zfs program -j $TESTPOOL $TESTZCP $TESTDS $cmd 2>&1
    OUTPUT=$(zfs program -j $TESTPOOL $TESTZCP $TESTDS $cmd 2>&1 |
-        $JSON_TOOL_CMD | sed 's/[[:space:]]*$//')
+        python3 -m json.tool --sort-keys)
    if [ "$OUTPUT" != "${pos_cmds_out[$cnt]}" ]; then
        log_note "Got     :$OUTPUT"
        log_note "Expected:${pos_cmds_out[$cnt]}"
        log_fail "Unexpected channel program output";
    fi
    cnt=$((cnt + 1))
done

# 2. Using bad command line option (-Z) gives correct error output
typeset -a neg_cmds=("-Z")
typeset -a neg_cmds_out=(
"invalid option 'Z'
usage:
    program [-jn] [-t <instruction limit>] [-m <memory limit (b)>]
        <pool> <program file> [lua args...]

For the property list, run: zfs set|get

For the delegated permission list, run: zfs allow|unallow")
cnt=0
for cmd in ${neg_cmds[@]}; do
    log_mustnot zfs program $cmd $TESTPOOL $TESTZCP $TESTDS 2>&1
    log_mustnot zfs program -j $cmd $TESTPOOL $TESTZCP $TESTDS 2>&1
    OUTPUT=$(zfs program -j $cmd $TESTPOOL $TESTZCP $TESTDS 2>&1)
    if [ "$OUTPUT" != "${neg_cmds_out[$cnt]}" ]; then
        log_note "Got     :$OUTPUT"
        log_note "Expected:${neg_cmds_out[$cnt]}"
        log_fail "Unexpected channel program error output";
    fi
    cnt=$((cnt + 1))
done

log_pass "Channel programs output valid JSON"
diff --git a/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in b/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in
index 4ca610e5f1e9..1f58d8116b68 100755
--- a/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in
+++ b/tests/zfs-tests/tests/functional/pyzfs/pyzfs_unittest.ksh.in
@@ -1,57 +1,57 @@
#!/bin/ksh -p
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright 2018, loli10K. All rights reserved.
#

. $STF_SUITE/include/libtest.shlib

#
# DESCRIPTION:
# Verify the libzfs_core Python test suite can be run successfully
#
# STRATEGY:
# 1. Run the nvlist and libzfs_core Python unittest
# 2. Verify the exit code is 0 (no errors)
#

verify_runnable "global"

# Verify that the required dependencies for testing are installed.
@PYTHON@ -c "import cffi" 2>/dev/null
if [ $? -eq 1 ]; then
-    log_unsupported "python-cffi not found by Python"
+    log_unsupported "python3-cffi not found by Python"
fi

# We don't just try to "import libzfs_core" because we want to skip these
# tests only if pyzfs was not installed due to missing build-time
# dependencies; if we cannot load "libzfs_core" for other reasons, for
# instance an API/ABI mismatch, we want to report it.
@PYTHON@ -c '
import pkgutil, sys
sys.exit(pkgutil.find_loader("libzfs_core") is None)'
if [ $? -eq 1 ]; then
    log_unsupported "libzfs_core not found by Python"
fi
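A quick demonstration (not from the patch) of why the probe above uses pkgutil.find_loader(): it asks whether the module can be located without importing it, so an API/ABI mismatch inside pyzfs still fails loudly later instead of being skipped. The module names are arbitrary:

    import pkgutil
    # A locatable module yields a loader object; a missing one yields None.
    print(pkgutil.find_loader('json') is not None)        # True (stdlib)
    print(pkgutil.find_loader('no_such_module') is None)  # True (absent)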
log_assert "Verify the nvlist and libzfs_core Python unittest run successfully"

# NOTE: don't use log_must() here because it makes output unreadable
@PYTHON@ -m unittest --verbose \
    libzfs_core.test.test_nvlist.TestNVList \
    libzfs_core.test.test_libzfs_core.ZFSTest
if [ $? -ne 0 ]; then
    log_fail "Python unittest completed with errors"
fi

log_pass "Python unittest completed without errors"
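Purely illustrative, not part of the patch: the @PYTHON@ -m unittest invocation above is roughly equivalent to loading the same two test classes programmatically, assuming pyzfs is importable:

    import unittest

    # Load the nvlist and libzfs_core test classes by dotted name, as the
    # command line above does, and run them verbosely.
    suite = unittest.defaultTestLoader.loadTestsFromNames([
        'libzfs_core.test.test_nvlist.TestNVList',
        'libzfs_core.test.test_libzfs_core.ZFSTest',
    ])
    unittest.TextTestRunner(verbosity=2).run(suite)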
diff --git a/tests/zfs-tests/tests/functional/rsend/send_encrypted_files.ksh b/tests/zfs-tests/tests/functional/rsend/send_encrypted_files.ksh
index 43a80dc582f8..0212bd144fb8 100755
--- a/tests/zfs-tests/tests/functional/rsend/send_encrypted_files.ksh
+++ b/tests/zfs-tests/tests/functional/rsend/send_encrypted_files.ksh
@@ -1,120 +1,120 @@
#!/bin/ksh -p
#
# CDDL HEADER START
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#
# CDDL HEADER END
#

#
# Copyright (c) 2018 by Datto Inc. All rights reserved.
#

. $STF_SUITE/tests/functional/rsend/rsend.kshlib

#
# DESCRIPTION:
# Verify that a raw zfs send and receive can deal with several different
# types of file layouts.
#
# STRATEGY:
# 1. Create a new encrypted filesystem
# 2. Add an empty file to the filesystem
# 3. Add a small 512 byte file to the filesystem
# 4. Add a larger 32M file to the filesystem
# 5. Add a large sparse file to the filesystem
# 6. Add 1000 empty files to the filesystem
# 7. Add a file with a large xattr value
# 8. Use xattrtest to create files with random xattrs (with and without
#    xattr=on)
# 9. Take a snapshot of the filesystem
# 10. Remove the 1000 empty files from the filesystem
# 11. Take another snapshot of the filesystem
# 12. Send and receive both snapshots
# 13. Mount the filesystem and check the contents
#

verify_runnable "both"

function cleanup
{
    datasetexists $TESTPOOL/$TESTFS2 && \
        destroy_dataset $TESTPOOL/$TESTFS2 -r
    datasetexists $TESTPOOL/recv && \
        destroy_dataset $TESTPOOL/recv -r
    [[ -f $keyfile ]] && log_must rm $keyfile
    [[ -f $sendfile ]] && log_must rm $sendfile
}
log_onexit cleanup

log_assert "Verify 'zfs send -w' works with many different file layouts"

typeset keyfile=/$TESTPOOL/pkey
typeset sendfile=/$TESTPOOL/sendfile
typeset sendfile2=/$TESTPOOL/sendfile2

# Create an encrypted dataset
log_must eval "echo 'password' > $keyfile"
log_must zfs create -o encryption=on -o keyformat=passphrase \
    -o keylocation=file://$keyfile $TESTPOOL/$TESTFS2

# Create files with varied layouts on disk
log_must touch /$TESTPOOL/$TESTFS2/empty
log_must mkfile 512 /$TESTPOOL/$TESTFS2/small
log_must mkfile 32M /$TESTPOOL/$TESTFS2/full
log_must dd if=/dev/urandom of=/$TESTPOOL/$TESTFS2/sparse \
    bs=512 count=1 seek=1048576 >/dev/null 2>&1

log_must mkdir -p /$TESTPOOL/$TESTFS2/dir
for i in {1..1000}; do
    log_must mkfile 512 /$TESTPOOL/$TESTFS2/dir/file-$i
done

log_must mkdir -p /$TESTPOOL/$TESTFS2/xattrondir
log_must zfs set xattr=on $TESTPOOL/$TESTFS2
log_must xattrtest -f 10 -x 3 -s 32768 -r -k -p /$TESTPOOL/$TESTFS2/xattrondir
log_must mkdir -p /$TESTPOOL/$TESTFS2/xattrsadir
log_must zfs set xattr=sa $TESTPOOL/$TESTFS2
log_must xattrtest -f 10 -x 3 -s 32768 -r -k -p /$TESTPOOL/$TESTFS2/xattrsadir

# OpenZFS issue #7432
log_must zfs set compression=on xattr=sa $TESTPOOL/$TESTFS2
log_must touch /$TESTPOOL/$TESTFS2/attrs
-log_must eval "python -c 'print \"a\" * 4096' | \
+log_must eval "python3 -c 'print(\"a\" * 4096)' | \
    set_xattr_stdin bigval /$TESTPOOL/$TESTFS2/attrs"
log_must zfs set compression=off xattr=on $TESTPOOL/$TESTFS2

log_must zfs snapshot $TESTPOOL/$TESTFS2@snap1

# Remove the empty files created in the first snapshot
for i in {1..1000}; do
    log_must rm /$TESTPOOL/$TESTFS2/dir/file-$i
done
sync_all_pools

log_must zfs snapshot $TESTPOOL/$TESTFS2@snap2

expected_cksum=$(recursive_cksum /$TESTPOOL/$TESTFS2)

log_must eval "zfs send -wp $TESTPOOL/$TESTFS2@snap1 > $sendfile"
log_must eval "zfs send -wp -i @snap1 $TESTPOOL/$TESTFS2@snap2 > $sendfile2"

log_must eval "zfs recv -F $TESTPOOL/recv < $sendfile"
log_must eval "zfs recv -F $TESTPOOL/recv < $sendfile2"
log_must zfs load-key $TESTPOOL/recv
log_must zfs mount -a

actual_cksum=$(recursive_cksum /$TESTPOOL/recv)
[[ "$expected_cksum" != "$actual_cksum" ]] && \
    log_fail "Recursive checksums differ ($expected_cksum != $actual_cksum)"

log_must xattrtest -f 10 -o3 -y -p /$TESTPOOL/recv/xattrondir
log_must xattrtest -f 10 -o3 -y -p /$TESTPOOL/recv/xattrsadir

log_pass "Verified 'zfs send -w' works with many different file layouts"
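set_xattr_stdin is a test-suite helper; as a hedged approximation (not from the patch), the large-xattr step above amounts to something like the following on Linux, with a hypothetical path and attribute name:

    import os

    # Write a 4096-byte value to a user-namespace xattr; large values are
    # what force the spill/bonus-buffer handling the test exercises.
    value = b'a' * 4096
    os.setxattr('/tank/fs/attrs', 'user.bigval', value)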
diff --git a/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh b/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh
index 551ed15db254..bd30488eaab0 100755
--- a/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh
+++ b/tests/zfs-tests/tests/functional/rsend/send_realloc_dnode_size.ksh
@@ -1,107 +1,107 @@
#!/bin/ksh
#
# This file and its contents are supplied under the terms of the
# Common Development and Distribution License ("CDDL"), version 1.0.
# You may only use this file in accordance with the terms of version
# 1.0 of the CDDL.
#
# A full copy of the text of the CDDL should have accompanied this
# source. A copy of the CDDL is also available via the Internet at
# http://www.illumos.org/license/CDDL.
#

#
# Copyright (c) 2017 by Lawrence Livermore National Security, LLC.
# Copyright (c) 2018 Datto Inc.
#

. $STF_SUITE/include/libtest.shlib
. $STF_SUITE/tests/functional/rsend/rsend.kshlib

#
# Description:
# Verify incremental receive properly handles objects with changed
# dnode slot count.
#
# Strategy:
# 1. Populate a dataset with 1k byte dnodes and snapshot
# 2. Remove objects, set dnodesize=legacy, and remount dataset so new
#    objects get recycled numbers and formerly "interior" dnode slots get
#    assigned to new objects
# 3. Remove objects, set dnodesize=2k, and remount dataset so new objects
#    overlap with recently recycled, formerly "normal" dnode slots
# 4. Create an empty file and add xattrs to it to exercise reclaiming a
#    dnode that requires more than 1 slot for its bonus buffer (ZoL #7433)
# 5. Generate initial and incremental streams
# 6. Verify initial and incremental streams can be received
#

verify_runnable "both"

log_assert "Verify incremental receive handles objects with changed dnode size"

function cleanup
{
    rm -f $BACKDIR/fs-dn-legacy
    rm -f $BACKDIR/fs-dn-1k
    rm -f $BACKDIR/fs-dn-2k
    rm -f $BACKDIR/fs-attr

    datasetexists $POOL/fs && destroy_dataset $POOL/fs -rR
    datasetexists $POOL/newfs && destroy_dataset $POOL/newfs -rR
}

log_onexit cleanup

# 1. Populate a dataset with 1k byte dnodes and snapshot
log_must zfs create -o dnodesize=1k $POOL/fs
log_must mk_files 200 262144 0 $POOL/fs
log_must zfs snapshot $POOL/fs@a

# 2. Remove objects, set dnodesize=legacy, and remount dataset so new
#    objects get recycled numbers and formerly "interior" dnode slots get
#    assigned to new objects
rm /$POOL/fs/*
log_must zfs unmount $POOL/fs
log_must zfs set dnodesize=legacy $POOL/fs
log_must zfs mount $POOL/fs
log_must mk_files 200 262144 0 $POOL/fs
log_must zfs snapshot $POOL/fs@b

# 3. Remove objects, set dnodesize=2k, and remount dataset so new objects
#    overlap with recently recycled, formerly "normal" dnode slots
rm /$POOL/fs/*
log_must zfs unmount $POOL/fs
log_must zfs set dnodesize=2k $POOL/fs
log_must zfs mount $POOL/fs
log_must touch /$POOL/fs/attrs
mk_files 200 262144 0 $POOL/fs
log_must zfs snapshot $POOL/fs@c

# 4. Create an empty file and add xattrs to it to exercise reclaiming a
#    dnode that requires more than 1 slot for its bonus buffer (ZoL #7433)
log_must zfs set compression=on xattr=sa $POOL/fs
-log_must eval "python -c 'print \"a\" * 512' |
+log_must eval "python3 -c 'print(\"a\" * 512)' |
    set_xattr_stdin bigval /$POOL/fs/attrs"
log_must zfs snapshot $POOL/fs@d

# 5. Generate initial and incremental streams
log_must eval "zfs send $POOL/fs@a > $BACKDIR/fs-dn-1k"
log_must eval "zfs send -i $POOL/fs@a $POOL/fs@b > $BACKDIR/fs-dn-legacy"
log_must eval "zfs send -i $POOL/fs@b $POOL/fs@c > $BACKDIR/fs-dn-2k"
log_must eval "zfs send -i $POOL/fs@c $POOL/fs@d > $BACKDIR/fs-attr"

# 6. Verify initial and incremental streams can be received
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-1k"
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-legacy"
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-dn-2k"
log_must eval "zfs recv $POOL/newfs < $BACKDIR/fs-attr"

log_pass "Verify incremental receive handles objects with changed dnode size"
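For intuition, my note rather than the patch's: dnodesize maps to a count of 512-byte dnode slots, which is why recycling object numbers across dnodesize changes reshuffles slot assignments. A small sketch of the arithmetic, assuming the 512-byte base slot size from the ZFS sources:

    # 512-byte base dnode (DNODE_SHIFT == 9 in the ZFS sources).
    SLOT_SIZE = 512
    for size in (512, 1024, 2048):  # legacy, dnodesize=1k, dnodesize=2k
        print('dnodesize=%d -> %d slot(s)' % (size, size // SLOT_SIZE))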