diff --git a/cmd/vdev_id/vdev_id b/cmd/vdev_id/vdev_id index 8cc4399a5668..7b5aab141997 100755 --- a/cmd/vdev_id/vdev_id +++ b/cmd/vdev_id/vdev_id @@ -1,789 +1,792 @@ #!/bin/sh # # vdev_id: udev helper to generate user-friendly names for JBOD disks # # This script parses the file /etc/zfs/vdev_id.conf to map a # physical path in a storage topology to a channel name. The # channel name is combined with a disk enclosure slot number to # create an alias that reflects the physical location of the drive. # This is particularly helpful when it comes to tasks like replacing # failed drives. Slot numbers may also be re-mapped in case the # default numbering is unsatisfactory. The drive aliases will be # created as symbolic links in /dev/disk/by-vdev. # # The currently supported topologies are sas_direct and sas_switch. # A multipath mode is supported in which dm-mpath devices are # handled by examining the first-listed running component disk. In # multipath mode the configuration file should contain a channel # definition with the same name for each path to a given enclosure. # # The alias keyword provides a simple way to map already-existing # device symlinks to more convenient names. It is suitable for # small, static configurations or for sites that have some automated # way to generate the mapping file. # # # Some example configuration files are given below. # # # # Example vdev_id.conf - sas_direct. 
# # # # multipath no # topology sas_direct # phys_per_port 4 # slot bay # # # PCI_ID HBA PORT CHANNEL NAME # channel 85:00.0 1 A # channel 85:00.0 0 B # channel 86:00.0 1 C # channel 86:00.0 0 D # # # Custom mapping for Channel A # # # Linux Mapped # # Slot Slot Channel # slot 1 7 A # slot 2 10 A # slot 3 3 A # slot 4 6 A # # # Default mapping for B, C, and D # slot 1 4 # slot 2 2 # slot 3 1 # slot 4 3 # # # # Example vdev_id.conf - sas_switch # # # # topology sas_switch # # # SWITCH PORT CHANNEL NAME # channel 1 A # channel 2 B # channel 3 C # channel 4 D # # # # Example vdev_id.conf - multipath # # # # multipath yes # # # PCI_ID HBA PORT CHANNEL NAME # channel 85:00.0 1 A # channel 85:00.0 0 B # channel 86:00.0 1 A # channel 86:00.0 0 B # # # # Example vdev_id.conf - multipath / multijbod-daisychaining # # # # multipath yes # multijbod yes # # # PCI_ID HBA PORT CHANNEL NAME # channel 85:00.0 1 A # channel 85:00.0 0 B # channel 86:00.0 1 A # channel 86:00.0 0 B # # # # Example vdev_id.conf - multipath / mixed # # # # multipath yes # slot mix # # # PCI_ID HBA PORT CHANNEL NAME # channel 85:00.0 3 A # channel 85:00.0 2 B # channel 86:00.0 3 A # channel 86:00.0 2 B # channel af:00.0 0 C # channel af:00.0 1 C # # # # Example vdev_id.conf - alias # # # # # by-vdev # # name fully qualified or base name of device link # alias d1 /dev/disk/by-id/wwn-0x5000c5002de3b9ca # alias d2 wwn-0x5000c5002def789e PATH=/bin:/sbin:/usr/bin:/usr/sbin CONFIG=/etc/zfs/vdev_id.conf PHYS_PER_PORT= DEV= TOPOLOGY= BAY= ENCL_ID="" UNIQ_ENCL_ID="" usage() { cat << EOF Usage: vdev_id [-h] vdev_id <-d device> [-c config_file] [-p phys_per_port] [-g sas_direct|sas_switch|scsi] [-m] -c specify name of an alternative config file [default=$CONFIG] -d specify basename of device (i.e. 
sda) -e Create enclose device symlinks only (/dev/by-enclosure) -g Storage network topology [default="$TOPOLOGY"] -m Run in multipath mode -j Run in multijbod mode -p number of phy's per switch port [default=$PHYS_PER_PORT] -h show this summary EOF exit 1 # exit with error to avoid processing usage message by a udev rule } map_slot() { LINUX_SLOT=$1 CHANNEL=$2 MAPPED_SLOT=$(awk -v linux_slot="$LINUX_SLOT" -v channel="$CHANNEL" \ '$1 == "slot" && $2 == linux_slot && \ ($4 ~ "^"channel"$" || $4 ~ /^$/) { print $3; exit}' $CONFIG) if [ -z "$MAPPED_SLOT" ] ; then MAPPED_SLOT=$LINUX_SLOT fi printf "%d" "${MAPPED_SLOT}" } map_channel() { MAPPED_CHAN= PCI_ID=$1 PORT=$2 case $TOPOLOGY in "sas_switch") MAPPED_CHAN=$(awk -v port="$PORT" \ '$1 == "channel" && $2 == port \ { print $3; exit }' $CONFIG) ;; "sas_direct"|"scsi") MAPPED_CHAN=$(awk -v pciID="$PCI_ID" -v port="$PORT" \ '$1 == "channel" && $2 == pciID && $3 == port \ {print $4}' $CONFIG) ;; esac printf "%s" "${MAPPED_CHAN}" } get_encl_id() { set -- $(echo $1) count=$# i=1 while [ $i -le $count ] ; do d=$(eval echo '$'{$i}) id=$(cat "/sys/class/enclosure/${d}/id") ENCL_ID="${ENCL_ID} $id" i=$((i + 1)) done } get_uniq_encl_id() { for uuid in ${ENCL_ID}; do found=0 for count in ${UNIQ_ENCL_ID}; do if [ $count = $uuid ]; then found=1 break fi done if [ $found -eq 0 ]; then UNIQ_ENCL_ID="${UNIQ_ENCL_ID} $uuid" fi done } # map_jbod explainer: The bsg driver knows the difference between a SAS # expander and fanout expander. Use hostX instance along with top-level # (whole enclosure) expander instances in /sys/class/enclosure and # matching a field in an array of expanders, using the index of the # matched array field as the enclosure instance, thereby making jbod IDs # dynamic. Avoids reliance on high overhead userspace commands like # multipath and lsscsi and instead uses existing sysfs data. $HOSTCHAN # variable derived from devpath gymnastics in sas_handler() function. 
map_jbod() { DEVEXP=$(ls -l "/sys/block/$DEV/device/" | grep enclos | awk -F/ '{print $(NF-1) }') DEV=$1 # Use "set --" to create index values (Arrays) set -- $(ls -l /sys/class/enclosure | grep -v "^total" | awk '{print $9}') # Get count of total elements JBOD_COUNT=$# JBOD_ITEM=$* # Build JBODs (enclosure) id from sys/class/enclosure//id get_encl_id "$JBOD_ITEM" # Different expander instances for each paths. # Filter out and keep only unique id. get_uniq_encl_id # Identify final 'mapped jbod' j=0 for count in ${UNIQ_ENCL_ID}; do i=1 j=$((j + 1)) while [ $i -le $JBOD_COUNT ] ; do d=$(eval echo '$'{$i}) id=$(cat "/sys/class/enclosure/${d}/id") if [ "$d" = "$DEVEXP" ] && [ $id = $count ] ; then MAPPED_JBOD=$j break fi i=$((i + 1)) done done printf "%d" "${MAPPED_JBOD}" } sas_handler() { if [ -z "$PHYS_PER_PORT" ] ; then PHYS_PER_PORT=$(awk '$1 == "phys_per_port" \ {print $2; exit}' $CONFIG) fi PHYS_PER_PORT=${PHYS_PER_PORT:-4} if ! echo "$PHYS_PER_PORT" | grep -q -E '^[0-9]+$' ; then echo "Error: phys_per_port value $PHYS_PER_PORT is non-numeric" exit 1 fi if [ -z "$MULTIPATH_MODE" ] ; then MULTIPATH_MODE=$(awk '$1 == "multipath" \ {print $2; exit}' $CONFIG) fi if [ -z "$MULTIJBOD_MODE" ] ; then MULTIJBOD_MODE=$(awk '$1 == "multijbod" \ {print $2; exit}' $CONFIG) fi # Use first running component device if we're handling a dm-mpath device if [ "$MULTIPATH_MODE" = "yes" ] ; then # If udev didn't tell us the UUID via DM_NAME, check /dev/mapper if [ -z "$DM_NAME" ] ; then DM_NAME=$(ls -l --full-time /dev/mapper | grep "$DEV"$ | awk '{print $9}') fi # For raw disks udev exports DEVTYPE=partition when # handling partitions, and the rules can be written to # take advantage of this to append a -part suffix. For # dm devices we get DEVTYPE=disk even for partitions so # we have to append the -part suffix directly in the # helper. 
if [ "$DEVTYPE" != "partition" ] ; then # Match p[number], remove the 'p' and prepend "-part" PART=$(echo "$DM_NAME" | awk 'match($0,/p[0-9]+$/) {print "-part"substr($0,RSTART+1,RLENGTH-1)}') fi # Strip off partition information. DM_NAME=$(echo "$DM_NAME" | sed 's/p[0-9][0-9]*$//') if [ -z "$DM_NAME" ] ; then return fi # Utilize DM device name to gather subordinate block devices # using sysfs to avoid userspace utilities # If our DEVNAME is something like /dev/dm-177, then we may be # able to get our DMDEV from it. DMDEV=$(echo $DEVNAME | sed 's;/dev/;;g') if [ ! -e /sys/block/$DMDEV/slaves/* ] ; then # It's not there, try looking in /dev/mapper DMDEV=$(ls -l --full-time /dev/mapper | grep $DM_NAME | awk '{gsub("../", " "); print $NF}') fi # Use sysfs pointers in /sys/block/dm-X/slaves because using # userspace tools creates lots of overhead and should be avoided # whenever possible. Use awk to isolate lowest instance of # sd device member in dm device group regardless of string # length. 
DEV=$(ls "/sys/block/$DMDEV/slaves" | awk ' { len=sprintf ("%20s",length($0)); gsub(/ /,0,str); a[NR]=len "_" $0; } END { asort(a) print substr(a[1],22) }') if [ -z "$DEV" ] ; then return fi fi if echo "$DEV" | grep -q ^/devices/ ; then sys_path=$DEV else sys_path=$(udevadm info -q path -p "/sys/block/$DEV" 2>/dev/null) fi # Use positional parameters as an ad-hoc array set -- $(echo "$sys_path" | tr / ' ') num_dirs=$# scsi_host_dir="/sys" # Get path up to /sys/.../hostX i=1 while [ $i -le "$num_dirs" ] ; do d=$(eval echo '$'{$i}) scsi_host_dir="$scsi_host_dir/$d" echo "$d" | grep -q -E '^host[0-9]+$' && break i=$((i + 1)) done # Lets grab the SAS host channel number and save it for JBOD sorting later HOSTCHAN=$(echo "$d" | awk -F/ '{ gsub("host","",$NF); print $NF}') if [ $i = "$num_dirs" ] ; then return fi PCI_ID=$(eval echo '$'{$((i -1))} | awk -F: '{print $2":"$3}') # In sas_switch mode, the directory four levels beneath # /sys/.../hostX contains symlinks to phy devices that reveal # the switch port number. In sas_direct mode, the phy links one # directory down reveal the HBA port. port_dir=$scsi_host_dir case $TOPOLOGY in "sas_switch") j=$((i + 4)) ;; "sas_direct") j=$((i + 1)) ;; esac i=$((i + 1)) while [ $i -le $j ] ; do port_dir="$port_dir/$(eval echo '$'{$i})" i=$((i + 1)) done PHY=$(ls -vd "$port_dir"/phy* 2>/dev/null | head -1 | awk -F: '{print $NF}') if [ -z "$PHY" ] ; then PHY=0 fi PORT=$((PHY / PHYS_PER_PORT)) # Look in /sys/.../sas_device/end_device-X for the bay_identifier # attribute. end_device_dir=$port_dir while [ $i -lt "$num_dirs" ] ; do d=$(eval echo '$'{$i}) end_device_dir="$end_device_dir/$d" if echo "$d" | grep -q '^end_device' ; then end_device_dir="$end_device_dir/sas_device/$d" break fi i=$((i + 1)) done # Add 'mix' slot type for environments where dm-multipath devices # include end-devices connected via SAS expanders or direct connection # to SAS HBA. 
A mixed connectivity environment such as pool devices # contained in a SAS JBOD and spare drives or log devices directly # connected in a server backplane without expanders in the I/O path. SLOT= case $BAY in "bay") SLOT=$(cat "$end_device_dir/bay_identifier" 2>/dev/null) ;; "mix") if [ $(cat "$end_device_dir/bay_identifier" 2>/dev/null) ] ; then SLOT=$(cat "$end_device_dir/bay_identifier" 2>/dev/null) else SLOT=$(cat "$end_device_dir/phy_identifier" 2>/dev/null) fi ;; "phy") SLOT=$(cat "$end_device_dir/phy_identifier" 2>/dev/null) ;; "port") d=$(eval echo '$'{$i}) SLOT=$(echo "$d" | sed -e 's/^.*://') ;; "id") i=$((i + 1)) d=$(eval echo '$'{$i}) SLOT=$(echo "$d" | sed -e 's/^.*://') ;; "lun") i=$((i + 2)) d=$(eval echo '$'{$i}) SLOT=$(echo "$d" | sed -e 's/^.*://') ;; "ses") # look for this SAS path in all SCSI Enclosure Services # (SES) enclosures sas_address=$(cat "$end_device_dir/sas_address" 2>/dev/null) enclosures=$(lsscsi -g | \ sed -n -e '/enclosu/s/^.* \([^ ][^ ]*\) *$/\1/p') for enclosure in $enclosures; do set -- $(sg_ses -p aes "$enclosure" | \ awk "/device slot number:/{slot=\$12} \ /SAS address: $sas_address/\ {print slot}") SLOT=$1 if [ -n "$SLOT" ] ; then break fi done ;; esac if [ -z "$SLOT" ] ; then return fi if [ "$MULTIJBOD_MODE" = "yes" ] ; then CHAN=$(map_channel "$PCI_ID" "$PORT") SLOT=$(map_slot "$SLOT" "$CHAN") JBOD=$(map_jbod "$DEV") if [ -z "$CHAN" ] ; then return fi echo "${CHAN}"-"${JBOD}"-"${SLOT}${PART}" else CHAN=$(map_channel "$PCI_ID" "$PORT") SLOT=$(map_slot "$SLOT" "$CHAN") if [ -z "$CHAN" ] ; then return fi echo "${CHAN}${SLOT}${PART}" fi } scsi_handler() { if [ -z "$FIRST_BAY_NUMBER" ] ; then FIRST_BAY_NUMBER=$(awk '$1 == "first_bay_number" \ {print $2; exit}' $CONFIG) fi FIRST_BAY_NUMBER=${FIRST_BAY_NUMBER:-0} if [ -z "$PHYS_PER_PORT" ] ; then PHYS_PER_PORT=$(awk '$1 == "phys_per_port" \ {print $2; exit}' $CONFIG) fi PHYS_PER_PORT=${PHYS_PER_PORT:-4} if ! 
echo "$PHYS_PER_PORT" | grep -q -E '^[0-9]+$' ; then echo "Error: phys_per_port value $PHYS_PER_PORT is non-numeric" exit 1 fi if [ -z "$MULTIPATH_MODE" ] ; then MULTIPATH_MODE=$(awk '$1 == "multipath" \ {print $2; exit}' $CONFIG) fi # Use first running component device if we're handling a dm-mpath device if [ "$MULTIPATH_MODE" = "yes" ] ; then # If udev didn't tell us the UUID via DM_NAME, check /dev/mapper if [ -z "$DM_NAME" ] ; then DM_NAME=$(ls -l --full-time /dev/mapper | grep "$DEV"$ | awk '{print $9}') fi # For raw disks udev exports DEVTYPE=partition when # handling partitions, and the rules can be written to # take advantage of this to append a -part suffix. For # dm devices we get DEVTYPE=disk even for partitions so # we have to append the -part suffix directly in the # helper. if [ "$DEVTYPE" != "partition" ] ; then # Match p[number], remove the 'p' and prepend "-part" PART=$(echo "$DM_NAME" | awk 'match($0,/p[0-9]+$/) {print "-part"substr($0,RSTART+1,RLENGTH-1)}') fi # Strip off partition information. DM_NAME=$(echo "$DM_NAME" | sed 's/p[0-9][0-9]*$//') if [ -z "$DM_NAME" ] ; then return fi # Get the raw scsi device name from multipath -ll. Strip off # leading pipe symbols to make field numbering consistent. 
DEV=$(multipath -ll "$DM_NAME" | awk '/running/{gsub("^[|]"," "); print $3 ; exit}') if [ -z "$DEV" ] ; then return fi fi if echo "$DEV" | grep -q ^/devices/ ; then sys_path=$DEV else sys_path=$(udevadm info -q path -p "/sys/block/$DEV" 2>/dev/null) fi # expect sys_path like this, for example: # /devices/pci0000:00/0000:00:0b.0/0000:09:00.0/0000:0a:05.0/0000:0c:00.0/host3/target3:1:0/3:1:0:21/block/sdv # Use positional parameters as an ad-hoc array set -- $(echo "$sys_path" | tr / ' ') num_dirs=$# scsi_host_dir="/sys" # Get path up to /sys/.../hostX i=1 while [ $i -le "$num_dirs" ] ; do d=$(eval echo '$'{$i}) scsi_host_dir="$scsi_host_dir/$d" echo "$d" | grep -q -E '^host[0-9]+$' && break i=$((i + 1)) done if [ $i = "$num_dirs" ] ; then return fi PCI_ID=$(eval echo '$'{$((i -1))} | awk -F: '{print $2":"$3}') # In scsi mode, the directory two levels beneath # /sys/.../hostX reveals the port and slot. port_dir=$scsi_host_dir j=$((i + 2)) i=$((i + 1)) while [ $i -le $j ] ; do port_dir="$port_dir/$(eval echo '$'{$i})" i=$((i + 1)) done set -- $(echo "$port_dir" | sed -e 's/^.*:\([^:]*\):\([^:]*\)$/\1 \2/') PORT=$1 SLOT=$(($2 + FIRST_BAY_NUMBER)) if [ -z "$SLOT" ] ; then return fi CHAN=$(map_channel "$PCI_ID" "$PORT") SLOT=$(map_slot "$SLOT" "$CHAN") if [ -z "$CHAN" ] ; then return fi echo "${CHAN}${SLOT}${PART}" } # Figure out the name for the enclosure symlink enclosure_handler () { # We get all the info we need from udev's DEVPATH variable: # # DEVPATH=/sys/devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/subsystem/devices/0:0:0:0/scsi_generic/sg0 # Get the enclosure ID ("0:0:0:0") - ENC=$(basename $(readlink -m "/sys/$DEVPATH/../..")) + ENC="${DEVPATH%/*}" + ENC="${ENC%/*}" + ENC="${ENC##*/}" if [ ! -d "/sys/class/enclosure/$ENC" ] ; then # Not an enclosure, bail out return fi # Get the long sysfs device path to our enclosure. Looks like: # /devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/port-0:0/ ... 
/enclosure/0:0:0:0 ENC_DEVICE=$(readlink "/sys/class/enclosure/$ENC") # Grab the full path to the hosts port dir: # /devices/pci0000:00/0000:00:03.0/0000:05:00.0/host0/port-0:0 PORT_DIR=$(echo "$ENC_DEVICE" | grep -Eo '.+host[0-9]+/port-[0-9]+:[0-9]+') # Get the port number PORT_ID=$(echo "$PORT_DIR" | grep -Eo "[0-9]+$") # The PCI directory is two directories up from the port directory # /sys/devices/pci0000:00/0000:00:03.0/0000:05:00.0 - PCI_ID_LONG=$(basename $(readlink -m "/sys/$PORT_DIR/../..")) + PCI_ID_LONG="$(readlink -m "/sys/$PORT_DIR/../..")" + PCI_ID_LONG="${PCI_ID_LONG##*/}" # Strip down the PCI address from 0000:05:00.0 to 05:00.0 - PCI_ID=$(echo "$PCI_ID_LONG" | sed -r 's/^[0-9]+://g') + PCI_ID="${PCI_ID_LONG#[0-9]*:}" # Name our device according to vdev_id.conf (like "L0" or "U1"). NAME=$(awk "/channel/{if (\$1 == \"channel\" && \$2 == \"$PCI_ID\" && \ \$3 == \"$PORT_ID\") {print \$4\$3}}" $CONFIG) echo "${NAME}" } alias_handler () { # Special handling is needed to correctly append a -part suffix # to partitions of device mapper devices. The DEVTYPE attribute # is normally set to "disk" instead of "partition" in this case, # so the udev rules won't handle that for us as they do for # "plain" block devices. # # For example, we may have the following links for a device and its # partitions, # # /dev/disk/by-id/dm-name-isw_dibgbfcije_ARRAY0 -> ../../dm-0 # /dev/disk/by-id/dm-name-isw_dibgbfcije_ARRAY0p1 -> ../../dm-1 # /dev/disk/by-id/dm-name-isw_dibgbfcije_ARRAY0p2 -> ../../dm-3 # # and the following alias in vdev_id.conf. # # alias A0 dm-name-isw_dibgbfcije_ARRAY0 # # The desired outcome is for the following links to be created # without having explicitly defined aliases for the partitions. 
# # /dev/disk/by-vdev/A0 -> ../../dm-0 # /dev/disk/by-vdev/A0-part1 -> ../../dm-1 # /dev/disk/by-vdev/A0-part2 -> ../../dm-3 # # Warning: The following grep pattern will misidentify whole-disk # devices whose names end with 'p' followed by a string of # digits as partitions, causing alias creation to fail. This # ambiguity seems unavoidable, so devices using this facility # must not use such names. DM_PART= if echo "$DM_NAME" | grep -q -E 'p[0-9][0-9]*$' ; then if [ "$DEVTYPE" != "partition" ] ; then # Match p[number], remove the 'p' and prepend "-part" DM_PART=$(echo "$DM_NAME" | awk 'match($0,/p[0-9]+$/) {print "-part"substr($0,RSTART+1,RLENGTH-1)}') fi fi # DEVLINKS attribute must have been populated by already-run udev rules. for link in $DEVLINKS ; do # Remove partition information to match key of top-level device. if [ -n "$DM_PART" ] ; then link=$(echo "$link" | sed 's/p[0-9][0-9]*$//') fi # Check both the fully qualified and the base name of link. - for l in $link $(basename "$link") ; do + for l in $link ${link##*/} ; do if [ ! -z "$l" ]; then alias=$(awk -v var="$l" '($1 == "alias") && \ ($3 == var) \ { print $2; exit }' $CONFIG) if [ -n "$alias" ] ; then echo "${alias}${DM_PART}" return fi fi done done } # main while getopts 'c:d:eg:jmp:h' OPTION; do case ${OPTION} in c) CONFIG=${OPTARG} ;; d) DEV=${OPTARG} ;; e) # When udev sees a scsi_generic device, it calls this script with -e to # create the enclosure device symlinks only. We also need # "enclosure_symlinks yes" set in vdev_id.config to actually create the # symlink. ENCLOSURE_MODE=$(awk '{if ($1 == "enclosure_symlinks") \ print $2}' "$CONFIG") if [ "$ENCLOSURE_MODE" != "yes" ] ; then exit 0 fi ;; g) TOPOLOGY=$OPTARG ;; p) PHYS_PER_PORT=${OPTARG} ;; j) MULTIJBOD_MODE=yes ;; m) MULTIPATH_MODE=yes ;; h) usage ;; esac done if [ ! 
-r "$CONFIG" ] ; then echo "Error: Config file \"$CONFIG\" not found" exit 1 fi if [ -z "$DEV" ] && [ -z "$ENCLOSURE_MODE" ] ; then echo "Error: missing required option -d" exit 1 fi if [ -z "$TOPOLOGY" ] ; then TOPOLOGY=$(awk '($1 == "topology") {print $2; exit}' "$CONFIG") fi if [ -z "$BAY" ] ; then BAY=$(awk '($1 == "slot") {print $2; exit}' "$CONFIG") fi TOPOLOGY=${TOPOLOGY:-sas_direct} # Should we create /dev/by-enclosure symlinks? if [ "$ENCLOSURE_MODE" = "yes" ] && [ "$TOPOLOGY" = "sas_direct" ] ; then ID_ENCLOSURE=$(enclosure_handler) if [ -z "$ID_ENCLOSURE" ] ; then exit 0 fi # Just create the symlinks to the enclosure devices and then exit. ENCLOSURE_PREFIX=$(awk '/enclosure_symlinks_prefix/{print $2}' "$CONFIG") if [ -z "$ENCLOSURE_PREFIX" ] ; then ENCLOSURE_PREFIX="enc" fi echo "ID_ENCLOSURE=$ID_ENCLOSURE" echo "ID_ENCLOSURE_PATH=by-enclosure/$ENCLOSURE_PREFIX-$ID_ENCLOSURE" exit 0 fi # First check if an alias was defined for this device. ID_VDEV=$(alias_handler) if [ -z "$ID_VDEV" ] ; then BAY=${BAY:-bay} case $TOPOLOGY in sas_direct|sas_switch) ID_VDEV=$(sas_handler) ;; scsi) ID_VDEV=$(scsi_handler) ;; *) echo "Error: unknown topology $TOPOLOGY" exit 1 ;; esac fi if [ -n "$ID_VDEV" ] ; then echo "ID_VDEV=${ID_VDEV}" echo "ID_VDEV_PATH=disk/by-vdev/${ID_VDEV}" fi diff --git a/cmd/zed/zed.d/all-syslog.sh b/cmd/zed/zed.d/all-syslog.sh index b07cf0f295ad..ea108c47b779 100755 --- a/cmd/zed/zed.d/all-syslog.sh +++ b/cmd/zed/zed.d/all-syslog.sh @@ -1,51 +1,51 @@ #!/bin/sh # # Copyright (C) 2013-2014 Lawrence Livermore National Security, LLC. # Copyright (c) 2020 by Delphix. All rights reserved. # # # Log the zevent via syslog. # [ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc" . 
"${ZED_ZEDLET_DIR}/zed-functions.sh" zed_exit_if_ignoring_this_event # build a string of name=value pairs for this event msg="eid=${ZEVENT_EID} class=${ZEVENT_SUBCLASS}" if [ "${ZED_SYSLOG_DISPLAY_GUIDS}" = "1" ]; then [ -n "${ZEVENT_POOL_GUID}" ] && msg="${msg} pool_guid=${ZEVENT_POOL_GUID}" [ -n "${ZEVENT_VDEV_GUID}" ] && msg="${msg} vdev_guid=${ZEVENT_VDEV_GUID}" else [ -n "${ZEVENT_POOL}" ] && msg="${msg} pool='${ZEVENT_POOL}'" - [ -n "${ZEVENT_VDEV_PATH}" ] && msg="${msg} vdev=$(basename "${ZEVENT_VDEV_PATH}")" + [ -n "${ZEVENT_VDEV_PATH}" ] && msg="${msg} vdev=${ZEVENT_VDEV_PATH##*/}" fi # log pool state if state is anything other than 'ACTIVE' [ -n "${ZEVENT_POOL_STATE_STR}" ] && [ "$ZEVENT_POOL_STATE" -ne 0 ] && \ msg="${msg} pool_state=${ZEVENT_POOL_STATE_STR}" # Log the following payload nvpairs if they are present [ -n "${ZEVENT_VDEV_STATE_STR}" ] && msg="${msg} vdev_state=${ZEVENT_VDEV_STATE_STR}" [ -n "${ZEVENT_CKSUM_ALGORITHM}" ] && msg="${msg} algorithm=${ZEVENT_CKSUM_ALGORITHM}" [ -n "${ZEVENT_ZIO_SIZE}" ] && msg="${msg} size=${ZEVENT_ZIO_SIZE}" [ -n "${ZEVENT_ZIO_OFFSET}" ] && msg="${msg} offset=${ZEVENT_ZIO_OFFSET}" [ -n "${ZEVENT_ZIO_PRIORITY}" ] && msg="${msg} priority=${ZEVENT_ZIO_PRIORITY}" [ -n "${ZEVENT_ZIO_ERR}" ] && msg="${msg} err=${ZEVENT_ZIO_ERR}" [ -n "${ZEVENT_ZIO_FLAGS}" ] && msg="${msg} flags=$(printf '0x%x' "${ZEVENT_ZIO_FLAGS}")" # log delays that are >= 10 milisec [ -n "${ZEVENT_ZIO_DELAY}" ] && [ "$ZEVENT_ZIO_DELAY" -gt 10000000 ] && \ msg="${msg} delay=$((ZEVENT_ZIO_DELAY / 1000000))ms" # list the bookmark data together # shellcheck disable=SC2153 [ -n "${ZEVENT_ZIO_OBJSET}" ] && \ msg="${msg} bookmark=${ZEVENT_ZIO_OBJSET}:${ZEVENT_ZIO_OBJECT}:${ZEVENT_ZIO_LEVEL}:${ZEVENT_ZIO_BLKID}" zed_log_msg "${msg}" exit 0 diff --git a/cmd/zed/zed.d/generic-notify.sh b/cmd/zed/zed.d/generic-notify.sh index 1db26980c1a0..9cf657e39970 100755 --- a/cmd/zed/zed.d/generic-notify.sh +++ b/cmd/zed/zed.d/generic-notify.sh @@ -1,54 +1,54 @@ 
#!/bin/sh # # Send notification in response to a given zevent. # # This is a generic script than can be symlinked to a file in the # enabled-zedlets directory to have a notification sent when a particular # class of zevents occurs. The symlink filename must begin with the zevent # (sub)class string (e.g., "probe_failure-notify.sh" for the "probe_failure" # subclass). Refer to the zed(8) manpage for details. # # Only one notification per ZED_NOTIFY_INTERVAL_SECS will be sent for a given # class/pool combination. This protects against spamming the recipient # should multiple events occur together in time for the same pool. # # Exit codes: # 0: notification sent # 1: notification failed # 2: notification not configured # 3: notification suppressed [ -f "${ZED_ZEDLET_DIR}/zed.rc" ] && . "${ZED_ZEDLET_DIR}/zed.rc" . "${ZED_ZEDLET_DIR}/zed-functions.sh" # Rate-limit the notification based in part on the filename. # -rate_limit_tag="${ZEVENT_POOL};${ZEVENT_SUBCLASS};$(basename -- "$0")" +rate_limit_tag="${ZEVENT_POOL};${ZEVENT_SUBCLASS};${0##*/}" rate_limit_interval="${ZED_NOTIFY_INTERVAL_SECS}" zed_rate_limit "${rate_limit_tag}" "${rate_limit_interval}" || exit 3 umask 077 pool_str="${ZEVENT_POOL:+" for ${ZEVENT_POOL}"}" host_str=" on $(hostname)" note_subject="ZFS ${ZEVENT_SUBCLASS} event${pool_str}${host_str}" note_pathname="$(mktemp)" { echo "ZFS has posted the following event:" echo echo " eid: ${ZEVENT_EID}" echo " class: ${ZEVENT_SUBCLASS}" echo " host: $(hostname)" echo " time: ${ZEVENT_TIME_STRING}" [ -n "${ZEVENT_VDEV_TYPE}" ] && echo " vtype: ${ZEVENT_VDEV_TYPE}" [ -n "${ZEVENT_VDEV_PATH}" ] && echo " vpath: ${ZEVENT_VDEV_PATH}" [ -n "${ZEVENT_VDEV_GUID}" ] && echo " vguid: ${ZEVENT_VDEV_GUID}" [ -n "${ZEVENT_POOL}" ] && [ -x "${ZPOOL}" ] \ && "${ZPOOL}" status "${ZEVENT_POOL}" } > "${note_pathname}" zed_notify "${note_subject}" "${note_pathname}"; rv=$? 
rm -f "${note_pathname}" exit "${rv}" diff --git a/cmd/zed/zed.d/zed-functions.sh b/cmd/zed/zed.d/zed-functions.sh index 9044922ac346..290f9150b43f 100644 --- a/cmd/zed/zed.d/zed-functions.sh +++ b/cmd/zed/zed.d/zed-functions.sh @@ -1,614 +1,614 @@ #!/bin/sh # shellcheck disable=SC2039 # zed-functions.sh # # ZED helper functions for use in ZEDLETs # Variable Defaults # : "${ZED_LOCKDIR:="/var/lock"}" : "${ZED_NOTIFY_INTERVAL_SECS:=3600}" : "${ZED_NOTIFY_VERBOSE:=0}" : "${ZED_RUNDIR:="/var/run"}" : "${ZED_SYSLOG_PRIORITY:="daemon.notice"}" : "${ZED_SYSLOG_TAG:="zed"}" ZED_FLOCK_FD=8 # zed_check_cmd (cmd, ...) # # For each argument given, search PATH for the executable command [cmd]. # Log a message if [cmd] is not found. # # Arguments # cmd: name of executable command for which to search # # Return # 0 if all commands are found in PATH and are executable # n for a count of the command executables that are not found # zed_check_cmd() { local cmd local rv=0 for cmd; do if ! command -v "${cmd}" >/dev/null 2>&1; then zed_log_err "\"${cmd}\" not installed" rv=$((rv + 1)) fi done return "${rv}" } # zed_log_msg (msg, ...) # # Write all argument strings to the system log. # # Globals # ZED_SYSLOG_PRIORITY # ZED_SYSLOG_TAG # # Return # nothing # zed_log_msg() { logger -p "${ZED_SYSLOG_PRIORITY}" -t "${ZED_SYSLOG_TAG}" -- "$@" } # zed_log_err (msg, ...) # # Write an error message to the system log. This message will contain the # script name, EID, and all argument strings. # # Globals # ZED_SYSLOG_PRIORITY # ZED_SYSLOG_TAG # ZEVENT_EID # # Return # nothing # zed_log_err() { logger -p "${ZED_SYSLOG_PRIORITY}" -t "${ZED_SYSLOG_TAG}" -- "error:" \ - "$(basename -- "$0"):""${ZEVENT_EID:+" eid=${ZEVENT_EID}:"}" "$@" + "${0##*/}:""${ZEVENT_EID:+" eid=${ZEVENT_EID}:"}" "$@" } # zed_lock (lockfile, [fd]) # # Obtain an exclusive (write) lock on [lockfile]. If the lock cannot be # immediately acquired, wait until it becomes available. 
# # Every zed_lock() must be paired with a corresponding zed_unlock(). # # By default, flock-style locks associate the lockfile with file descriptor 8. # The bash manpage warns that file descriptors >9 should be used with care as # they may conflict with file descriptors used internally by the shell. File # descriptor 9 is reserved for zed_rate_limit(). If concurrent locks are held # within the same process, they must use different file descriptors (preferably # decrementing from 8); otherwise, obtaining a new lock with a given file # descriptor will release the previous lock associated with that descriptor. # # Arguments # lockfile: pathname of the lock file; the lock will be stored in # ZED_LOCKDIR unless the pathname contains a "/". # fd: integer for the file descriptor used by flock (OPTIONAL unless holding # concurrent locks) # # Globals # ZED_FLOCK_FD # ZED_LOCKDIR # # Return # nothing # zed_lock() { local lockfile="$1" local fd="${2:-${ZED_FLOCK_FD}}" local umask_bak local err [ -n "${lockfile}" ] || return if ! expr "${lockfile}" : '.*/' >/dev/null 2>&1; then lockfile="${ZED_LOCKDIR}/${lockfile}" fi umask_bak="$(umask)" umask 077 # Obtain a lock on the file bound to the given file descriptor. # eval "exec ${fd}>> '${lockfile}'" if ! err="$(flock --exclusive "${fd}" 2>&1)"; then zed_log_err "failed to lock \"${lockfile}\": ${err}" fi umask "${umask_bak}" } # zed_unlock (lockfile, [fd]) # # Release the lock on [lockfile]. # # Arguments # lockfile: pathname of the lock file # fd: integer for the file descriptor used by flock (must match the file # descriptor passed to the zed_lock function call) # # Globals # ZED_FLOCK_FD # ZED_LOCKDIR # # Return # nothing # zed_unlock() { local lockfile="$1" local fd="${2:-${ZED_FLOCK_FD}}" local err [ -n "${lockfile}" ] || return if ! expr "${lockfile}" : '.*/' >/dev/null 2>&1; then lockfile="${ZED_LOCKDIR}/${lockfile}" fi # Release the lock and close the file descriptor. if ! 
err="$(flock --unlock "${fd}" 2>&1)"; then zed_log_err "failed to unlock \"${lockfile}\": ${err}" fi eval "exec ${fd}>&-" } # zed_notify (subject, pathname) # # Send a notification via all available methods. # # Arguments # subject: notification subject # pathname: pathname containing the notification message (OPTIONAL) # # Return # 0: notification succeeded via at least one method # 1: notification failed # 2: no notification methods configured # zed_notify() { local subject="$1" local pathname="$2" local num_success=0 local num_failure=0 zed_notify_email "${subject}" "${pathname}"; rv=$? [ "${rv}" -eq 0 ] && num_success=$((num_success + 1)) [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1)) zed_notify_pushbullet "${subject}" "${pathname}"; rv=$? [ "${rv}" -eq 0 ] && num_success=$((num_success + 1)) [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1)) zed_notify_slack_webhook "${subject}" "${pathname}"; rv=$? [ "${rv}" -eq 0 ] && num_success=$((num_success + 1)) [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1)) zed_notify_pushover "${subject}" "${pathname}"; rv=$? [ "${rv}" -eq 0 ] && num_success=$((num_success + 1)) [ "${rv}" -eq 1 ] && num_failure=$((num_failure + 1)) [ "${num_success}" -gt 0 ] && return 0 [ "${num_failure}" -gt 0 ] && return 1 return 2 } # zed_notify_email (subject, pathname) # # Send a notification via email to the address specified by ZED_EMAIL_ADDR. # # Requires the mail executable to be installed in the standard PATH, or # ZED_EMAIL_PROG to be defined with the pathname of an executable capable of # reading a message body from stdin. # # Command-line options to the mail executable can be specified in # ZED_EMAIL_OPTS. 
This undergoes the following keyword substitutions: # - @ADDRESS@ is replaced with the space-delimited recipient email address(es) # - @SUBJECT@ is replaced with the notification subject # # Arguments # subject: notification subject # pathname: pathname containing the notification message (OPTIONAL) # # Globals # ZED_EMAIL_PROG # ZED_EMAIL_OPTS # ZED_EMAIL_ADDR # # Return # 0: notification sent # 1: notification failed # 2: not configured # zed_notify_email() { local subject="$1" local pathname="${2:-"/dev/null"}" : "${ZED_EMAIL_PROG:="mail"}" : "${ZED_EMAIL_OPTS:="-s '@SUBJECT@' @ADDRESS@"}" # For backward compatibility with ZED_EMAIL. if [ -n "${ZED_EMAIL}" ] && [ -z "${ZED_EMAIL_ADDR}" ]; then ZED_EMAIL_ADDR="${ZED_EMAIL}" fi [ -n "${ZED_EMAIL_ADDR}" ] || return 2 zed_check_cmd "${ZED_EMAIL_PROG}" || return 1 [ -n "${subject}" ] || return 1 if [ ! -r "${pathname}" ]; then zed_log_err \ - "$(basename "${ZED_EMAIL_PROG}") cannot read \"${pathname}\"" + "${ZED_EMAIL_PROG##*/} cannot read \"${pathname}\"" return 1 fi ZED_EMAIL_OPTS="$(echo "${ZED_EMAIL_OPTS}" \ | sed -e "s/@ADDRESS@/${ZED_EMAIL_ADDR}/g" \ -e "s/@SUBJECT@/${subject}/g")" # shellcheck disable=SC2086 eval ${ZED_EMAIL_PROG} ${ZED_EMAIL_OPTS} < "${pathname}" >/dev/null 2>&1 rv=$? if [ "${rv}" -ne 0 ]; then - zed_log_err "$(basename "${ZED_EMAIL_PROG}") exit=${rv}" + zed_log_err "${ZED_EMAIL_PROG##*/} exit=${rv}" return 1 fi return 0 } # zed_notify_pushbullet (subject, pathname) # # Send a notification via Pushbullet . # The access token (ZED_PUSHBULLET_ACCESS_TOKEN) identifies this client to the # Pushbullet server. The optional channel tag (ZED_PUSHBULLET_CHANNEL_TAG) is # for pushing to notification feeds that can be subscribed to; if a channel is # not defined, push notifications will instead be sent to all devices # associated with the account specified by the access token. # # Requires awk, curl, and sed executables to be installed in the standard PATH. 
# # References # https://docs.pushbullet.com/ # https://www.pushbullet.com/security # # Arguments # subject: notification subject # pathname: pathname containing the notification message (OPTIONAL) # # Globals # ZED_PUSHBULLET_ACCESS_TOKEN # ZED_PUSHBULLET_CHANNEL_TAG # # Return # 0: notification sent # 1: notification failed # 2: not configured # zed_notify_pushbullet() { local subject="$1" local pathname="${2:-"/dev/null"}" local msg_body local msg_tag local msg_json local msg_out local msg_err local url="https://api.pushbullet.com/v2/pushes" [ -n "${ZED_PUSHBULLET_ACCESS_TOKEN}" ] || return 2 [ -n "${subject}" ] || return 1 if [ ! -r "${pathname}" ]; then zed_log_err "pushbullet cannot read \"${pathname}\"" return 1 fi zed_check_cmd "awk" "curl" "sed" || return 1 # Escape the following characters in the message body for JSON: # newline, backslash, double quote, horizontal tab, vertical tab, # and carriage return. # msg_body="$(awk '{ ORS="\\n" } { gsub(/\\/, "\\\\"); gsub(/"/, "\\\""); gsub(/\t/, "\\t"); gsub(/\f/, "\\f"); gsub(/\r/, "\\r"); print }' \ "${pathname}")" # Push to a channel if one is configured. # [ -n "${ZED_PUSHBULLET_CHANNEL_TAG}" ] && msg_tag="$(printf \ '"channel_tag": "%s", ' "${ZED_PUSHBULLET_CHANNEL_TAG}")" # Construct the JSON message for pushing a note. # msg_json="$(printf '{%s"type": "note", "title": "%s", "body": "%s"}' \ "${msg_tag}" "${subject}" "${msg_body}")" # Send the POST request and check for errors. # msg_out="$(curl -u "${ZED_PUSHBULLET_ACCESS_TOKEN}:" -X POST "${url}" \ --header "Content-Type: application/json" --data-binary "${msg_json}" \ 2>/dev/null)"; rv=$? if [ "${rv}" -ne 0 ]; then zed_log_err "curl exit=${rv}" return 1 fi msg_err="$(echo "${msg_out}" \ | sed -n -e 's/.*"error" *:.*"message" *: *"\([^"]*\)".*/\1/p')" if [ -n "${msg_err}" ]; then zed_log_err "pushbullet \"${msg_err}"\" return 1 fi return 0 } # zed_notify_slack_webhook (subject, pathname) # # Notification via Slack Webhook . 
# The Webhook URL (ZED_SLACK_WEBHOOK_URL) identifies this client to the # Slack channel. # # Requires awk, curl, and sed executables to be installed in the standard PATH. # # References # https://api.slack.com/incoming-webhooks # # Arguments # subject: notification subject # pathname: pathname containing the notification message (OPTIONAL) # # Globals # ZED_SLACK_WEBHOOK_URL # # Return # 0: notification sent # 1: notification failed # 2: not configured # zed_notify_slack_webhook() { [ -n "${ZED_SLACK_WEBHOOK_URL}" ] || return 2 local subject="$1" local pathname="${2:-"/dev/null"}" local msg_body local msg_tag local msg_json local msg_out local msg_err local url="${ZED_SLACK_WEBHOOK_URL}" [ -n "${subject}" ] || return 1 if [ ! -r "${pathname}" ]; then zed_log_err "slack webhook cannot read \"${pathname}\"" return 1 fi zed_check_cmd "awk" "curl" "sed" || return 1 # Escape the following characters in the message body for JSON: # newline, backslash, double quote, horizontal tab, vertical tab, # and carriage return. # msg_body="$(awk '{ ORS="\\n" } { gsub(/\\/, "\\\\"); gsub(/"/, "\\\""); gsub(/\t/, "\\t"); gsub(/\f/, "\\f"); gsub(/\r/, "\\r"); print }' \ "${pathname}")" # Construct the JSON message for posting. # msg_json="$(printf '{"text": "*%s*\\n%s"}' "${subject}" "${msg_body}" )" # Send the POST request and check for errors. # msg_out="$(curl -X POST "${url}" \ --header "Content-Type: application/json" --data-binary "${msg_json}" \ 2>/dev/null)"; rv=$? if [ "${rv}" -ne 0 ]; then zed_log_err "curl exit=${rv}" return 1 fi msg_err="$(echo "${msg_out}" \ | sed -n -e 's/.*"error" *:.*"message" *: *"\([^"]*\)".*/\1/p')" if [ -n "${msg_err}" ]; then zed_log_err "slack webhook \"${msg_err}\"" return 1 fi return 0 } # zed_notify_pushover (subject, pathname) # # Send a notification via Pushover . # The access token (ZED_PUSHOVER_TOKEN) identifies this client to the # Pushover server. 
The user token (ZED_PUSHOVER_USER) defines the user or # group to which the notification will be sent. # # Requires curl and sed executables to be installed in the standard PATH. # # References # https://pushover.net/api # # Arguments # subject: notification subject # pathname: pathname containing the notification message (OPTIONAL) # # Globals # ZED_PUSHOVER_TOKEN # ZED_PUSHOVER_USER # # Return # 0: notification sent # 1: notification failed # 2: not configured # zed_notify_pushover() { local subject="$1" local pathname="${2:-"/dev/null"}" local msg_body local msg_out local msg_err local url="https://api.pushover.net/1/messages.json" [ -n "${ZED_PUSHOVER_TOKEN}" ] && [ -n "${ZED_PUSHOVER_USER}" ] || return 2 if [ ! -r "${pathname}" ]; then zed_log_err "pushover cannot read \"${pathname}\"" return 1 fi zed_check_cmd "curl" "sed" || return 1 # Read the message body in. # msg_body="$(cat "${pathname}")" if [ -z "${msg_body}" ] then msg_body=$subject subject="" fi # Send the POST request and check for errors. # msg_out="$( \ curl \ --form-string "token=${ZED_PUSHOVER_TOKEN}" \ --form-string "user=${ZED_PUSHOVER_USER}" \ --form-string "message=${msg_body}" \ --form-string "title=${subject}" \ "${url}" \ 2>/dev/null \ )"; rv=$? if [ "${rv}" -ne 0 ]; then zed_log_err "curl exit=${rv}" return 1 fi msg_err="$(echo "${msg_out}" \ | sed -n -e 's/.*"errors" *:.*\[\(.*\)\].*/\1/p')" if [ -n "${msg_err}" ]; then zed_log_err "pushover \"${msg_err}\"" return 1 fi return 0 } # zed_rate_limit (tag, [interval]) # # Check whether an event of a given type [tag] has already occurred within the # last [interval] seconds. # # This function obtains a lock on the statefile using file descriptor 9. 
# # Arguments # tag: arbitrary string for grouping related events to rate-limit # interval: time interval in seconds (OPTIONAL) # # Globals # ZED_NOTIFY_INTERVAL_SECS # ZED_RUNDIR # # Return # 0 if the event should be processed # 1 if the event should be dropped # # State File Format # time;tag # zed_rate_limit() { local tag="$1" local interval="${2:-${ZED_NOTIFY_INTERVAL_SECS}}" local lockfile="zed.zedlet.state.lock" local lockfile_fd=9 local statefile="${ZED_RUNDIR}/zed.zedlet.state" local time_now local time_prev local umask_bak local rv=0 [ -n "${tag}" ] || return 0 zed_lock "${lockfile}" "${lockfile_fd}" time_now="$(date +%s)" time_prev="$(grep -E "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \ | tail -1 | cut -d\; -f1)" if [ -n "${time_prev}" ] \ && [ "$((time_now - time_prev))" -lt "${interval}" ]; then rv=1 else umask_bak="$(umask)" umask 077 grep -E -v "^[0-9]+;${tag}\$" "${statefile}" 2>/dev/null \ > "${statefile}.$$" echo "${time_now};${tag}" >> "${statefile}.$$" mv -f "${statefile}.$$" "${statefile}" umask "${umask_bak}" fi zed_unlock "${lockfile}" "${lockfile_fd}" return "${rv}" } # zed_guid_to_pool (guid) # # Convert a pool GUID into its pool name (like "tank") # Arguments # guid: pool GUID (decimal or hex) # # Return # Pool name # zed_guid_to_pool() { if [ -z "$1" ] ; then return fi guid="$(printf "%u" "$1")" $ZPOOL get -H -ovalue,name guid | awk '$1 == '"$guid"' {print $2; exit}' } # zed_exit_if_ignoring_this_event # # Exit the script if we should ignore this event, as determined by # $ZED_SYSLOG_SUBCLASS_INCLUDE and $ZED_SYSLOG_SUBCLASS_EXCLUDE in zed.rc. # This function assumes you've imported the normal zed variables. 
zed_exit_if_ignoring_this_event() { if [ -n "${ZED_SYSLOG_SUBCLASS_INCLUDE}" ]; then eval "case ${ZEVENT_SUBCLASS} in ${ZED_SYSLOG_SUBCLASS_INCLUDE});; *) exit 0;; esac" elif [ -n "${ZED_SYSLOG_SUBCLASS_EXCLUDE}" ]; then eval "case ${ZEVENT_SUBCLASS} in ${ZED_SYSLOG_SUBCLASS_EXCLUDE}) exit 0;; *);; esac" fi } diff --git a/cmd/zpool/zpool.d/dm-deps b/cmd/zpool/zpool.d/dm-deps index ee39514e4d92..42af6a8d63cd 100755 --- a/cmd/zpool/zpool.d/dm-deps +++ b/cmd/zpool/zpool.d/dm-deps @@ -1,29 +1,27 @@ #!/bin/sh # # Show device mapper dependent / underlying devices. This is useful for # looking up the /dev/sd* devices associated with a dm or multipath device. # if [ "$1" = "-h" ] ; then echo "Show device mapper dependent (underlying) devices." exit fi dev="$VDEV_PATH" # If the VDEV path is a symlink, resolve it to a real device if [ -L "$dev" ] ; then dev=$(readlink "$dev") fi -dev=$(basename "$dev") +dev="${dev##*/}" val="" if [ -d "/sys/class/block/$dev/slaves" ] ; then - # ls -C: output in columns, no newlines - val=$(ls -C "/sys/class/block/$dev/slaves") - - # ls -C will print two spaces between files; change to one space. - val=$(echo "$val" | sed -r 's/[[:blank:]]+/ /g') + # ls -C: output in columns, no newlines, two spaces (change to one) + # shellcheck disable=SC2012 + val=$(ls -C "/sys/class/block/$dev/slaves" | tr -s '[:space:]' ' ') fi echo "dm-deps=$val" diff --git a/cmd/zpool/zpool.d/iostat b/cmd/zpool/zpool.d/iostat index 41a3acfae7a4..19be475e9b27 100755 --- a/cmd/zpool/zpool.d/iostat +++ b/cmd/zpool/zpool.d/iostat @@ -1,77 +1,77 @@ #!/bin/sh # # Display most relevant iostat bandwidth/latency numbers. The output is # dependent on the name of the script/symlink used to call it. # helpstr=" iostat: Show iostat values since boot (summary page). iostat-1s: Do a single 1-second iostat sample and show values. iostat-10s: Do a single 10-second iostat sample and show values." 
-script=$(basename "$0") +script="${0##*/}" if [ "$1" = "-h" ] ; then echo "$helpstr" | grep "$script:" | tr -s '\t' | cut -f 2- exit fi if [ "$script" = "iostat-1s" ] ; then # Do a single one-second sample interval=1 # Don't show summary stats brief="yes" elif [ "$script" = "iostat-10s" ] ; then # Do a single ten-second sample interval=10 # Don't show summary stats brief="yes" fi if [ -f "$VDEV_UPATH" ] ; then # We're a file-based vdev, iostat doesn't work on us. Do nothing. exit fi if [ "$(uname)" = "FreeBSD" ]; then out=$(iostat -dKx \ ${interval:+"-w $interval"} \ ${interval:+"-c 1"} \ "$VDEV_UPATH" | tail -n 2) else out=$(iostat -kx \ ${brief:+"-y"} \ ${interval:+"$interval"} \ ${interval:+"1"} \ - "$VDEV_UPATH" | awk NF | tail -n 2) + "$VDEV_UPATH" | grep -v '^$' | tail -n 2) fi # Sample output (we want the last two lines): # # Linux 2.6.32-642.13.1.el6.x86_64 (centos68) 03/09/2017 _x86_64_ (6 CPU) # # avg-cpu: %user %nice %system %iowait %steal %idle # 0.00 0.00 0.00 0.00 0.00 100.00 # # Device: rrqm/s wrqm/s r/s w/s rkB/s wkB/s avgrq-sz avgqu-sz await r_await w_await svctm %util # sdb 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 0.00 # # Get the column names cols=$(echo "$out" | head -n 1) # Get the values and tab separate them to make them cut-able. -vals=$(echo "$out" | tail -n 1 | sed -r 's/[[:blank:]]+/\t/g') +vals=$(echo "$out" | tail -n 1 | tr -s '[:space:]' '\t') i=0 for col in $cols ; do i=$((i+1)) # Skip the first column since it's just the device name if [ $i -eq 1 ]; then continue fi # Get i'th value val=$(echo "$vals" | cut -f "$i") echo "$col=$val" done diff --git a/cmd/zpool/zpool.d/lsblk b/cmd/zpool/zpool.d/lsblk index 1cdef40494fe..919783a1c1bf 100755 --- a/cmd/zpool/zpool.d/lsblk +++ b/cmd/zpool/zpool.d/lsblk @@ -1,83 +1,83 @@ #!/bin/sh # # Print some common lsblk values # # Any (lowercased) name symlinked to the lsblk script will be passed to lsblk # as one of its --output names. 
Here's a partial list of --output names # from the lsblk binary: # # Available columns (for --output): # NAME device name # KNAME internal kernel device name # MAJ:MIN major:minor device number # FSTYPE filesystem type # MOUNTPOINT where the device is mounted # LABEL filesystem LABEL # UUID filesystem UUID # RA read-ahead of the device # RO read-only device # RM removable device # MODEL device identifier # SIZE size of the device # STATE state of the device # OWNER user name # GROUP group name # MODE device node permissions # ALIGNMENT alignment offset # MIN-IO minimum I/O size # OPT-IO optimal I/O size # PHY-SEC physical sector size # LOG-SEC logical sector size # ROTA rotational device # SCHED I/O scheduler name # RQ-SIZE request queue size # TYPE device type # DISC-ALN discard alignment offset # DISC-GRAN discard granularity # DISC-MAX discard max bytes # DISC-ZERO discard zeroes data # # If the script is run as just 'lsblk' then print out disk size, vendor, # and model number. helpstr=" label: Show filesystem label. model: Show disk model number. size: Show the disk capacity. vendor: Show the disk vendor. lsblk: Show the disk size, vendor, and model number." -script=$(basename "$0") +script="${0##*/}" if [ "$1" = "-h" ] ; then echo "$helpstr" | grep "$script:" | tr -s '\t' | cut -f 2- exit fi if [ "$script" = "lsblk" ] ; then list="size vendor model" else list=$(echo "$script" | tr '[:upper:]' '[:lower:]') fi # Older versions of lsblk don't support all these values (like SERIAL). for i in $list ; do # Special case: Looking up the size of a file-based vdev can't # be done with lsblk. 
if [ "$i" = "size" ] && [ -f "$VDEV_UPATH" ] ; then size=$(du -h --apparent-size "$VDEV_UPATH" | cut -f 1) echo "size=$size" continue fi val="" if val=$(eval "lsblk -dl -n -o $i $VDEV_UPATH 2>/dev/null") ; then # Remove leading/trailing whitespace from value val=$(echo "$val" | sed -e 's/^[[:space:]]*//' \ -e 's/[[:space:]]*$//') fi echo "$i=$val" done diff --git a/cmd/zpool/zpool.d/media b/cmd/zpool/zpool.d/media index 5683cdc3c023..660f78b743fc 100755 --- a/cmd/zpool/zpool.d/media +++ b/cmd/zpool/zpool.d/media @@ -1,34 +1,31 @@ #!/bin/sh # # Print out the type of device # if [ "$1" = "-h" ] ; then echo "Show whether a vdev is a file, hdd, ssd, or iscsi." exit fi if [ -b "$VDEV_UPATH" ]; then - device=$(basename "$VDEV_UPATH") - val=$(cat "/sys/block/$device/queue/rotational" 2>/dev/null) - if [ "$val" = "0" ]; then - MEDIA="ssd" - fi - - if [ "$val" = "1" ]; then - MEDIA="hdd" - fi + device="${VDEV_UPATH##*/}" + read -r val 2>/dev/null < "/sys/block/$device/queue/rotational" + case "$val" in + 0) MEDIA="ssd" ;; + 1) MEDIA="hdd" ;; + esac vpd_pg83="/sys/block/$device/device/vpd_pg83" if [ -f "$vpd_pg83" ]; then if grep -q --binary "iqn." "$vpd_pg83"; then MEDIA="iscsi" fi fi else if [ -f "$VDEV_UPATH" ]; then MEDIA="file" fi fi echo "media=$MEDIA" diff --git a/cmd/zpool/zpool.d/ses b/cmd/zpool/zpool.d/ses index b1836d676528..b51fe31894ab 100755 --- a/cmd/zpool/zpool.d/ses +++ b/cmd/zpool/zpool.d/ses @@ -1,58 +1,58 @@ #!/bin/sh # # Print SCSI Enclosure Services (SES) info. The output is dependent on the name # of the script/symlink used to call it. # helpstr=" enc: Show disk enclosure w:x:y:z value. slot: Show disk slot number as reported by the enclosure. encdev: Show /dev/sg* device associated with the enclosure disk slot. fault_led: Show value of the disk enclosure slot fault LED. locate_led: Show value of the disk enclosure slot locate LED. ses: Show disk's enc, enc device, slot, and fault/locate LED values." 
-script=$(basename "$0") +script="${0##*/}" if [ "$1" = "-h" ] ; then echo "$helpstr" | grep "$script:" | tr -s '\t' | cut -f 2- exit fi if [ "$script" = "ses" ] ; then scripts='enc encdev slot fault_led locate_led' else scripts="$script" fi for i in $scripts ; do if [ -z "$VDEV_ENC_SYSFS_PATH" ] ; then echo "$i=" continue fi val="" case $i in enc) val=$(ls "$VDEV_ENC_SYSFS_PATH/../../" 2>/dev/null) ;; slot) val=$(cat "$VDEV_ENC_SYSFS_PATH/slot" 2>/dev/null) ;; encdev) val=$(ls "$VDEV_ENC_SYSFS_PATH/../device/scsi_generic" 2>/dev/null) ;; fault_led) # JBODs fault LED is called 'fault', NVMe fault LED is called # 'attention'. if [ -f "$VDEV_ENC_SYSFS_PATH/fault" ] ; then val=$(cat "$VDEV_ENC_SYSFS_PATH/fault" 2>/dev/null) elif [ -f "$VDEV_ENC_SYSFS_PATH/attention" ] ; then val=$(cat "$VDEV_ENC_SYSFS_PATH/attention" 2>/dev/null) fi ;; locate_led) val=$(cat "$VDEV_ENC_SYSFS_PATH/locate" 2>/dev/null) ;; esac echo "$i=$val" done diff --git a/config/always-python.m4 b/config/always-python.m4 index 76b06fcd8488..5f47df424c27 100644 --- a/config/always-python.m4 +++ b/config/always-python.m4 @@ -1,70 +1,70 @@ dnl # dnl # The majority of the python scripts are written to be compatible dnl # with Python 2.6 and Python 3.4. Therefore, they may be installed dnl # and used with either interpreter. This option is intended to dnl # to provide a method to specify the default system version, and dnl # set the PYTHON environment variable accordingly. 
dnl # AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYTHON], [ AC_ARG_WITH([python], AS_HELP_STRING([--with-python[=VERSION]], [default system python version @<:@default=check@:>@]), [with_python=$withval], [with_python=check]) AS_CASE([$with_python], [check], [AC_CHECK_PROGS([PYTHON], [python3 python2], [:])], [2*], [PYTHON="python${with_python}"], [*python2*], [PYTHON="${with_python}"], [3*], [PYTHON="python${with_python}"], [*python3*], [PYTHON="${with_python}"], [no], [PYTHON=":"], [AC_MSG_ERROR([Unknown --with-python value '$with_python'])] ) dnl # dnl # Minimum supported Python versions for utilities: dnl # Python 2.6 or Python 3.4 dnl # AM_PATH_PYTHON([], [], [:]) AS_IF([test -z "$PYTHON_VERSION"], [ - PYTHON_VERSION=$(basename $PYTHON | tr -cd 0-9.) + PYTHON_VERSION=$(echo ${PYTHON##*/} | tr -cd 0-9.) ]) PYTHON_MINOR=${PYTHON_VERSION#*\.} AS_CASE([$PYTHON_VERSION], [2.*], [ AS_IF([test $PYTHON_MINOR -lt 6], [AC_MSG_ERROR("Python >= 2.6 is required")]) ], [3.*], [ AS_IF([test $PYTHON_MINOR -lt 4], [AC_MSG_ERROR("Python >= 3.4 is required")]) ], [:|2|3], [], [PYTHON_VERSION=3] ) AM_CONDITIONAL([USING_PYTHON], [test "$PYTHON" != :]) AM_CONDITIONAL([USING_PYTHON_2], [test "x${PYTHON_VERSION%%\.*}" = x2]) AM_CONDITIONAL([USING_PYTHON_3], [test "x${PYTHON_VERSION%%\.*}" = x3]) AM_COND_IF([USING_PYTHON_2], [AC_SUBST([PYTHON_SHEBANG], [python2])], [AC_SUBST([PYTHON_SHEBANG], [python3])]) dnl # dnl # Request that packages be built for a specific Python version. dnl # AS_IF([test "x$with_python" != xcheck], [ PYTHON_PKG_VERSION=$(echo $PYTHON_VERSION | tr -d .) 
DEFINE_PYTHON_PKG_VERSION='--define "__use_python_pkg_version '${PYTHON_PKG_VERSION}'"' DEFINE_PYTHON_VERSION='--define "__use_python '${PYTHON}'"' ], [ DEFINE_PYTHON_VERSION='' DEFINE_PYTHON_PKG_VERSION='' ]) AC_SUBST(DEFINE_PYTHON_VERSION) AC_SUBST(DEFINE_PYTHON_PKG_VERSION) ]) diff --git a/config/always-pyzfs.m4 b/config/always-pyzfs.m4 index fa39fd88519c..00e5d0e2cbbd 100644 --- a/config/always-pyzfs.m4 +++ b/config/always-pyzfs.m4 @@ -1,120 +1,120 @@ dnl # dnl # ZFS_AC_PYTHON_MODULE(module_name, [action-if-true], [action-if-false]) dnl # dnl # Checks for Python module. Freely inspired by AX_PYTHON_MODULE dnl # https://www.gnu.org/software/autoconf-archive/ax_python_module.html dnl # Required by ZFS_AC_CONFIG_ALWAYS_PYZFS. dnl # AC_DEFUN([ZFS_AC_PYTHON_MODULE], [ - PYTHON_NAME=$(basename $PYTHON) + PYTHON_NAME=${PYTHON##*/} AC_MSG_CHECKING([for $PYTHON_NAME module: $1]) AS_IF([$PYTHON -c "import $1" 2>/dev/null], [ AC_MSG_RESULT(yes) m4_ifvaln([$2], [$2]) ], [ AC_MSG_RESULT(no) m4_ifvaln([$3], [$3]) ]) ]) dnl # dnl # Determines if pyzfs can be built, requires Python 2.7 or later. dnl # AC_DEFUN([ZFS_AC_CONFIG_ALWAYS_PYZFS], [ AC_ARG_ENABLE([pyzfs], AS_HELP_STRING([--enable-pyzfs], [install libzfs_core python bindings @<:@default=check@:>@]), [enable_pyzfs=$enableval], [enable_pyzfs=check]) dnl # dnl # Packages for pyzfs specifically enabled/disabled. 
dnl # AS_IF([test "x$enable_pyzfs" != xcheck], [ AS_IF([test "x$enable_pyzfs" = xyes], [ DEFINE_PYZFS='--with pyzfs' ], [ DEFINE_PYZFS='--without pyzfs' ]) ], [ AS_IF([test "$PYTHON" != :], [ DEFINE_PYZFS='' ], [ enable_pyzfs=no DEFINE_PYZFS='--without pyzfs' ]) ]) AC_SUBST(DEFINE_PYZFS) dnl # dnl # Python "packaging" (or, failing that, "distlib") module is required to build and install pyzfs dnl # AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ ZFS_AC_PYTHON_MODULE([packaging], [], [ ZFS_AC_PYTHON_MODULE([distlib], [], [ AS_IF([test "x$enable_pyzfs" = xyes], [ AC_MSG_ERROR("Python $PYTHON_VERSION packaging and distlib modules are not installed") ], [test "x$enable_pyzfs" != xno], [ enable_pyzfs=no ]) ]) ]) ]) dnl # dnl # Require python-devel libraries dnl # AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ AS_CASE([$PYTHON_VERSION], [3.*], [PYTHON_REQUIRED_VERSION=">= '3.4.0'"], [2.*], [PYTHON_REQUIRED_VERSION=">= '2.7.0'"], [AC_MSG_ERROR("Python $PYTHON_VERSION unknown")] ) AX_PYTHON_DEVEL([$PYTHON_REQUIRED_VERSION], [ AS_IF([test "x$enable_pyzfs" = xyes], [ AC_MSG_ERROR("Python $PYTHON_REQUIRED_VERSION development library is not installed") ], [test "x$enable_pyzfs" != xno], [ enable_pyzfs=no ]) ]) ]) dnl # dnl # Python "setuptools" module is required to build and install pyzfs dnl # AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ ZFS_AC_PYTHON_MODULE([setuptools], [], [ AS_IF([test "x$enable_pyzfs" = xyes], [ AC_MSG_ERROR("Python $PYTHON_VERSION setuptools is not installed") ], [test "x$enable_pyzfs" != xno], [ enable_pyzfs=no ]) ]) ]) dnl # dnl # Python "cffi" module is required to run pyzfs dnl # AS_IF([test "x$enable_pyzfs" = xcheck -o "x$enable_pyzfs" = xyes], [ ZFS_AC_PYTHON_MODULE([cffi], [], [ AS_IF([test "x$enable_pyzfs" = xyes], [ AC_MSG_ERROR("Python $PYTHON_VERSION cffi is not installed") ], [test "x$enable_pyzfs" != xno], [ enable_pyzfs=no ]) ]) ]) dnl # dnl # Set enable_pyzfs to 'yes' if 
every check passed dnl # AS_IF([test "x$enable_pyzfs" = xcheck], [enable_pyzfs=yes]) AM_CONDITIONAL([PYZFS_ENABLED], [test "x$enable_pyzfs" = xyes]) AC_SUBST([PYZFS_ENABLED], [$enable_pyzfs]) AC_SUBST(pythonsitedir, [$PYTHON_SITE_PKG]) AC_MSG_CHECKING([whether to enable pyzfs: ]) AC_MSG_RESULT($enable_pyzfs) ]) diff --git a/config/zfs-meta.m4 b/config/zfs-meta.m4 index 1c9d246124d1..20064a0fb595 100644 --- a/config/zfs-meta.m4 +++ b/config/zfs-meta.m4 @@ -1,207 +1,207 @@ dnl # dnl # DESCRIPTION: dnl # Read meta data from the META file. When building from a git repository dnl # the ZFS_META_RELEASE field will be overwritten if there is an annotated dnl # tag matching the form ZFS_META_NAME-ZFS_META_VERSION-*. This allows dnl # for working builds to be uniquely identified using the git commit hash. dnl # dnl # The META file format is as follows: dnl # ^[ ]*KEY:[ \t]+VALUE$ dnl # dnl # In other words: dnl # - KEY is separated from VALUE by a colon and one or more spaces/tabs. dnl # - KEY and VALUE are case sensitive. dnl # - Leading spaces are ignored. dnl # - First match wins for duplicate keys. dnl # dnl # A line can be commented out by preceding it with a '#' (or technically dnl # any non-space character since that will prevent the regex from dnl # matching). dnl # dnl # WARNING: dnl # Placing a colon followed by a space or tab (ie, ":[ \t]+") within the dnl # VALUE will prematurely terminate the string since that sequence is dnl # used as the awk field separator. dnl # dnl # KEYS: dnl # The following META keys are recognized: dnl # Name, Version, Release, Date, Author, LT_Current, LT_Revision, LT_Age dnl # dnl # Written by Chris Dunlap . dnl # Modified by Brian Behlendorf . 
dnl # AC_DEFUN([ZFS_AC_META], [ AH_BOTTOM([ #undef PACKAGE #undef PACKAGE_BUGREPORT #undef PACKAGE_NAME #undef PACKAGE_STRING #undef PACKAGE_TARNAME #undef PACKAGE_VERSION #undef STDC_HEADERS #undef VERSION]) AC_PROG_AWK AC_MSG_CHECKING([metadata]) META="$srcdir/META" _zfs_ac_meta_type="none" if test -f "$META"; then _zfs_ac_meta_type="META file" ZFS_META_NAME=_ZFS_AC_META_GETVAL([(Name|Project|Package)]); if test -n "$ZFS_META_NAME"; then AC_DEFINE_UNQUOTED([ZFS_META_NAME], ["$ZFS_META_NAME"], [Define the project name.] ) AC_SUBST([ZFS_META_NAME]) fi ZFS_META_VERSION=_ZFS_AC_META_GETVAL([Version]); if test -n "$ZFS_META_VERSION"; then AC_DEFINE_UNQUOTED([ZFS_META_VERSION], ["$ZFS_META_VERSION"], [Define the project version.]) AC_DEFINE_UNQUOTED([SPL_META_VERSION], [ZFS_META_VERSION], [Defined for legacy compatibility.]) AC_SUBST([ZFS_META_VERSION]) fi ZFS_META_RELEASE=_ZFS_AC_META_GETVAL([Release]); if test ! -f ".nogitrelease" && git rev-parse --git-dir > /dev/null 2>&1; then _match="${ZFS_META_NAME}-${ZFS_META_VERSION}" _alias=$(git describe --match=${_match} 2>/dev/null) - _release=$(echo ${_alias}|sed "s/${ZFS_META_NAME}//"|cut -f3- -d'-'|sed 's/-/_/g') + _release=$(echo ${_alias}|sed "s/${ZFS_META_NAME}//"|cut -f3- -d'-'|tr - _) if test -n "${_release}"; then ZFS_META_RELEASE=${_release} _zfs_ac_meta_type="git describe" else _match="${ZFS_META_NAME}-${ZFS_META_VERSION}-${ZFS_META_RELEASE}" _alias=$(git describe --match=${_match} 2>/dev/null) - _release=$(echo ${_alias}|sed 's/${ZFS_META_NAME}//'|cut -f3- -d'-'|sed 's/-/_/g') + _release=$(echo ${_alias}|sed 's/${ZFS_META_NAME}//'|cut -f3- -d'-'|tr - _) if test -n "${_release}"; then ZFS_META_RELEASE=${_release} _zfs_ac_meta_type="git describe" fi fi fi if test -n "$ZFS_META_RELEASE"; then AC_DEFINE_UNQUOTED([ZFS_META_RELEASE], ["$ZFS_META_RELEASE"], [Define the project release.]) AC_DEFINE_UNQUOTED([SPL_META_RELEASE], [ZFS_META_RELEASE], [Defined for legacy compatibility.]) AC_SUBST([ZFS_META_RELEASE]) 
RELEASE="$ZFS_META_RELEASE" AC_SUBST([RELEASE]) fi ZFS_META_LICENSE=_ZFS_AC_META_GETVAL([License]); if test -n "$ZFS_META_LICENSE"; then AC_DEFINE_UNQUOTED([ZFS_META_LICENSE], ["$ZFS_META_LICENSE"], [Define the project license.] ) AC_SUBST([ZFS_META_LICENSE]) fi if test -n "$ZFS_META_NAME" -a -n "$ZFS_META_VERSION"; then ZFS_META_ALIAS="$ZFS_META_NAME-$ZFS_META_VERSION" test -n "$ZFS_META_RELEASE" && ZFS_META_ALIAS="$ZFS_META_ALIAS-$ZFS_META_RELEASE" AC_DEFINE_UNQUOTED([ZFS_META_ALIAS], ["$ZFS_META_ALIAS"], [Define the project alias string.]) AC_DEFINE_UNQUOTED([SPL_META_ALIAS], [ZFS_META_ALIAS], [Defined for legacy compatibility.]) AC_SUBST([ZFS_META_ALIAS]) fi ZFS_META_DATA=_ZFS_AC_META_GETVAL([Date]); if test -n "$ZFS_META_DATA"; then AC_DEFINE_UNQUOTED([ZFS_META_DATA], ["$ZFS_META_DATA"], [Define the project release date.] ) AC_SUBST([ZFS_META_DATA]) fi ZFS_META_AUTHOR=_ZFS_AC_META_GETVAL([Author]); if test -n "$ZFS_META_AUTHOR"; then AC_DEFINE_UNQUOTED([ZFS_META_AUTHOR], ["$ZFS_META_AUTHOR"], [Define the project author.] ) AC_SUBST([ZFS_META_AUTHOR]) fi ZFS_META_KVER_MIN=_ZFS_AC_META_GETVAL([Linux-Minimum]); if test -n "$ZFS_META_KVER_MIN"; then AC_DEFINE_UNQUOTED([ZFS_META_KVER_MIN], ["$ZFS_META_KVER_MIN"], [Define the minimum compatible kernel version.] ) AC_SUBST([ZFS_META_KVER_MIN]) fi ZFS_META_KVER_MAX=_ZFS_AC_META_GETVAL([Linux-Maximum]); if test -n "$ZFS_META_KVER_MAX"; then AC_DEFINE_UNQUOTED([ZFS_META_KVER_MAX], ["$ZFS_META_KVER_MAX"], [Define the maximum compatible kernel version.] 
) AC_SUBST([ZFS_META_KVER_MAX]) fi m4_pattern_allow([^LT_(CURRENT|REVISION|AGE)$]) ZFS_META_LT_CURRENT=_ZFS_AC_META_GETVAL([LT_Current]); ZFS_META_LT_REVISION=_ZFS_AC_META_GETVAL([LT_Revision]); ZFS_META_LT_AGE=_ZFS_AC_META_GETVAL([LT_Age]); if test -n "$ZFS_META_LT_CURRENT" \ -o -n "$ZFS_META_LT_REVISION" \ -o -n "$ZFS_META_LT_AGE"; then test -n "$ZFS_META_LT_CURRENT" || ZFS_META_LT_CURRENT="0" test -n "$ZFS_META_LT_REVISION" || ZFS_META_LT_REVISION="0" test -n "$ZFS_META_LT_AGE" || ZFS_META_LT_AGE="0" AC_DEFINE_UNQUOTED([ZFS_META_LT_CURRENT], ["$ZFS_META_LT_CURRENT"], [Define the libtool library 'current' version information.] ) AC_DEFINE_UNQUOTED([ZFS_META_LT_REVISION], ["$ZFS_META_LT_REVISION"], [Define the libtool library 'revision' version information.] ) AC_DEFINE_UNQUOTED([ZFS_META_LT_AGE], ["$ZFS_META_LT_AGE"], [Define the libtool library 'age' version information.] ) AC_SUBST([ZFS_META_LT_CURRENT]) AC_SUBST([ZFS_META_LT_REVISION]) AC_SUBST([ZFS_META_LT_AGE]) fi fi AC_MSG_RESULT([$_zfs_ac_meta_type]) ] ) dnl # _ZFS_AC_META_GETVAL (KEY_NAME_OR_REGEX) dnl # dnl # Returns the META VALUE associated with the given KEY_NAME_OR_REGEX expr. dnl # dnl # Despite their resemblance to line noise, dnl # the "@<:@" and "@:>@" constructs are quadrigraphs for "[" and "]". dnl # dnl # dnl # The "$[]1" and "$[]2" constructs prevent M4 parameter expansion dnl # so a literal $1 and $2 will be passed to the resulting awk script, dnl # whereas the "$1" will undergo M4 parameter expansion for the META key. 
dnl # AC_DEFUN([_ZFS_AC_META_GETVAL], [`$AWK -F ':@<:@ \t@:>@+' '$[]1 ~ /^ *$1$/ { print $[]2; exit }' $META`]dnl ) diff --git a/contrib/bpftrace/zfs-trace.sh b/contrib/bpftrace/zfs-trace.sh index 54f66f3ba3fd..0165335c474b 100755 --- a/contrib/bpftrace/zfs-trace.sh +++ b/contrib/bpftrace/zfs-trace.sh @@ -1,10 +1,11 @@ #!/bin/sh -ZVER=$(cut -f 1 -d '-' /sys/module/zfs/version) +read -r ZVER < /sys/module/zfs/version +ZVER="${ZVER%%-*}" KVER=$(uname -r) exec bpftrace \ --include "/usr/src/zfs-$ZVER/$KVER/zfs_config.h" \ -I "/usr/src/zfs-$ZVER/include" \ -I "/usr/src/zfs-$ZVER/include/spl" \ "$@" diff --git a/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in b/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in index d21ab74cc0d0..a161fbf6f113 100755 --- a/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in +++ b/contrib/dracut/02zfsexpandknowledge/module-setup.sh.in @@ -1,126 +1,122 @@ #!/usr/bin/env bash get_devtype() { local typ - typ=$(udevadm info --query=property --name="$1" | grep "^ID_FS_TYPE=" | sed 's|^ID_FS_TYPE=||') - if [ "$typ" = "" ] ; then + typ=$(udevadm info --query=property --name="$1" | sed -n 's|^ID_FS_TYPE=||p') + if [ -z "$typ" ] ; then typ=$(blkid -c /dev/null "$1" -o value -s TYPE) fi echo "$typ" } get_pool_devices() { # also present in 99zfssystemd local poolconfigtemp local poolconfigoutput local pooldev local resolved poolconfigtemp="$(mktemp)" if ! 
@sbindir@/zpool list -v -H -P "$1" > "$poolconfigtemp" 2>&1 ; then poolconfigoutput="$(cat "$poolconfigtemp")" dinfo "zfsexpandknowledge: pool $1 cannot be listed: $poolconfigoutput" else awk -F '\t' '/\t\/dev/ { print $2 }' "$poolconfigtemp" | \ while read -r pooldev ; do if [ -e "$pooldev" ] ; then resolved="$(readlink -f "$pooldev")" dinfo "zfsexpandknowledge: pool $1 has device $pooldev (which resolves to $resolved)" echo "$resolved" fi done fi rm -f "$poolconfigtemp" } find_zfs_block_devices() { local dev local mp local fstype - local pool local _ numfields="$(awk '{print NF; exit}' /proc/self/mountinfo)" if [ "$numfields" = "10" ] ; then fields="_ _ _ _ mp _ _ fstype dev _" else fields="_ _ _ _ mp _ _ _ fstype dev _" fi # shellcheck disable=SC2086 while read -r ${fields?} ; do [ "$fstype" = "zfs" ] || continue - if [ "$mp" = "$1" ]; then - pool=$(echo "$dev" | cut -d / -f 1) - get_pool_devices "$pool" - fi + [ "$mp" = "$1" ] && get_pool_devices "${dev%%/*}" done < /proc/self/mountinfo } array_contains () { local e for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done return 1 } check() { local mp local dev local blockdevs local fstype local majmin local _depdev local _depdevname local _depdevtype # shellcheck disable=SC2154 if [ -n "$hostonly" ]; then for mp in \ "/" \ "/etc" \ "/bin" \ "/sbin" \ "/lib" \ "/lib64" \ "/usr" \ "/usr/bin" \ "/usr/sbin" \ "/usr/lib" \ "/usr/lib64" \ "/boot"; do mp=$(readlink -f "$mp") mountpoint "$mp" >/dev/null 2>&1 || continue blockdevs=$(find_zfs_block_devices "$mp") if [ -z "$blockdevs" ] ; then continue ; fi dinfo "zfsexpandknowledge: block devices backing ZFS dataset $mp: ${blockdevs//$'\n'/ }" for dev in $blockdevs do array_contains "$dev" "${host_devs[@]}" || host_devs+=("$dev") fstype=$(get_devtype "$dev") host_fs_types["$dev"]="$fstype" majmin=$(get_maj_min "$dev") if [ -d "/sys/dev/block/$majmin/slaves" ] ; then for _depdev in "/sys/dev/block/$majmin/slaves"/*; do - [[ -f $_depdev/dev ]] || continue - 
_depdev=/dev/$(basename "$_depdev") - _depdevname=$(udevadm info --query=property --name="$_depdev" | grep "^DEVNAME=" | sed 's|^DEVNAME=||') + [ -f "$_depdev/dev" ] || continue + _depdev="/dev/${_depdev##*/}" + _depdevname=$(udevadm info --query=property --name="$_depdev" | sed -n 's|^DEVNAME=||p') _depdevtype=$(get_devtype "$_depdevname") dinfo "zfsexpandknowledge: underlying block device backing ZFS dataset $mp: ${_depdevname//$'\n'/ }" array_contains "$_depdevname" "${host_devs[@]}" || host_devs+=("$_depdevname") host_fs_types["$_depdevname"]="$_depdevtype" done fi done done for a in "${host_devs[@]}" do dinfo "zfsexpandknowledge: host device $a" done for a in "${!host_fs_types[@]}" do dinfo "zfsexpandknowledge: device $a of type ${host_fs_types[$a]}" done fi return 1 } diff --git a/contrib/dracut/90zfs/module-setup.sh.in b/contrib/dracut/90zfs/module-setup.sh.in index 8c2951dd3523..bee53522772f 100755 --- a/contrib/dracut/90zfs/module-setup.sh.in +++ b/contrib/dracut/90zfs/module-setup.sh.in @@ -1,142 +1,142 @@ #!/usr/bin/env bash # shellcheck disable=SC2154 check() { # We depend on udev-rules being loaded [ "${1}" = "-d" ] && return 0 # Verify the zfs tool chain for tool in "@sbindir@/zgenhostid" "@sbindir@/zpool" "@sbindir@/zfs" "@mounthelperdir@/mount.zfs" ; do test -x "$tool" || return 1 done return 0 } depends() { echo udev-rules return 0 } installkernel() { instmods zfs instmods zcommon instmods znvpair instmods zavl instmods zunicode instmods zlua instmods icp instmods spl instmods zlib_deflate instmods zlib_inflate } install() { inst_rules @udevruledir@/90-zfs.rules inst_rules @udevruledir@/69-vdev.rules inst_rules @udevruledir@/60-zvol.rules dracut_install hostid dracut_install grep dracut_install @sbindir@/zgenhostid dracut_install @sbindir@/zfs dracut_install @sbindir@/zpool # Workaround for https://github.com/openzfs/zfs/issues/4749 by # ensuring libgcc_s.so(.1) is included if ldd @sbindir@/zpool | grep -qF 'libgcc_s.so'; then # Dracut will have 
already tracked and included it :; elif command -v gcc-config >/dev/null 2>&1; then # On systems with gcc-config (Gentoo, Funtoo, etc.): # Use the current profile to resolve the appropriate path s="$(gcc-config -c)" dracut_install "/usr/lib/gcc/${s%-*}/${s##*-}/libgcc_s.so"* elif [ "$(echo /usr/lib/libgcc_s.so*)" != "/usr/lib/libgcc_s.so*" ]; then # Try a simple path first dracut_install /usr/lib/libgcc_s.so* elif [ "$(echo /lib*/libgcc_s.so*)" != "/lib*/libgcc_s.so*" ]; then # SUSE dracut_install /lib*/libgcc_s.so* else # Fallback: Guess the path and include all matches dracut_install /usr/lib*/gcc/**/libgcc_s.so* fi if [ @LIBFETCH_DYNAMIC@ != 0 ]; then for d in $libdirs; do [ -e "$d"/@LIBFETCH_SONAME@ ] && dracut_install "$d"/@LIBFETCH_SONAME@ done fi dracut_install @mounthelperdir@/mount.zfs dracut_install @udevdir@/vdev_id dracut_install awk - dracut_install basename dracut_install cut + dracut_install tr dracut_install head dracut_install @udevdir@/zvol_id inst_hook cmdline 95 "${moddir}/parse-zfs.sh" if [ -n "$systemdutildir" ] ; then inst_script "${moddir}/zfs-generator.sh" "$systemdutildir"/system-generators/dracut-zfs-generator fi inst_hook pre-mount 90 "${moddir}/zfs-load-key.sh" inst_hook mount 98 "${moddir}/mount-zfs.sh" inst_hook cleanup 99 "${moddir}/zfs-needshutdown.sh" inst_hook shutdown 20 "${moddir}/export-zfs.sh" inst_simple "${moddir}/zfs-lib.sh" "/lib/dracut-zfs-lib.sh" if [ -e @sysconfdir@/zfs/zpool.cache ]; then inst @sysconfdir@/zfs/zpool.cache type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/zfs/zpool.cache fi if [ -e @sysconfdir@/zfs/vdev_id.conf ]; then inst @sysconfdir@/zfs/vdev_id.conf type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/zfs/vdev_id.conf fi # Synchronize initramfs and system hostid if [ -f @sysconfdir@/hostid ]; then inst @sysconfdir@/hostid type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/hostid elif HOSTID="$(hostid 2>/dev/null)" && [ "${HOSTID}" != "00000000" ]; then 
zgenhostid -o "${initdir}@sysconfdir@/hostid" "${HOSTID}" type mark_hostonly >/dev/null 2>&1 && mark_hostonly @sysconfdir@/hostid fi if dracut_module_included "systemd"; then mkdir -p "${initdir}/$systemdsystemunitdir/zfs-import.target.wants" for _service in "zfs-import-scan.service" "zfs-import-cache.service" ; do dracut_install "@systemdunitdir@/$_service" if ! [ -L "${initdir}/$systemdsystemunitdir/zfs-import.target.wants/$_service" ]; then ln -sf ../$_service "${initdir}/$systemdsystemunitdir/zfs-import.target.wants/$_service" type mark_hostonly >/dev/null 2>&1 && mark_hostonly "@systemdunitdir@/$_service" fi done inst "${moddir}"/zfs-env-bootfs.service "${systemdsystemunitdir}"/zfs-env-bootfs.service ln -s ../zfs-env-bootfs.service "${initdir}/${systemdsystemunitdir}/zfs-import.target.wants"/zfs-env-bootfs.service type mark_hostonly >/dev/null 2>&1 && mark_hostonly @systemdunitdir@/zfs-env-bootfs.service dracut_install systemd-ask-password dracut_install systemd-tty-ask-password-agent mkdir -p "${initdir}/$systemdsystemunitdir/initrd.target.wants" dracut_install @systemdunitdir@/zfs-import.target if ! [ -L "${initdir}/$systemdsystemunitdir/initrd.target.wants"/zfs-import.target ]; then ln -s ../zfs-import.target "${initdir}/$systemdsystemunitdir/initrd.target.wants"/zfs-import.target type mark_hostonly >/dev/null 2>&1 && mark_hostonly @systemdunitdir@/zfs-import.target fi for _service in zfs-snapshot-bootfs.service zfs-rollback-bootfs.service ; do inst "${moddir}/$_service" "${systemdsystemunitdir}/$_service" if ! 
[ -L "${initdir}/$systemdsystemunitdir/initrd.target.wants/$_service" ]; then ln -s "../$_service" "${initdir}/$systemdsystemunitdir/initrd.target.wants/$_service" fi done # There isn't a pkg-config variable for this, # and dracut doesn't automatically resolve anything this'd be next to local systemdsystemenvironmentgeneratordir systemdsystemenvironmentgeneratordir="$(pkg-config --variable=prefix systemd || echo "/usr")/lib/systemd/system-environment-generators" mkdir -p "${initdir}/${systemdsystemenvironmentgeneratordir}" inst "${moddir}"/import-opts-generator.sh "${systemdsystemenvironmentgeneratordir}"/zfs-import-opts.sh fi } diff --git a/contrib/dracut/90zfs/parse-zfs.sh.in b/contrib/dracut/90zfs/parse-zfs.sh.in index fe786a880699..0f92f5c80cce 100755 --- a/contrib/dracut/90zfs/parse-zfs.sh.in +++ b/contrib/dracut/90zfs/parse-zfs.sh.in @@ -1,63 +1,63 @@ #!/bin/sh # shellcheck disable=SC2034,SC2154 . /lib/dracut-lib.sh # Let the command line override our host id. spl_hostid=$(getarg spl_hostid=) if [ -n "${spl_hostid}" ] ; then info "ZFS: Using hostid from command line: ${spl_hostid}" zgenhostid -f "${spl_hostid}" elif [ -f "/etc/hostid" ] ; then info "ZFS: Using hostid from /etc/hostid: $(hostid)" else warn "ZFS: No hostid found on kernel command line or /etc/hostid." warn "ZFS: Pools may not import correctly." fi wait_for_zfs=0 case "${root}" in ""|zfs|zfs:) # We'll take root unset, root=zfs, or root=zfs: # No root set, so we want to read the bootfs attribute. We # can't do that until udev settles so we'll set dummy values # and hope for the best later on. root="zfs:AUTO" rootok=1 wait_for_zfs=1 info "ZFS: Enabling autodetection of bootfs after udev settles." ;; ZFS=*|zfs:*|FILESYSTEM=*) # root is explicit ZFS root. Parse it now. We can handle # a root=... 
param in any of the following formats: # root=ZFS=rpool/ROOT # root=zfs:rpool/ROOT # root=zfs:FILESYSTEM=rpool/ROOT # root=FILESYSTEM=rpool/ROOT # root=ZFS=pool+with+space/ROOT+WITH+SPACE (translates to root=ZFS=pool with space/ROOT WITH SPACE) # Strip down to just the pool/fs root="${root#zfs:}" root="${root#FILESYSTEM=}" root="zfs:${root#ZFS=}" # switch + with spaces because kernel cmdline does not allow us to quote parameters - root=$(printf '%s\n' "$root" | sed "s/+/ /g") + root=$(echo "$root" | tr '+' ' ') rootok=1 wait_for_zfs=1 info "ZFS: Set ${root} as bootfs." ;; esac # Make sure Dracut is happy that we have a root and will wait for ZFS # modules to settle before mounting. if [ ${wait_for_zfs} -eq 1 ]; then ln -s /dev/null /dev/root 2>/dev/null initqueuedir="${hookdir}/initqueue/finished" test -d "${initqueuedir}" || { initqueuedir="${hookdir}/initqueue-finished" } echo '[ -e /dev/zfs ]' > "${initqueuedir}/zfs.sh" fi diff --git a/contrib/dracut/90zfs/zfs-generator.sh.in b/contrib/dracut/90zfs/zfs-generator.sh.in index b57c64c688b1..e50b9530c4f0 100755 --- a/contrib/dracut/90zfs/zfs-generator.sh.in +++ b/contrib/dracut/90zfs/zfs-generator.sh.in @@ -1,119 +1,119 @@ #!/bin/sh # shellcheck disable=SC2016,SC1004 grep -wq debug /proc/cmdline && debug=1 [ -n "$debug" ] && echo "zfs-generator: starting" >> /dev/kmsg GENERATOR_DIR="$1" [ -n "$GENERATOR_DIR" ] || { echo "zfs-generator: no generator directory specified, exiting" >> /dev/kmsg exit 1 } [ -f /lib/dracut-lib.sh ] && dracutlib=/lib/dracut-lib.sh [ -f /usr/lib/dracut/modules.d/99base/dracut-lib.sh ] && dracutlib=/usr/lib/dracut/modules.d/99base/dracut-lib.sh command -v getarg >/dev/null 2>&1 || { [ -n "$debug" ] && echo "zfs-generator: loading Dracut library from $dracutlib" >> /dev/kmsg . "$dracutlib" } . 
/lib/dracut-zfs-lib.sh [ -z "$root" ] && root=$(getarg root=) [ -z "$rootfstype" ] && rootfstype=$(getarg rootfstype=) [ -z "$rootflags" ] && rootflags=$(getarg rootflags=) # If root is not ZFS= or zfs: or rootfstype is not zfs # then we are not supposed to handle it. [ "${root##zfs:}" = "${root}" ] && [ "${root##ZFS=}" = "${root}" ] && [ "$rootfstype" != "zfs" ] && exit 0 case ",${rootflags}," in *,zfsutil,*) ;; ,,) rootflags=zfsutil ;; *) rootflags="zfsutil,${rootflags}" ;; esac if [ "${root}" != "zfs:AUTO" ]; then root="${root##zfs:}" root="${root##ZFS=}" fi [ -n "$debug" ] && echo "zfs-generator: writing extension for sysroot.mount to $GENERATOR_DIR/sysroot.mount.d/zfs-enhancement.conf" >> /dev/kmsg mkdir -p "$GENERATOR_DIR"/sysroot.mount.d "$GENERATOR_DIR"/initrd-root-fs.target.requires "$GENERATOR_DIR"/dracut-pre-mount.service.d { echo "[Unit]" echo "Before=initrd-root-fs.target" echo "After=zfs-import.target" echo echo "[Mount]" if [ "${root}" = "zfs:AUTO" ]; then echo "PassEnvironment=BOOTFS" echo 'What=${BOOTFS}' else echo "What=${root}" fi echo "Type=zfs" echo "Options=${rootflags}" } > "$GENERATOR_DIR"/sysroot.mount.d/zfs-enhancement.conf ln -fs ../sysroot.mount "$GENERATOR_DIR"/initrd-root-fs.target.requires/sysroot.mount if [ "${root}" = "zfs:AUTO" ]; then { echo "[Unit]" echo "Before=initrd-root-fs.target" echo "After=sysroot.mount" echo "DefaultDependencies=no" echo echo "[Service]" echo "Type=oneshot" echo "PassEnvironment=BOOTFS" echo "ExecStart=/bin/sh -c '" ' \ . 
/lib/dracut-zfs-lib.sh; \ _zfs_nonroot_necessities_cb() { \ zfs mount | grep -m1 -q "^$1 " && return 0; \ echo "Mounting $1 on /sysroot$2"; \ mount -o zfsutil -t zfs "$1" "/sysroot$2"; \ }; \ for_relevant_root_children "${BOOTFS}" _zfs_nonroot_necessities_cb;' \ "'" } > "$GENERATOR_DIR"/zfs-nonroot-necessities.service ln -fs ../zfs-nonroot-necessities.service "$GENERATOR_DIR"/initrd-root-fs.target.requires/zfs-nonroot-necessities.service else # We can solve this statically at generation time, so do! _zfs_generator_cb() { dset="${1}" mpnt="${2}" - unit="sysroot$(echo "$mpnt" | sed 's;/;-;g').mount" + unit="sysroot$(echo "$mpnt" | tr '/' '-').mount" { echo "[Unit]" echo "Before=initrd-root-fs.target" echo "After=sysroot.mount" echo echo "[Mount]" echo "Where=/sysroot${mpnt}" echo "What=${dset}" echo "Type=zfs" echo "Options=zfsutil" } > "$GENERATOR_DIR/${unit}" ln -fs ../"${unit}" "$GENERATOR_DIR"/initrd-root-fs.target.requires/"${unit}" } for_relevant_root_children "${root}" _zfs_generator_cb fi { echo "[Unit]" echo "After=zfs-import.target" } > "$GENERATOR_DIR"/dracut-pre-mount.service.d/zfs-enhancement.conf [ -n "$debug" ] && echo "zfs-generator: finished" >> /dev/kmsg exit 0 diff --git a/contrib/initramfs/scripts/zfs b/contrib/initramfs/scripts/zfs index 6e39f139d845..814547b6fa0c 100644 --- a/contrib/initramfs/scripts/zfs +++ b/contrib/initramfs/scripts/zfs @@ -1,994 +1,993 @@ # ZFS boot stub for initramfs-tools. # # In the initramfs environment, the /init script sources this stub to # override the default functions in the /scripts/local script. # # Enable this by passing boot=zfs on the kernel command line. # # $quiet, $root, $rpool, $bootfs come from the cmdline: # shellcheck disable=SC2154 # Source the common functions . /etc/zfs/zfs-functions # Start interactive shell. # Use debian's panic() if defined, because it allows to prevent shell access # by setting panic in cmdline (e.g. panic=0 or panic=15). 
# See "4.5 Disable root prompt on the initramfs" of Securing Debian Manual: # https://www.debian.org/doc/manuals/securing-debian-howto/ch4.en.html shell() { if command -v panic > /dev/null 2>&1; then panic else /bin/sh fi } # This runs any scripts that should run before we start importing # pools and mounting any filesystems. pre_mountroot() { if command -v run_scripts > /dev/null 2>&1 then if [ -f "/scripts/local-top" ] || [ -d "/scripts/local-top" ] then [ "$quiet" != "y" ] && \ zfs_log_begin_msg "Running /scripts/local-top" run_scripts /scripts/local-top [ "$quiet" != "y" ] && zfs_log_end_msg fi if [ -f "/scripts/local-premount" ] || [ -d "/scripts/local-premount" ] then [ "$quiet" != "y" ] && \ zfs_log_begin_msg "Running /scripts/local-premount" run_scripts /scripts/local-premount [ "$quiet" != "y" ] && zfs_log_end_msg fi fi } # If plymouth is available, hide the splash image. disable_plymouth() { if [ -x /bin/plymouth ] && /bin/plymouth --ping then /bin/plymouth hide-splash >/dev/null 2>&1 fi } # Get a ZFS filesystem property value. get_fs_value() { fs="$1" value=$2 "${ZFS}" get -H -ovalue "$value" "$fs" 2> /dev/null } # Find the 'bootfs' property on pool $1. # If the property does not contain '/', then ignore this # pool by exporting it again. find_rootfs() { pool="$1" # If 'POOL_IMPORTED' isn't set, no pool imported and therefore # we won't be able to find a root fs. [ -z "${POOL_IMPORTED}" ] && return 1 # If it's already specified, just keep it mounted and exit # User (kernel command line) must be correct. [ -n "${ZFS_BOOTFS}" ] && return 0 # Not set, try to find it in the 'bootfs' property of the pool. # NOTE: zpool does not support 'get -H -ovalue bootfs'... ZFS_BOOTFS=$("${ZPOOL}" list -H -obootfs "$pool") # Make sure it's not '-' and that it starts with /. if [ "${ZFS_BOOTFS}" != "-" ] && \ get_fs_value "${ZFS_BOOTFS}" mountpoint | grep -q '^/$' then # Keep it mounted POOL_IMPORTED=1 return 0 fi # Not boot fs here, export it and later try again.. 
"${ZPOOL}" export "$pool" POOL_IMPORTED= ZFS_BOOTFS= return 1 } # Support function to get a list of all pools, separated with ';' find_pools() { pools=$("$@" 2> /dev/null | \ - grep -E "pool:|^[a-zA-Z0-9]" | \ - sed 's@.*: @@' | \ + sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | \ tr '\n' ';') echo "${pools%%;}" # Return without the last ';'. } # Get a list of all available pools get_pools() { if [ -n "${ZFS_POOL_IMPORT}" ]; then echo "$ZFS_POOL_IMPORT" return 0 fi # Get the base list of available pools. available_pools=$(find_pools "$ZPOOL" import) # Just in case - seen it happen (that a pool isn't visible/found # with a simple "zpool import" but only when using the "-d" # option or setting ZPOOL_IMPORT_PATH). if [ -d "/dev/disk/by-id" ] then npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id) if [ -n "$npools" ] then # Because we have found extra pool(s) here, which wasn't # found 'normally', we need to force USE_DISK_BY_ID to # make sure we're able to actually import it/them later. USE_DISK_BY_ID='yes' if [ -n "$available_pools" ] then # Filter out duplicates (pools found with the simple # "zpool import" but which is also found with the # "zpool import -d ..."). npools=$(echo "$npools" | sed "s,$available_pools,,") # Add the list to the existing list of # available pools available_pools="$available_pools;$npools" else available_pools="$npools" fi fi fi # Filter out any exceptions... if [ -n "$ZFS_POOL_EXCEPTIONS" ] then found="" apools="" OLD_IFS="$IFS" ; IFS=";" for pool in $available_pools do for exception in $ZFS_POOL_EXCEPTIONS do [ "$pool" = "$exception" ] && continue 2 found="$pool" done if [ -n "$found" ] then if [ -n "$apools" ] then apools="$apools;$pool" else apools="$pool" fi fi done IFS="$OLD_IFS" available_pools="$apools" fi # Return list of available pools. echo "$available_pools" } # Import given pool $1 import_pool() { pool="$1" # Verify that the pool isn't already imported # Make as sure as we can to not require '-f' to import. 
"${ZPOOL}" get name,guid -o value -H 2>/dev/null | grep -Fxq "$pool" && return 0 # For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set # to something we can use later with the real import(s). We want to # make sure we find all by* dirs, BUT by-vdev should be first (if it # exists). if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ] then dirs="$(for dir in /dev/disk/by-* do # Ignore by-vdev here - we want it first! echo "$dir" | grep -q /by-vdev && continue [ ! -d "$dir" ] && continue printf "%s" "$dir:" done | sed 's,:$,,g')" if [ -d "/dev/disk/by-vdev" ] then # Add by-vdev at the beginning. ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:" fi # ... and /dev at the very end, just for good measure. ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev" fi # Needs to be exported for "zpool" to catch it. [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH [ "$quiet" != "y" ] && zfs_log_begin_msg \ "Importing pool '${pool}' using defaults" ZFS_CMD="${ZPOOL} import -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}" ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)" ZFS_ERROR="$?" if [ "${ZFS_ERROR}" != 0 ] then [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" if [ -f "${ZPOOL_CACHE}" ] then [ "$quiet" != "y" ] && zfs_log_begin_msg \ "Importing pool '${pool}' using cachefile." ZFS_CMD="${ZPOOL} import -c ${ZPOOL_CACHE} -N ${ZPOOL_FORCE} ${ZPOOL_IMPORT_OPTS}" ZFS_STDERR="$($ZFS_CMD "$pool" 2>&1)" ZFS_ERROR="$?" fi if [ "${ZFS_ERROR}" != 0 ] then [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" disable_plymouth echo "" echo "Command: ${ZFS_CMD} '$pool'" echo "Message: $ZFS_STDERR" echo "Error: $ZFS_ERROR" echo "" echo "Failed to import pool '$pool'." echo "Manually import the pool and exit." shell fi fi [ "$quiet" != "y" ] && zfs_log_end_msg POOL_IMPORTED=1 return 0 } # Load ZFS modules # Loading a module in a initrd require a slightly different approach, # with more logging etc. 
load_module_initrd() { [ -n "$ROOTDELAY" ] && ZFS_INITRD_PRE_MOUNTROOT_SLEEP="$ROOTDELAY" if [ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" -gt 0 ] 2>/dev/null then if [ "$quiet" != "y" ]; then zfs_log_begin_msg "Sleeping for" \ "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP seconds..." fi sleep "$ZFS_INITRD_PRE_MOUNTROOT_SLEEP" [ "$quiet" != "y" ] && zfs_log_end_msg fi # Wait for all of the /dev/{hd,sd}[a-z] device nodes to appear. if command -v wait_for_udev > /dev/null 2>&1 ; then wait_for_udev 10 elif command -v wait_for_dev > /dev/null 2>&1 ; then wait_for_dev fi # zpool import refuse to import without a valid /proc/self/mounts [ ! -f /proc/self/mounts ] && mount proc /proc # Load the module load_module "zfs" || return 1 if [ "$ZFS_INITRD_POST_MODPROBE_SLEEP" -gt 0 ] 2>/dev/null then if [ "$quiet" != "y" ]; then zfs_log_begin_msg "Sleeping for" \ "$ZFS_INITRD_POST_MODPROBE_SLEEP seconds..." fi sleep "$ZFS_INITRD_POST_MODPROBE_SLEEP" [ "$quiet" != "y" ] && zfs_log_end_msg fi return 0 } # Mount a given filesystem mount_fs() { fs="$1" # Check that the filesystem exists "${ZFS}" list -oname -tfilesystem -H "${fs}" > /dev/null 2>&1 || return 1 # Skip filesystems with canmount=off. The root fs should not have # canmount=off, but ignore it for backwards compatibility just in case. if [ "$fs" != "${ZFS_BOOTFS}" ] then canmount=$(get_fs_value "$fs" canmount) [ "$canmount" = "off" ] && return 0 fi # Need the _original_ datasets mountpoint! mountpoint=$(get_fs_value "$fs" mountpoint) ZFS_CMD="mount -o zfsutil -t zfs" if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ]; then # Can't use the mountpoint property. Might be one of our # clones. Check the 'org.zol:mountpoint' property set in # clone_snap() if that's usable. mountpoint=$(get_fs_value "$fs" org.zol:mountpoint) if [ "$mountpoint" = "legacy" ] || [ "$mountpoint" = "none" ] || [ "$mountpoint" = "-" ] then if [ "$fs" != "${ZFS_BOOTFS}" ]; then # We don't have a proper mountpoint and this # isn't the root fs. 
return 0 else # Last hail-mary: Hope 'rootmnt' is set! mountpoint="" fi fi # If it's not a legacy filesystem, it can only be a # native one... if [ "$mountpoint" = "legacy" ]; then ZFS_CMD="mount -t zfs" fi fi # Possibly decrypt a filesystem using native encryption. decrypt_fs "$fs" [ "$quiet" != "y" ] && \ zfs_log_begin_msg "Mounting '${fs}' on '${rootmnt}/${mountpoint}'" [ -n "${ZFS_DEBUG}" ] && \ zfs_log_begin_msg "CMD: '$ZFS_CMD ${fs} ${rootmnt}/${mountpoint}'" ZFS_STDERR=$(${ZFS_CMD} "${fs}" "${rootmnt}/${mountpoint}" 2>&1) ZFS_ERROR=$? if [ "${ZFS_ERROR}" != 0 ] then [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" disable_plymouth echo "" echo "Command: ${ZFS_CMD} ${fs} ${rootmnt}/${mountpoint}" echo "Message: $ZFS_STDERR" echo "Error: $ZFS_ERROR" echo "" echo "Failed to mount ${fs} on ${rootmnt}/${mountpoint}." echo "Manually mount the filesystem and exit." shell else [ "$quiet" != "y" ] && zfs_log_end_msg fi return 0 } # Unlock a ZFS native encrypted filesystem. decrypt_fs() { fs="$1" # If pool encryption is active and the zfs command understands '-o encryption' if [ "$(zpool list -H -o feature@encryption "${fs%%/*}")" = 'active' ]; then # Determine dataset that holds key for root dataset ENCRYPTIONROOT="$(get_fs_value "${fs}" encryptionroot)" KEYLOCATION="$(get_fs_value "${ENCRYPTIONROOT}" keylocation)" echo "${ENCRYPTIONROOT}" > /run/zfs_fs_name # If root dataset is encrypted... if ! [ "${ENCRYPTIONROOT}" = "-" ]; then KEYSTATUS="$(get_fs_value "${ENCRYPTIONROOT}" keystatus)" # Continue only if the key needs to be loaded [ "$KEYSTATUS" = "unavailable" ] || return 0 # Do not prompt if key is stored noninteractively, if ! 
[ "${KEYLOCATION}" = "prompt" ]; then $ZFS load-key "${ENCRYPTIONROOT}" # Prompt with plymouth, if active elif /bin/plymouth --ping 2>/dev/null; then echo "plymouth" > /run/zfs_console_askpwd_cmd for _ in 1 2 3; do plymouth ask-for-password --prompt "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \ $ZFS load-key "${ENCRYPTIONROOT}" && break done # Prompt with systemd, if active elif [ -e /run/systemd/system ]; then echo "systemd-ask-password" > /run/zfs_console_askpwd_cmd for _ in 1 2 3; do systemd-ask-password --no-tty "Encrypted ZFS password for ${ENCRYPTIONROOT}" | \ $ZFS load-key "${ENCRYPTIONROOT}" && break done # Prompt with ZFS tty, otherwise else # Temporarily setting "printk" to "7" allows the prompt to appear even when the "quiet" kernel option has been used echo "load-key" > /run/zfs_console_askpwd_cmd - storeprintk="$(awk '{print $1}' /proc/sys/kernel/printk)" + read -r storeprintk _ < /proc/sys/kernel/printk echo 7 > /proc/sys/kernel/printk $ZFS load-key "${ENCRYPTIONROOT}" echo "$storeprintk" > /proc/sys/kernel/printk fi fi fi return 0 } # Destroy a given filesystem. destroy_fs() { fs="$1" [ "$quiet" != "y" ] && \ zfs_log_begin_msg "Destroying '$fs'" ZFS_CMD="${ZFS} destroy $fs" ZFS_STDERR="$(${ZFS_CMD} 2>&1)" ZFS_ERROR="$?" if [ "${ZFS_ERROR}" != 0 ] then [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" disable_plymouth echo "" echo "Command: $ZFS_CMD" echo "Message: $ZFS_STDERR" echo "Error: $ZFS_ERROR" echo "" echo "Failed to destroy '$fs'. Please make sure that '$fs' is not available." echo "Hint: Try: zfs destroy -Rfn $fs" echo "If this dryrun looks good, then remove the 'n' from '-Rfn' and try again." shell else [ "$quiet" != "y" ] && zfs_log_end_msg fi return 0 } # Clone snapshot $1 to destination filesystem $2 # Set 'canmount=noauto' and 'mountpoint=none' so that we get to keep # manual control over it's mounting (i.e., make sure it's not automatically # mounted with a 'zfs mount -a' in the init/systemd scripts). 
clone_snap() { snap="$1" destfs="$2" mountpoint="$3" [ "$quiet" != "y" ] && zfs_log_begin_msg "Cloning '$snap' to '$destfs'" # Clone the snapshot into a dataset we can boot from # + We don't want this filesystem to be automatically mounted, we # want control over this here and nowhere else. # + We don't need any mountpoint set for the same reason. # We use the 'org.zol:mountpoint' property to remember the mountpoint. ZFS_CMD="${ZFS} clone -o canmount=noauto -o mountpoint=none" ZFS_CMD="${ZFS_CMD} -o org.zol:mountpoint=${mountpoint}" ZFS_CMD="${ZFS_CMD} $snap $destfs" ZFS_STDERR="$(${ZFS_CMD} 2>&1)" ZFS_ERROR="$?" if [ "${ZFS_ERROR}" != 0 ] then [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" disable_plymouth echo "" echo "Command: $ZFS_CMD" echo "Message: $ZFS_STDERR" echo "Error: $ZFS_ERROR" echo "" echo "Failed to clone snapshot." echo "Make sure that the any problems are corrected and then make sure" echo "that the dataset '$destfs' exists and is bootable." shell else [ "$quiet" != "y" ] && zfs_log_end_msg fi return 0 } # Rollback a given snapshot. rollback_snap() { snap="$1" [ "$quiet" != "y" ] && zfs_log_begin_msg "Rollback $snap" ZFS_CMD="${ZFS} rollback -Rf $snap" ZFS_STDERR="$(${ZFS_CMD} 2>&1)" ZFS_ERROR="$?" if [ "${ZFS_ERROR}" != 0 ] then [ "$quiet" != "y" ] && zfs_log_failure_msg "${ZFS_ERROR}" disable_plymouth echo "" echo "Command: $ZFS_CMD" echo "Message: $ZFS_STDERR" echo "Error: $ZFS_ERROR" echo "" echo "Failed to rollback snapshot." shell else [ "$quiet" != "y" ] && zfs_log_end_msg fi return 0 } # Get a list of snapshots, give them as a numbered list # to the user to choose from. ask_user_snap() { fs="$1" # We need to temporarily disable debugging. Set 'debug' so we # remember to enabled it again. if [ -n "${ZFS_DEBUG}" ]; then unset ZFS_DEBUG set +x debug=1 fi # Because we need the resulting snapshot, which is sent on # stdout to the caller, we use stderr for our questions. echo "What snapshot do you want to boot from?" 
> /dev/stderr # shellcheck disable=SC2046 IFS=" " set -- $("${ZFS}" list -H -oname -tsnapshot -r "${fs}") i=1 for snap in "$@"; do echo " $i: $snap" i=$((i + 1)) done > /dev/stderr # expr instead of test here because [ a -lt 0 ] errors out, # but expr falls back to lexicographical, which works out right snapnr=0 while expr "$snapnr" "<" 1 > /dev/null || expr "$snapnr" ">" "$#" > /dev/null do printf "%s" "Snap nr [1-$#]? " > /dev/stderr read -r snapnr done # Re-enable debugging. if [ -n "${debug}" ]; then ZFS_DEBUG=1 set -x fi eval echo '$'"$snapnr" } setup_snapshot_booting() { snap="$1" retval=0 # Make sure that the snapshot specified actually exists. if [ ! "$(get_fs_value "${snap}" type)" ] then # Snapshot does not exist (...@ ?) # ask the user for a snapshot to use. snap="$(ask_user_snap "${snap%%@*}")" fi # Separate the full snapshot ('$snap') into it's filesystem and # snapshot names. Would have been nice with a split() function.. rootfs="${snap%%@*}" snapname="${snap##*@}" ZFS_BOOTFS="${rootfs}_${snapname}" if ! grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline then # If the destination dataset for the clone # already exists, destroy it. Recursively if [ "$(get_fs_value "${rootfs}_${snapname}" type)" ]; then filesystems=$("${ZFS}" list -oname -tfilesystem -H \ -r -Sname "${ZFS_BOOTFS}") for fs in $filesystems; do destroy_fs "${fs}" done fi fi # Get all snapshots, recursively (might need to clone /usr, /var etc # as well). for s in $("${ZFS}" list -H -oname -tsnapshot -r "${rootfs}" | \ grep "${snapname}") do if grep -qiE '(^|[^\\](\\\\)* )(rollback)=(on|yes|1)( |$)' /proc/cmdline then # Rollback snapshot rollback_snap "$s" || retval=$((retval + 1)) else # Setup a destination filesystem name. 
# Ex: Called with 'rpool/ROOT/debian@snap2' # rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2 # rpool/ROOT/debian/boot@snap2 => rpool/ROOT/debian_snap2/boot # rpool/ROOT/debian/usr@snap2 => rpool/ROOT/debian_snap2/usr # rpool/ROOT/debian/var@snap2 => rpool/ROOT/debian_snap2/var subfs="${s##$rootfs}" subfs="${subfs%%@$snapname}" destfs="${rootfs}_${snapname}" # base fs. [ -n "$subfs" ] && destfs="${destfs}$subfs" # + sub fs. # Get the mountpoint of the filesystem, to be used # with clone_snap(). If legacy or none, then use # the sub fs value. mountpoint=$(get_fs_value "${s%%@*}" mountpoint) if [ "$mountpoint" = "legacy" ] || \ [ "$mountpoint" = "none" ] then if [ -n "${subfs}" ]; then mountpoint="${subfs}" else mountpoint="/" fi fi # Clone the snapshot into its own # filesystem clone_snap "$s" "${destfs}" "${mountpoint}" || \ retval=$((retval + 1)) fi done # If we haven't return yet, we have a problem... return "${retval}" } # ================================================================ # This is the main function. mountroot() { # ---------------------------------------------------------------- # I N I T I A L S E T U P # ------------ # Run the pre-mount scripts from /scripts/local-top. pre_mountroot # ------------ # Source the default setup variables. [ -r '/etc/default/zfs' ] && . /etc/default/zfs # ------------ # Support debug option if grep -qiE '(^|[^\\](\\\\)* )(zfs_debug|zfs\.debug|zfsdebug)=(on|yes|1)( |$)' /proc/cmdline then ZFS_DEBUG=1 mkdir /var/log #exec 2> /var/log/boot.debug set -x fi # ------------ # Load ZFS module etc. if ! load_module_initrd; then disable_plymouth echo "" echo "Failed to load ZFS modules." echo "Manually load the modules and exit." shell fi # ------------ # Look for the cache file (if any). 
[ -f "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE [ -s "${ZPOOL_CACHE}" ] || unset ZPOOL_CACHE # ------------ # Compatibility: 'ROOT' is for Debian GNU/Linux (etc), # 'root' is for Redhat/Fedora (etc), # 'REAL_ROOT' is for Gentoo if [ -z "$ROOT" ] then [ -n "$root" ] && ROOT=${root} [ -n "$REAL_ROOT" ] && ROOT=${REAL_ROOT} fi # ------------ # Where to mount the root fs in the initrd - set outside this script # Compatibility: 'rootmnt' is for Debian GNU/Linux (etc), # 'NEWROOT' is for RedHat/Fedora (etc), # 'NEW_ROOT' is for Gentoo if [ -z "$rootmnt" ] then [ -n "$NEWROOT" ] && rootmnt=${NEWROOT} [ -n "$NEW_ROOT" ] && rootmnt=${NEW_ROOT} fi # ------------ # No longer set in the defaults file, but it could have been set in # get_pools() in some circumstances. If it's something, but not 'yes', # it's no good to us. [ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] && \ unset USE_DISK_BY_ID # ---------------------------------------------------------------- # P A R S E C O M M A N D L I N E O P T I O N S # This part is the really ugly part - there's so many options and permutations # 'out there', and if we should make this the 'primary' source for ZFS initrd # scripting, we need/should support them all. # # Supports the following kernel command line argument combinations # (in this order - first match win): # # rpool= (tries to finds bootfs automatically) # bootfs=/ (uses this for rpool - first part) # rpool= bootfs=/ # -B zfs-bootfs=/ (uses this for rpool - first part) # rpool=rpool (default if none of the above is used) # root=/ (uses this for rpool - first part) # root=ZFS=/ (uses this for rpool - first part, without 'ZFS=') # root=zfs:AUTO (tries to detect both pool and rootfs # root=zfs:/ (uses this for rpool - first part, without 'zfs:') # # Option could also be # Option could also be # ------------ # Support force option # In addition, setting one of zfs_force, zfs.force or zfsforce to # 'yes', 'on' or '1' will make sure we force import the pool. 
# This should (almost) never be needed, but it's here for # completeness. ZPOOL_FORCE="" if grep -qiE '(^|[^\\](\\\\)* )(zfs_force|zfs\.force|zfsforce)=(on|yes|1)( |$)' /proc/cmdline then ZPOOL_FORCE="-f" fi # ------------ # Look for 'rpool' and 'bootfs' parameter [ -n "$rpool" ] && ZFS_RPOOL="${rpool#rpool=}" [ -n "$bootfs" ] && ZFS_BOOTFS="${bootfs#bootfs=}" # ------------ # If we have 'ROOT' (see above), but not 'ZFS_BOOTFS', then use # 'ROOT' [ -n "$ROOT" ] && [ -z "${ZFS_BOOTFS}" ] && ZFS_BOOTFS="$ROOT" # ------------ # Check for the `-B zfs-bootfs=%s/%u,...` kind of parameter. # NOTE: Only use the pool name and dataset. The rest is not # supported by OpenZFS (whatever it's for). if [ -z "$ZFS_RPOOL" ] then # The ${zfs-bootfs} variable is set at the kernel command # line, usually by GRUB, but it cannot be referenced here # directly because bourne variable names cannot contain a # hyphen. # # Reassign the variable by dumping the environment and # stripping the zfs-bootfs= prefix. Let the shell handle # quoting through the eval command: # shellcheck disable=SC2046 eval ZFS_RPOOL=$(set | sed -n -e 's,^zfs-bootfs=,,p') fi # ------------ # No root fs or pool specified - do auto detect. if [ -z "$ZFS_RPOOL" ] && [ -z "${ZFS_BOOTFS}" ] then # Do auto detect. Do this by 'cheating' - set 'root=zfs:AUTO' # which will be caught later ROOT='zfs:AUTO' fi # ---------------------------------------------------------------- # F I N D A N D I M P O R T C O R R E C T P O O L # ------------ if [ "$ROOT" = "zfs:AUTO" ] then # Try to detect both pool and root fs. # If we got here, that means we don't have a hint so as to # the root dataset, but with root=zfs:AUTO on cmdline, # this says "zfs:AUTO" here and interferes with checks later ZFS_BOOTFS= [ "$quiet" != "y" ] && \ zfs_log_begin_msg "Attempting to import additional pools." 
# Get a list of pools available for import if [ -n "$ZFS_RPOOL" ] then # We've specified a pool - check only that POOLS=$ZFS_RPOOL else POOLS=$(get_pools) fi OLD_IFS="$IFS" ; IFS=";" for pool in $POOLS do [ -z "$pool" ] && continue IFS="$OLD_IFS" import_pool "$pool" IFS="$OLD_IFS" find_rootfs "$pool" && break done IFS="$OLD_IFS" [ "$quiet" != "y" ] && zfs_log_end_msg $ZFS_ERROR else # No auto - use value from the command line option. # Strip 'zfs:' and 'ZFS='. ZFS_BOOTFS="${ROOT#*[:=]}" # Strip everything after the first slash. ZFS_RPOOL="${ZFS_BOOTFS%%/*}" fi # Import the pool (if not already done so in the AUTO check above). if [ -n "$ZFS_RPOOL" ] && [ -z "${POOL_IMPORTED}" ] then [ "$quiet" != "y" ] && \ zfs_log_begin_msg "Importing ZFS root pool '$ZFS_RPOOL'" import_pool "${ZFS_RPOOL}" find_rootfs "${ZFS_RPOOL}" [ "$quiet" != "y" ] && zfs_log_end_msg fi if [ -z "${POOL_IMPORTED}" ] then # No pool imported, this is serious! disable_plymouth echo "" echo "Command: $ZFS_CMD" echo "Message: $ZFS_STDERR" echo "Error: $ZFS_ERROR" echo "" echo "No pool imported. Manually import the root pool" echo "at the command prompt and then exit." echo "Hint: Try: zpool import -N ${ZFS_RPOOL}" shell fi # In case the pool was specified as guid, resolve guid to name pool="$("${ZPOOL}" get name,guid -o name,value -H | \ awk -v pool="${ZFS_RPOOL}" '$2 == pool { print $1 }')" if [ -n "$pool" ]; then # If $ZFS_BOOTFS contains guid, replace the guid portion with $pool ZFS_BOOTFS=$(echo "$ZFS_BOOTFS" | \ sed -e "s/$("${ZPOOL}" get guid -o value "$pool" -H)/$pool/g") ZFS_RPOOL="${pool}" fi # ---------------------------------------------------------------- # P R E P A R E R O O T F I L E S Y S T E M if [ -n "${ZFS_BOOTFS}" ] then # Booting from a snapshot? # Will overwrite the ZFS_BOOTFS variable like so: # rpool/ROOT/debian@snap2 => rpool/ROOT/debian_snap2 echo "${ZFS_BOOTFS}" | grep -q '@' && \ setup_snapshot_booting "${ZFS_BOOTFS}" fi if [ -z "${ZFS_BOOTFS}" ] then # Still nothing! 
Let the user sort this out. disable_plymouth echo "" echo "Error: Unknown root filesystem - no 'bootfs' pool property and" echo " not specified on the kernel command line." echo "" echo "Manually mount the root filesystem on $rootmnt and then exit." echo "Hint: Try: mount -o zfsutil -t zfs ${ZFS_RPOOL-rpool}/ROOT/system $rootmnt" shell fi # ---------------------------------------------------------------- # M O U N T F I L E S Y S T E M S # * Ideally, the root filesystem would be mounted like this: # # zpool import -R "$rootmnt" -N "$ZFS_RPOOL" # zfs mount -o mountpoint=/ "${ZFS_BOOTFS}" # # but the MOUNTPOINT prefix is preserved on descendent filesystem # after the pivot into the regular root, which later breaks things # like `zfs mount -a` and the /proc/self/mounts refresh. # # * Mount additional filesystems required # Such as /usr, /var, /usr/local etc. # NOTE: Mounted in the order specified in the # ZFS_INITRD_ADDITIONAL_DATASETS variable so take care! # Go through the complete list (recursively) of all filesystems below # the real root dataset filesystems="$("${ZFS}" list -oname -tfilesystem -H -r "${ZFS_BOOTFS}")" OLD_IFS="$IFS" ; IFS=" " for fs in $filesystems; do IFS="$OLD_IFS" mount_fs "$fs" done IFS="$OLD_IFS" for fs in $ZFS_INITRD_ADDITIONAL_DATASETS; do mount_fs "$fs" done touch /run/zfs_unlock_complete if [ -e /run/zfs_unlock_complete_notify ]; then read -r < /run/zfs_unlock_complete_notify fi # ------------ # Debugging information if [ -n "${ZFS_DEBUG}" ] then #exec 2>&1- echo "DEBUG: imported pools:" "${ZPOOL}" list -H echo echo "DEBUG: mounted ZFS filesystems:" mount | grep zfs echo echo "=> waiting for ENTER before continuing because of 'zfsdebug=1'. " printf "%s" " 'c' for shell, 'r' for reboot, 'ENTER' to continue. 
" read -r b [ "$b" = "c" ] && /bin/sh [ "$b" = "r" ] && reboot -f set +x fi # ------------ # Run local bottom script if command -v run_scripts > /dev/null 2>&1 then if [ -f "/scripts/local-bottom" ] || [ -d "/scripts/local-bottom" ] then [ "$quiet" != "y" ] && \ zfs_log_begin_msg "Running /scripts/local-bottom" run_scripts /scripts/local-bottom [ "$quiet" != "y" ] && zfs_log_end_msg fi fi } diff --git a/etc/init.d/zfs-import.in b/etc/init.d/zfs-import.in index e4bc7b8339fc..130174f74d06 100755 --- a/etc/init.d/zfs-import.in +++ b/etc/init.d/zfs-import.in @@ -1,338 +1,337 @@ #!@DEFAULT_INIT_SHELL@ # # zfs-import This script will import ZFS pools # # chkconfig: 2345 01 99 # description: This script will perform a verbatim import of ZFS pools # during system boot. # probe: true # ### BEGIN INIT INFO # Provides: zfs-import # Required-Start: mtab # Required-Stop: $local_fs mtab # Default-Start: S # Default-Stop: 0 1 6 # X-Start-Before: checkfs # X-Stop-After: zfs-mount # Short-Description: Import ZFS pools # Description: Run the `zpool import` command. ### END INIT INFO # # NOTE: Not having '$local_fs' on Required-Start but only on Required-Stop # is on purpose. If we have '$local_fs' in both (and X-Start-Before=checkfs) # we get conflicts - import needs to be started extremely early, # but not stopped too late. # # Released under the 2-clause BSD license. # # This script is based on debian/zfsutils.zfs.init from the # Debian GNU/kFreeBSD zfsutils 8.1-3 package, written by Aurelien Jarno. # Source the common init script . 
@sysconfdir@/zfs/zfs-functions # ---------------------------------------------------- do_depend() { before swap after sysfs udev keyword -lxc -openvz -prefix -vserver } # Use the zpool cache file to import pools do_verbatim_import() { if [ -f "$ZPOOL_CACHE" ] then zfs_action "Importing ZFS pool(s)" \ "$ZPOOL" import -c "$ZPOOL_CACHE" -N -a fi } # Support function to get a list of all pools, separated with ';' find_pools() { local pools pools=$("$@" 2> /dev/null | \ - grep -E "pool:|^[a-zA-Z0-9]" | \ - sed 's@.*: @@' | \ + sed -Ee '/pool:|^[a-zA-Z0-9]/!d' -e 's@.*: @@' | \ sort | \ tr '\n' ';') echo "${pools%%;}" # Return without the last ';'. } # Find and import all visible pools, even exported ones do_import_all_visible() { local already_imported available_pools pool npools local exception dir ZPOOL_IMPORT_PATH RET=0 r=1 # In case not shutdown cleanly. # shellcheck disable=SC2154 [ -n "$init" ] && rm -f /etc/dfs/sharetab # Just simplify code later on. if [ -n "$USE_DISK_BY_ID" ] && [ "$USE_DISK_BY_ID" != 'yes' ] then # It's something, but not 'yes' so it's no good to us. unset USE_DISK_BY_ID fi # Find list of already imported pools. already_imported=$(find_pools "$ZPOOL" list -H -oname) available_pools=$(find_pools "$ZPOOL" import) # Just in case - seen it happen (that a pool isn't visible/found # with a simple "zpool import" but only when using the "-d" # option or setting ZPOOL_IMPORT_PATH). if [ -d "/dev/disk/by-id" ] then npools=$(find_pools "$ZPOOL" import -d /dev/disk/by-id) if [ -n "$npools" ] then # Because we have found extra pool(s) here, which wasn't # found 'normally', we need to force USE_DISK_BY_ID to # make sure we're able to actually import it/them later. USE_DISK_BY_ID='yes' if [ -n "$available_pools" ] then # Filter out duplicates (pools found with the simpl # "zpool import" but which is also found with the # "zpool import -d ..."). 
npools=$(echo "$npools" | sed "s,$available_pools,,") # Add the list to the existing list of # available pools available_pools="$available_pools;$npools" else available_pools="$npools" fi fi fi # Filter out any exceptions... if [ -n "$ZFS_POOL_EXCEPTIONS" ] then local found="" local apools="" OLD_IFS="$IFS" ; IFS=";" for pool in $available_pools do for exception in $ZFS_POOL_EXCEPTIONS do [ "$pool" = "$exception" ] && continue 2 found="$pool" done if [ -n "$found" ] then if [ -n "$apools" ] then apools="$apools;$pool" else apools="$pool" fi fi done IFS="$OLD_IFS" available_pools="$apools" fi # For backwards compatibility, make sure that ZPOOL_IMPORT_PATH is set # to something we can use later with the real import(s). We want to # make sure we find all by* dirs, BUT by-vdev should be first (if it # exists). if [ -n "$USE_DISK_BY_ID" ] && [ -z "$ZPOOL_IMPORT_PATH" ] then local dirs dirs="$(for dir in $(echo /dev/disk/by-*) do # Ignore by-vdev here - we want it first! echo "$dir" | grep -q /by-vdev && continue [ ! -d "$dir" ] && continue printf "%s" "$dir:" done | sed 's,:$,,g')" if [ -d "/dev/disk/by-vdev" ] then # Add by-vdev at the beginning. ZPOOL_IMPORT_PATH="/dev/disk/by-vdev:" fi # Help with getting LUKS partitions etc imported. if [ -d "/dev/mapper" ]; then if [ -n "$ZPOOL_IMPORT_PATH" ]; then ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH:/dev/mapper:" else ZPOOL_IMPORT_PATH="/dev/mapper:" fi fi # ... and /dev at the very end, just for good measure. ZPOOL_IMPORT_PATH="$ZPOOL_IMPORT_PATH$dirs:/dev" fi # Needs to be exported for "zpool" to catch it. [ -n "$ZPOOL_IMPORT_PATH" ] && export ZPOOL_IMPORT_PATH # Mount all available pools (except those set in ZFS_POOL_EXCEPTIONS. # # If not interactive (run from init - variable init='/sbin/init') # we get ONE line for all pools being imported, with just a dot # as status for each pool. # Example: Importing ZFS pool(s)... [OK] # # If it IS interactive (started from the shell manually), then we # get one line per pool importing. 
# Example: Importing ZFS pool pool1 [OK] # Importing ZFS pool pool2 [OK] # [etc] [ -n "$init" ] && zfs_log_begin_msg "Importing ZFS pool(s)" OLD_IFS="$IFS" ; IFS=";" for pool in $available_pools do [ -z "$pool" ] && continue # We have pools that haven't been imported - import them if [ -n "$init" ] then # Not interactive - a dot for each pool. # Except on Gentoo where this doesn't work. zfs_log_progress_msg "." else # Interactive - one 'Importing ...' line per pool zfs_log_begin_msg "Importing ZFS pool $pool" fi # Import by using ZPOOL_IMPORT_PATH (either set above or in # the config file) _or_ with the 'built in' default search # paths. This is the preferred way. # shellcheck disable=SC2086 "$ZPOOL" import -N ${ZPOOL_IMPORT_OPTS} "$pool" 2> /dev/null r="$?" ; RET=$((RET + r)) if [ "$r" -eq 0 ] then # Output success and process the next pool [ -z "$init" ] && zfs_log_end_msg 0 continue fi # We don't want a fail msg here, we're going to try import # using the cache file soon and that might succeed. [ ! -f "$ZPOOL_CACHE" ] && zfs_log_end_msg "$RET" if [ "$r" -gt 0 ] && [ -f "$ZPOOL_CACHE" ] then # Failed to import without a cache file. Try WITH... if [ -z "$init" ] && check_boolean "$VERBOSE_MOUNT" then # Interactive + Verbose = more information zfs_log_progress_msg " using cache file" fi # shellcheck disable=SC2086 "$ZPOOL" import -c "$ZPOOL_CACHE" -N ${ZPOOL_IMPORT_OPTS} \ "$pool" 2> /dev/null r="$?" 
; RET=$((RET + r)) if [ "$r" -eq 0 ] then [ -z "$init" ] && zfs_log_end_msg 0 continue 3 # Next pool fi zfs_log_end_msg "$RET" fi done [ -n "$init" ] && zfs_log_end_msg "$RET" IFS="$OLD_IFS" [ -n "$already_imported" ] && [ -z "$available_pools" ] && return 0 return "$RET" } do_import() { if check_boolean "$ZPOOL_IMPORT_ALL_VISIBLE" then do_import_all_visible else # This is the default option do_verbatim_import fi } # Output the status and list of pools do_status() { check_module_loaded "zfs" || exit 0 "$ZPOOL" status && echo "" && "$ZPOOL" list } do_start() { if check_boolean "$VERBOSE_MOUNT" then zfs_log_begin_msg "Checking if ZFS userspace tools present" fi if checksystem then check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 0 check_boolean "$VERBOSE_MOUNT" && \ zfs_log_begin_msg "Loading kernel ZFS infrastructure" if ! load_module "zfs" then check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 1 return 5 fi check_boolean "$VERBOSE_MOUNT" && zfs_log_end_msg 0 do_import && udev_trigger # just to make sure we get zvols. return 0 else return 1 fi } # ---------------------------------------------------- if [ ! -e /sbin/openrc-run ] then case "$1" in start) do_start ;; stop) # no-op ;; status) do_status ;; force-reload|condrestart|reload|restart) # no-op ;; *) [ -n "$1" ] && echo "Error: Unknown command $1." echo "Usage: $0 {start|status}" exit 3 ;; esac exit $? else # Create wrapper functions since Gentoo don't use the case part. depend() { do_depend; } start() { do_start; } status() { do_status; } fi diff --git a/etc/zfs/zfs-functions.in b/etc/zfs/zfs-functions.in index 10fb5b19a829..30441dc35d4b 100644 --- a/etc/zfs/zfs-functions.in +++ b/etc/zfs/zfs-functions.in @@ -1,434 +1,432 @@ # This is a script with common functions etc used by zfs-import, zfs-load-key, # zfs-mount, zfs-share and zfs-zed. # # It is _NOT_ to be called independently # # Released under the 2-clause BSD license. 
# # This script is based on debian/zfsutils.zfs.init from the # Debian GNU/kFreeBSD zfsutils 8.1-3 package, written by Aurelien Jarno. PATH=/sbin:/bin:/usr/bin:/usr/sbin # Source function library if [ -f /etc/rc.d/init.d/functions ]; then # RedHat and derivatives . /etc/rc.d/init.d/functions elif [ -L /etc/init.d/functions.sh ]; then # Gentoo . /etc/init.d/functions.sh elif [ -f /lib/lsb/init-functions ]; then # LSB, Debian, and derivatives . /lib/lsb/init-functions fi # Of course the functions we need are called differently # on different distributions - it would be way too easy # otherwise!! if type log_failure_msg > /dev/null 2>&1 ; then # LSB functions - fall through zfs_log_begin_msg() { log_begin_msg "$1"; } zfs_log_end_msg() { log_end_msg "$1"; } zfs_log_failure_msg() { log_failure_msg "$1"; } zfs_log_progress_msg() { log_progress_msg "$1"; } elif type success > /dev/null 2>&1 ; then # Fedora/RedHat functions zfs_set_ifs() { # For some reason, the init function library have a problem # with a changed IFS, so this function goes around that. local tIFS="$1" if [ -n "$tIFS" ] then TMP_IFS="$IFS" IFS="$tIFS" fi } zfs_log_begin_msg() { printf "%s" "$1 "; } zfs_log_end_msg() { zfs_set_ifs "$OLD_IFS" if [ "$1" -eq 0 ]; then success else failure fi echo zfs_set_ifs "$TMP_IFS" } zfs_log_failure_msg() { zfs_set_ifs "$OLD_IFS" failure echo zfs_set_ifs "$TMP_IFS" } zfs_log_progress_msg() { printf "%s" "$""$1"; } elif type einfo > /dev/null 2>&1 ; then # Gentoo functions zfs_log_begin_msg() { ebegin "$1"; } zfs_log_end_msg() { eend "$1"; } zfs_log_failure_msg() { eend "$1"; } # zfs_log_progress_msg() { printf "%s" "$1"; } zfs_log_progress_msg() { :; } else # Unknown - simple substitutes. zfs_log_begin_msg() { printf "%s" "$1"; } zfs_log_end_msg() { ret=$1 if [ "$ret" -ge 1 ]; then echo " failed!" 
else echo " success" fi return "$ret" } zfs_log_failure_msg() { echo "$1"; } zfs_log_progress_msg() { printf "%s" "$1"; } fi # Paths to what we need ZFS="@sbindir@/zfs" ZED="@sbindir@/zed" ZPOOL="@sbindir@/zpool" ZPOOL_CACHE="@sysconfdir@/zfs/zpool.cache" # Sensible defaults ZFS_LOAD_KEY='yes' ZFS_UNLOAD_KEY='no' ZFS_MOUNT='yes' ZFS_UNMOUNT='yes' ZFS_SHARE='yes' ZFS_UNSHARE='yes' # Source zfs configuration, overriding the defaults if [ -f @initconfdir@/zfs ]; then . @initconfdir@/zfs fi # ---------------------------------------------------- export ZFS ZED ZPOOL ZPOOL_CACHE ZFS_LOAD_KEY ZFS_UNLOAD_KEY ZFS_MOUNT ZFS_UNMOUNT \ ZFS_SHARE ZFS_UNSHARE zfs_action() { local MSG="$1"; shift local CMD="$*" local ret zfs_log_begin_msg "$MSG " $CMD ret=$? if [ "$ret" -eq 0 ]; then zfs_log_end_msg $ret else zfs_log_failure_msg $ret fi return $ret } # Returns # 0 if daemon has been started # 1 if daemon was already running # 2 if daemon could not be started # 3 if unsupported # zfs_daemon_start() { local PIDFILE="$1"; shift local DAEMON_BIN="$1"; shift if type start-stop-daemon > /dev/null 2>&1 ; then # LSB functions start-stop-daemon --start --quiet --pidfile "$PIDFILE" \ --exec "$DAEMON_BIN" --test > /dev/null || return 1 # shellcheck disable=SC2086 start-stop-daemon --start --quiet --exec "$DAEMON_BIN" -- \ "$@" || return 2 # On Debian, there's a 'sendsigs' script that will # kill basically everything quite early and zed is stopped # much later than that. We don't want zed to be among them, # so add the zed pid to list of pids to ignore. if [ -f "$PIDFILE" ] && [ -d /run/sendsigs.omit.d ] then ln -sf "$PIDFILE" /run/sendsigs.omit.d/zed fi elif type daemon > /dev/null 2>&1 ; then # Fedora/RedHat functions # shellcheck disable=SC2086 daemon --pidfile "$PIDFILE" "$DAEMON_BIN" "$@" return $? 
else # Unsupported return 3 fi return 0 } # Returns # 0 if daemon has been stopped # 1 if daemon was already stopped # 2 if daemon could not be stopped # 3 if unsupported # zfs_daemon_stop() { local PIDFILE="$1" local DAEMON_BIN="$2" local DAEMON_NAME="$3" if type start-stop-daemon > /dev/null 2>&1 ; then # LSB functions start-stop-daemon --stop --quiet --retry=TERM/30/KILL/5 \ --pidfile "$PIDFILE" --name "$DAEMON_NAME" ret="$?" [ "$ret" = 0 ] && rm -f "$PIDFILE" return "$ret" elif type killproc > /dev/null 2>&1 ; then # Fedora/RedHat functions killproc -p "$PIDFILE" "$DAEMON_NAME" ret="$?" [ "$ret" = 0 ] && rm -f "$PIDFILE" return "$ret" else # Unsupported return 3 fi return 0 } # Returns status zfs_daemon_status() { local PIDFILE="$1" local DAEMON_BIN="$2" local DAEMON_NAME="$3" if type status_of_proc > /dev/null 2>&1 ; then # LSB functions status_of_proc "$DAEMON_NAME" "$DAEMON_BIN" return $? elif type status > /dev/null 2>&1 ; then # Fedora/RedHat functions status -p "$PIDFILE" "$DAEMON_NAME" return $? else # Unsupported return 3 fi return 0 } zfs_daemon_reload() { local PIDFILE="$1" local DAEMON_NAME="$2" if type start-stop-daemon > /dev/null 2>&1 ; then # LSB functions start-stop-daemon --stop --signal 1 --quiet \ --pidfile "$PIDFILE" --name "$DAEMON_NAME" return $? elif type killproc > /dev/null 2>&1 ; then # Fedora/RedHat functions killproc -p "$PIDFILE" "$DAEMON_NAME" -HUP return $? else # Unsupported return 3 fi return 0 } zfs_installed() { if [ ! -x "$ZPOOL" ]; then return 1 else # Test if it works (will catch missing/broken libs etc) "$ZPOOL" -? > /dev/null 2>&1 return $? fi if [ ! -x "$ZFS" ]; then return 2 else # Test if it works (will catch missing/broken libs etc) "$ZFS" -? > /dev/null 2>&1 return $? fi return 0 } # Trigger udev and wait for it to settle. 
udev_trigger() { if [ -x /sbin/udevadm ]; then /sbin/udevadm trigger --action=change --subsystem-match=block /sbin/udevadm settle elif [ -x /sbin/udevsettle ]; then /sbin/udevtrigger /sbin/udevsettle fi } # Do a lot of checks to make sure it's 'safe' to continue with the import. checksystem() { if grep -qiE '(^|[^\\](\\\\)* )zfs=(off|no|0)( |$)' /proc/cmdline; then # Called with zfs=(off|no|0) - bail because we don't # want anything import, mounted or shared. # HOWEVER, only do this if we're called at the boot up # (from init), not if we're running interactively (as in # from the shell - we know what we're doing). # shellcheck disable=SC2154 [ -n "$init" ] && exit 3 fi # Check if ZFS is installed. zfs_installed || return 5 # Just make sure that /dev/zfs is created. udev_trigger return 0 } get_root_pool() { # shellcheck disable=SC2046 set -- $(mount | grep ' on / ') [ "$5" = "zfs" ] && echo "${1%%/*}" } # Check if a variable is 'yes' (any case) or '1' # Returns TRUE if set. check_boolean() { local var="$1" echo "$var" | grep -Eiq "^yes$|^on$|^true$|^1$" && return 0 || return 1 } check_module_loaded() { module="$1" [ -r "/sys/module/${module}/version" ] && return 0 || return 1 } load_module() { module="$1" # Load the zfs module stack if ! check_module_loaded "$module"; then if ! /sbin/modprobe "$module"; then return 5 fi fi return 0 } # first parameter is a regular expression that filters mtab read_mtab() { local match="$1" local fs mntpnt fstype opts rest # Unset all MTAB_* variables # shellcheck disable=SC2046 - unset $(env | grep ^MTAB_ | sed 's,=.*,,') + unset $(env | sed -e '/^MTAB_/!d' -e 's,=.*,,') while read -r fs mntpnt fstype opts rest; do if echo "$fs $mntpnt $fstype $opts" | grep -qE "$match"; then # * Fix problems (!?) in the mounts file. It will record # 'rpool 1' as 'rpool\0401' instead of 'rpool\00401' # which seems to be the correct (at least as far as # 'printf' is concerned). 
# * We need to use the external echo, because the # internal one would interpret the backslash code # (incorrectly), giving us a  instead. mntpnt=$(/bin/echo "$mntpnt" | sed 's,\\0,\\00,g') fs=$(/bin/echo "$fs" | sed 's,\\0,\\00,') # Remove 'unwanted' characters. - mntpnt=$(printf '%b\n' "$mntpnt" | sed -e 's,/,,g' \ - -e 's,-,,g' -e 's,\.,,g' -e 's, ,,g') - fs=$(printf '%b\n' "$fs") + mntpnt=$(printf '%b' "$mntpnt" | tr -d '/. -') + fs=$(printf '%b' "$fs") # Set the variable. eval export "MTAB_$mntpnt=\"$fs\"" fi done < /proc/self/mounts } in_mtab() { local mntpnt="$1" # Remove 'unwanted' characters. - mntpnt=$(printf '%b\n' "$mntpnt" | sed -e 's,/,,g' \ - -e 's,-,,g' -e 's,\.,,g' -e 's, ,,g') + mntpnt=$(printf '%b' "$mntpnt" | tr -d '/. -') local var var="$(eval echo "MTAB_$mntpnt")" [ "$(eval echo "$""$var")" != "" ] return "$?" } # first parameter is a regular expression that filters fstab read_fstab() { local match="$1" local i var # Unset all FSTAB_* variables # shellcheck disable=SC2046 - unset $(env | grep ^FSTAB_ | sed 's,=.*,,') + unset $(env | sed -e '/^FSTAB_/!d' -e 's,=.*,,') i=0 while read -r fs mntpnt fstype opts; do echo "$fs" | grep -qE '^#|^$' && continue echo "$mntpnt" | grep -qE '^none|^swap' && continue echo "$fstype" | grep -qE '^swap' && continue if echo "$fs $mntpnt $fstype $opts" | grep -qE "$match"; then eval export "FSTAB_dev_$i=$fs" - fs=$(printf '%b\n' "$fs" | sed 's,/,_,g') + fs=$(printf '%b' "$fs" | tr '/' '_') eval export "FSTAB_$i=$mntpnt" i=$((i + 1)) fi done < /etc/fstab } in_fstab() { local var var="$(eval echo "FSTAB_$1")" [ "${var}" != "" ] return $? 
} is_mounted() { local mntpt="$1" local mp while read -r _ mp _; do [ "$mp" = "$mntpt" ] && return 0 done < /proc/self/mounts return 1 } diff --git a/rpm/generic/zfs-dkms.spec.in b/rpm/generic/zfs-dkms.spec.in index aab1d9399077..02be716aa964 100644 --- a/rpm/generic/zfs-dkms.spec.in +++ b/rpm/generic/zfs-dkms.spec.in @@ -1,113 +1,113 @@ %{?!packager: %define packager Brian Behlendorf } %if ! 0%{?rhel}%{?fedora}%{?mageia}%{?suse_version} %define not_rpm 1 %endif # Exclude input files from mangling %global __brp_mangle_shebangs_exclude_from ^/usr/src/.*$ %define module @PACKAGE@ %define mkconf scripts/dkms.mkconf Name: %{module}-dkms Version: @VERSION@ Release: @RELEASE@%{?dist} Summary: Kernel module(s) (dkms) Group: System Environment/Kernel License: @ZFS_META_LICENSE@ URL: https://github.com/openzfs/zfs Source0: %{module}-%{version}.tar.gz BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root-%(%{__id_u} -n) BuildArch: noarch Requires: dkms >= 2.2.0.3 Requires(post): dkms >= 2.2.0.3 Requires(preun): dkms >= 2.2.0.3 Requires: gcc, make, perl, diffutils Requires(post): gcc, make, perl, diffutils %if 0%{?rhel}%{?fedora}%{?mageia}%{?suse_version} Requires: kernel-devel >= @ZFS_META_KVER_MIN@, kernel-devel <= @ZFS_META_KVER_MAX@.999 Requires(post): kernel-devel >= @ZFS_META_KVER_MIN@, kernel-devel <= @ZFS_META_KVER_MAX@.999 Obsoletes: spl-dkms %endif Provides: %{module}-kmod = %{version} AutoReqProv: no %if 0%{?rhel}%{?fedora}%{?suse_version} # We don't directly use it, but if this isn't installed, rpmbuild as root can # crash+corrupt rpmdb # See issue #12071 BuildRequires: ncompress %endif %description This package contains the dkms ZFS kernel modules. 
%prep %setup -q -n %{module}-%{version} %build %{mkconf} -n %{module} -v %{version} -f dkms.conf %install if [ "$RPM_BUILD_ROOT" != "/" ]; then rm -rf $RPM_BUILD_ROOT fi mkdir -p $RPM_BUILD_ROOT/usr/src/ cp -rf ${RPM_BUILD_DIR}/%{module}-%{version} $RPM_BUILD_ROOT/usr/src/ %clean if [ "$RPM_BUILD_ROOT" != "/" ]; then rm -rf $RPM_BUILD_ROOT fi %files %defattr(-,root,root) /usr/src/%{module}-%{version} %post for POSTINST in /usr/lib/dkms/common.postinst; do if [ -f $POSTINST ]; then $POSTINST %{module} %{version} exit $? fi echo "WARNING: $POSTINST does not exist." done echo -e "ERROR: DKMS version is too old and %{module} was not" echo -e "built with legacy DKMS support." echo -e "You must either rebuild %{module} with legacy postinst" echo -e "support or upgrade DKMS to a more current version." exit 1 %preun # Are we doing an upgrade? if [ "$1" = "1" -o "$1" = "upgrade" ] ; then # Yes we are. Are we upgrading to a new ZFS version? - NEWEST_VER=$(dkms status zfs | sed 's/,//g' | sort -r -V | awk '/installed/{print $2; exit}') + NEWEST_VER=$(dkms status zfs | tr -d , | sort -r -V | awk '/installed/{print $2; exit}') if [ "$NEWEST_VER" != "%{version}" ] ; then # Yes, it's a new ZFS version. We'll uninstall the old module # later on in this script. true else # No, it's probably an upgrade of the same ZFS version # to a new distro (zfs-dkms-0.7.12.fc28->zfs-dkms-0.7.12.fc29). # Don't remove our modules, since the rebuild for the new # distro will automatically delete the old modules. exit 0 fi fi # If we're here then we're doing an uninstall (not upgrade). 
CONFIG_H="/var/lib/dkms/%{module}/%{version}/*/*/%{module}_config.h" SPEC_META_ALIAS="@PACKAGE@-@VERSION@-@RELEASE@" DKMS_META_ALIAS=`cat $CONFIG_H 2>/dev/null | awk -F'"' '/META_ALIAS\s+"/ { print $2; exit 0 }'` if [ "$SPEC_META_ALIAS" = "$DKMS_META_ALIAS" ]; then echo -e echo -e "Uninstall of %{module} module ($SPEC_META_ALIAS) beginning:" dkms remove -m %{module} -v %{version} --all %{!?not_rpm:--rpm_safe_upgrade} fi exit 0 diff --git a/scripts/kmodtool b/scripts/kmodtool index 26bacf5991d2..b1021596997e 100755 --- a/scripts/kmodtool +++ b/scripts/kmodtool @@ -1,625 +1,625 @@ #!/usr/bin/env bash # shellcheck disable=SC2086 # kmodtool - Helper script for building kernel module RPMs # Copyright (c) 2003-2012 Ville Skyttä , # Thorsten Leemhuis # Nicolas Chauvet # # Permission is hereby granted, free of charge, to any person obtaining # a copy of this software and associated documentation files (the # "Software"), to deal in the Software without restriction, including # without limitation the rights to use, copy, modify, merge, publish, # distribute, sublicense, and/or sell copies of the Software, and to # permit persons to whom the Software is furnished to do so, subject to # the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE # LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION # OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION # WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
shopt -s extglob myprog="kmodtool-${repo}" myver="0.12.1" kmodname= build_kernels="current" kernels_known_variants= kernel_versions= kernel_versions_to_build_for= prefix= filterfile= target= buildroot= dashvariant= error_out() { local errorlevel=${1} shift echo "Error: $*" >&2 # the next line is not multi-line safe -- not needed *yet* echo "%global kmodtool_check echo \"kmodtool error: $*\"; exit ${errorlevel};" exit "${errorlevel}" } print_rpmtemplate_header() { echo echo '%global kmodinstdir_prefix '${prefix}/lib/modules/ echo '%global kmodinstdir_postfix '/extra/${kmodname}/ echo '%global kernel_versions '${kernel_versions} echo } print_akmodtemplate () { echo cat <= %{?epoch:%{epoch}:}%{version} Provides: ${kmodname}-kmod = %{?epoch:%{epoch}:}%{version}-%{release} EOF if [[ ${obsolete_name} ]]; then echo "Provides: akmod-${obsolete_name} = ${obsolete_version}" echo "Obsoletes: akmod-${obsolete_name} < ${obsolete_version}" fi cat < /dev/null & %files -n akmod-${kmodname} %defattr(-,root,root,-) %{_usrsrc}/akmods/* EOF } print_akmodmeta () { cat <= %{?epoch:%{epoch}:}%{version} %if 0%{?rhel} == 6 || 0%{?centos} == 6 Requires(post): module-init-tools Requires(postun): module-init-tools %else Requires(post): kmod Requires(postun): kmod %endif EOF if [[ ${obsolete_name} ]]; then echo "Provides: kmod-${obsolete_name}-${kernel_uname_r} = ${obsolete_version}" echo "Obsoletes: kmod-${obsolete_name}-${kernel_uname_r} < ${obsolete_version}" fi # second part if [[ ! 
"${customkernel}" ]]; then cat < /dev/null || : elif [[ -f "/lib/modules/${kernel_uname_r}/System.map" ]]; then ${prefix}${depmod_path} -aeF /lib/modules/${kernel_uname_r}/System.map ${kernel_uname_r} > /dev/null || : else ${prefix}${depmod_path} -ae ${kernel_uname_r} &> /dev/null || : fi %postun -n kmod-${kmodname}-${kernel_uname_r} if [[ -f "/boot/System.map-${kernel_uname_r}" ]]; then ${prefix}${depmod_path} -aF /boot/System.map-${kernel_uname_r} ${kernel_uname_r} &> /dev/null || : elif [[ -f "/lib/modules/${kernel_uname_r}/System.map" ]]; then ${prefix}${depmod_path} -aF /lib/modules/${kernel_uname_r}/System.map ${kernel_uname_r} &> /dev/null || : else ${prefix}${depmod_path} -a ${kernel_uname_r} &> /dev/null || : fi EOF else cat < /dev/null || : %postun -n kmod-${kmodname}-${kernel_uname_r} [[ "\$(uname -r)" == "${kernel_uname_r}" ]] && ${prefix}${depmod_path} -a > /dev/null || : EOF fi # third part cat <= %{?epoch:%{epoch}:}%{version}-%{release}" fi if [[ ${obsolete_name} ]]; then echo "Provides: kmod-${obsolete_name}-devel = ${obsolete_version}" echo "Obsoletes: kmod-${obsolete_name}-devel < ${obsolete_version}" fi cat < objects for the newest kernel. 
%files -n kmod-${kmodname}-devel %defattr(644,root,root,755) %{_usrsrc}/${kmodname}-%{version} EOF if [[ ${obsolete_name} ]]; then echo "%{_usrsrc}/${obsolete_name}-%{version}" fi for kernel in ${1}; do local kernel_uname_r=${kernel} echo "%exclude %{_usrsrc}/${kmodname}-%{version}/${kernel_uname_r}" if [[ ${obsolete_name} ]]; then echo "%exclude %{_usrsrc}/${obsolete_name}-%{version}/${kernel_uname_r}" fi done echo echo } print_rpmtemplate_per_kmoddevelpkg () { if [[ "${1}" == "--custom" ]]; then shift local customkernel=true elif [[ "${1}" == "--redhat" ]]; then # this is needed for akmods shift local redhatkernel=true fi local kernel_uname_r=${1} local kernel_variant="${2:+-${2}}" # first part cat <= %{?epoch:%{epoch}:}%{version}-%{release} %{?KmodsMetaRequires:Requires: %{?KmodsMetaRequires}} EOF if [[ ${obsolete_name} ]]; then echo "Provides: kmod-${obsolete_name}${kernel_variant} = ${obsolete_version}" echo "Obsoletes: kmod-${obsolete_name}${kernel_variant} < ${obsolete_version}" fi cat < -- filter the results with grep --file " echo " --for-kernels -- created templates only for these kernels" echo " --kmodname -- name of the kmod (required)" echo " --devel -- make kmod-devel package" echo " --noakmod -- no akmod package" echo " --repo -- use buildsys-build--kerneldevpkgs" echo " --target -- target-arch (required)" echo " --buildroot -- Build root (place to look for build files)" } while [ "${1}" ] ; do case "${1}" in --filterfile) shift if [[ ! "${1}" ]] ; then error_out 2 "Please provide path to a filter-file together with --filterfile" >&2 elif [[ ! -e "${1}" ]]; then error_out 2 "Filterfile ${1} not found" >&2 fi filterfile="${1}" shift ;; --kmodname) shift if [[ ! "${1}" ]] ; then error_out 2 "Please provide the name of the kmod together with --kmodname" >&2 fi # strip pending -kmod kmodname="${1%%-kmod}" shift ;; --devel) shift devel="true" ;; --prefix) shift if [[ ! 
"${1}" ]] ; then error_out 2 "Please provide a prefix with --prefix" >&2 fi prefix="${1}" shift ;; --repo) shift if [[ ! "${1}" ]] ; then error_out 2 "Please provide the name of the repo together with --repo" >&2 fi repo=${1} shift ;; --for-kernels) shift if [[ ! "${1}" ]] ; then error_out 2 "Please provide the name of the kmod together with --kmodname" >&2 fi for_kernels="${1}" shift ;; --noakmod) shift noakmod="true" ;; --obsolete-name) shift if [[ ! "${1}" ]] ; then error_out 2 "Please provide the name of the kmod to obsolete together with --obsolete-name" >&2 fi obsolete_name="${1}" shift ;; --obsolete-version) shift if [[ ! "${1}" ]] ; then error_out 2 "Please provide the version of the kmod to obsolete together with --obsolete-version" >&2 fi obsolete_version="${1}" shift ;; --target) shift target="${1}" shift ;; --akmod) shift build_kernels="akmod" ;; --newest) shift build_kernels="newest" ;; --current) shift build_kernels="current" ;; --buildroot) shift buildroot="${1}" shift ;; --help) myprog_help exit 0 ;; --version) echo "${myprog} ${myver}" exit 0 ;; *) echo "Error: Unknown option '${1}'." >&2 usage >&2 exit 2 ;; esac done if [[ -e ./kmodtool-kernel-variants ]]; then kernels_known_variants="$(cat ./kmodtool-kernel-variants)" elif [[ -e /usr/share/kmodtool/kernel-variants ]] ; then kernels_known_variants="$(cat /usr/share/kmodtool/kernel-variants)" else kernels_known_variants="@(smp?(-debug)|PAE?(-debug)|debug|kdump|xen|kirkwood|highbank|imx|omap|tegra)" fi # general sanity checks if [[ ! "${target}" ]]; then error_out 2 "please pass target arch with --target" elif [[ ! "${kmodname}" ]]; then error_out 2 "please pass kmodname with --kmodname" elif [[ ! "${kernels_known_variants}" ]] ; then error_out 2 "could not determine known variants" elif { [[ "${obsolete_name}" ]] && [[ ! "${obsolete_version}" ]]; } || { [[ ! 
"${obsolete_name}" ]] && [[ "${obsolete_version}" ]]; } ; then error_out 2 "you need to provide both --obsolete-name and --obsolete-version" fi # go if [[ "${for_kernels}" ]]; then # this is easy: print_customrpmtemplate "${for_kernels}" elif [[ "${build_kernels}" == "akmod" ]]; then # do only a akmod package print_akmodtemplate print_akmodmeta else # seems we are on out own to decide for which kernels to build # we need more sanity checks in this case if [[ ! "${repo}" ]]; then error_out 2 "please provide repo name with --repo" elif ! command -v "buildsys-build-${repo}-kerneldevpkgs" &> /dev/null ; then error_out 2 "buildsys-build-${repo}-kerneldevpkgs not found" fi # call buildsys-build-${repo}-kerneldevpkgs to get the list of kernels cmdoptions="--target ${target}" # filterfile to filter list of kernels? if [[ "${filterfile}" ]] ; then cmdoptions="${cmdoptions} --filterfile ${filterfile}" fi kernel_versions_to_build_for="$(buildsys-build-${repo}-kerneldevpkgs --${build_kernels} ${cmdoptions})" returncode=$? if (( returncode != 0 )); then error_out 2 "buildsys-build-${repo}-kerneldevpkgs failed: $(buildsys-build-${repo}-kerneldevpkgs --${build_kernels} ${cmdoptions})" fi if [[ "${build_kernels}" == "current" ]] && [[ ! "${noakmod}" ]]; then print_akmodtemplate fi print_rpmtemplate fi diff --git a/scripts/zfs-tests.sh b/scripts/zfs-tests.sh index 60499e09e249..f871a51d34c2 100755 --- a/scripts/zfs-tests.sh +++ b/scripts/zfs-tests.sh @@ -1,748 +1,748 @@ #!/bin/sh # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License, Version 1.0 only # (the "License"). You may not use this file except in compliance # with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. 
# # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright 2020 OmniOS Community Edition (OmniOSce) Association. # BASE_DIR=$(dirname "$0") SCRIPT_COMMON=common.sh if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then . "${BASE_DIR}/${SCRIPT_COMMON}" else echo "Missing helper script ${SCRIPT_COMMON}" && exit 1 fi PROG=zfs-tests.sh VERBOSE="no" QUIET="" CLEANUP="yes" CLEANUPALL="no" LOOPBACK="yes" STACK_TRACER="no" FILESIZE="4G" DEFAULT_RUNFILES="common.run,$(uname | tr '[:upper:]' '[:lower:]').run" RUNFILES=${RUNFILES:-$DEFAULT_RUNFILES} FILEDIR=${FILEDIR:-/var/tmp} DISKS=${DISKS:-""} SINGLETEST="" SINGLETESTUSER="root" TAGS="" ITERATIONS=1 ZFS_DBGMSG="$STF_SUITE/callbacks/zfs_dbgmsg.ksh" ZFS_DMESG="$STF_SUITE/callbacks/zfs_dmesg.ksh" UNAME=$(uname -s) RERUN="" # Override some defaults if on FreeBSD if [ "$UNAME" = "FreeBSD" ] ; then TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DMESG"} LOSETUP=/sbin/mdconfig DMSETUP=/sbin/gpart else ZFS_MMP="$STF_SUITE/callbacks/zfs_mmp.ksh" TESTFAIL_CALLBACKS=${TESTFAIL_CALLBACKS:-"$ZFS_DBGMSG:$ZFS_DMESG:$ZFS_MMP"} LOSETUP=${LOSETUP:-/sbin/losetup} DMSETUP=${DMSETUP:-/sbin/dmsetup} fi # # Log an informational message when additional verbosity is enabled. # msg() { if [ "$VERBOSE" = "yes" ]; then echo "$@" fi } # # Log a failure message, cleanup, and return an error. 
# fail() { echo "$PROG: $1" >&2 cleanup exit 1 } cleanup_freebsd_loopback() { for TEST_LOOPBACK in ${LOOPBACKS}; do if [ -c "/dev/${TEST_LOOPBACK}" ]; then sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}" || echo "Failed to destroy: ${TEST_LOOPBACK}" fi done } cleanup_linux_loopback() { for TEST_LOOPBACK in ${LOOPBACKS}; do - LOOP_DEV=$(basename "$TEST_LOOPBACK") + LOOP_DEV="${TEST_LOOPBACK##*/}" DM_DEV=$(sudo "${DMSETUP}" ls 2>/dev/null | \ grep "${LOOP_DEV}" | cut -f1) if [ -n "$DM_DEV" ]; then sudo "${DMSETUP}" remove "${DM_DEV}" || echo "Failed to remove: ${DM_DEV}" fi if [ -n "${TEST_LOOPBACK}" ]; then sudo "${LOSETUP}" -d "${TEST_LOOPBACK}" || echo "Failed to remove: ${TEST_LOOPBACK}" fi done } # # Attempt to remove loopback devices and files which where created earlier # by this script to run the test framework. The '-k' option may be passed # to the script to suppress cleanup for debugging purposes. # cleanup() { if [ "$CLEANUP" = "no" ]; then return 0 fi if [ "$LOOPBACK" = "yes" ]; then if [ "$UNAME" = "FreeBSD" ] ; then cleanup_freebsd_loopback else cleanup_linux_loopback fi fi for TEST_FILE in ${FILES}; do rm -f "${TEST_FILE}" >/dev/null 2>&1 done if [ "$STF_PATH_REMOVE" = "yes" ] && [ -d "$STF_PATH" ]; then rm -Rf "$STF_PATH" fi } trap cleanup EXIT # # Attempt to remove all testpools (testpool.XXX), unopened dm devices, # loopback devices, and files. This is a useful way to cleanup a previous # test run failure which has left the system in an unknown state. This can # be dangerous and should only be used in a dedicated test environment. 
# cleanup_all() { TEST_POOLS=$(sudo "$ZPOOL" list -H -o name | grep testpool) if [ "$UNAME" = "FreeBSD" ] ; then TEST_LOOPBACKS=$(sudo "${LOSETUP}" -l) else TEST_LOOPBACKS=$(sudo "${LOSETUP}" -a|grep file-vdev|cut -f1 -d:) fi TEST_FILES=$(ls /var/tmp/file-vdev* 2>/dev/null) msg msg "--- Cleanup ---" msg "Removing pool(s): $(echo "${TEST_POOLS}" | tr '\n' ' ')" for TEST_POOL in $TEST_POOLS; do sudo "$ZPOOL" destroy "${TEST_POOL}" done if [ "$UNAME" != "FreeBSD" ] ; then msg "Removing dm(s): $(sudo "${DMSETUP}" ls | grep loop | tr '\n' ' ')" sudo "${DMSETUP}" remove_all fi msg "Removing loopback(s): $(echo "${TEST_LOOPBACKS}" | tr '\n' ' ')" for TEST_LOOPBACK in $TEST_LOOPBACKS; do if [ "$UNAME" = "FreeBSD" ] ; then sudo "${LOSETUP}" -d -u "${TEST_LOOPBACK}" else sudo "${LOSETUP}" -d "${TEST_LOOPBACK}" fi done msg "Removing files(s): $(echo "${TEST_FILES}" | tr '\n' ' ')" for TEST_FILE in $TEST_FILES; do sudo rm -f "${TEST_FILE}" done } # # Takes a name as the only arguments and looks for the following variations # on that name. If one is found it is returned. # # $RUNFILE_DIR/ # $RUNFILE_DIR/.run # # .run # find_runfile() { NAME=$1 RESULT="" if [ -f "$RUNFILE_DIR/$NAME" ]; then RESULT="$RUNFILE_DIR/$NAME" elif [ -f "$RUNFILE_DIR/$NAME.run" ]; then RESULT="$RUNFILE_DIR/$NAME.run" elif [ -f "$NAME" ]; then RESULT="$NAME" elif [ -f "$NAME.run" ]; then RESULT="$NAME.run" fi echo "$RESULT" } # # Symlink file if it appears under any of the given paths. # create_links() { dir_list="$1" file_list="$2" [ -n "$STF_PATH" ] || fail "STF_PATH wasn't correctly set" for i in $file_list; do for j in $dir_list; do [ ! -e "$STF_PATH/$i" ] || continue if [ ! -d "$j/$i" ] && [ -e "$j/$i" ]; then ln -sf "$j/$i" "$STF_PATH/$i" || \ fail "Couldn't link $i" break fi done [ ! -e "$STF_PATH/$i" ] && \ STF_MISSING_BIN="$STF_MISSING_BIN $i" done STF_MISSING_BIN=${STF_MISSING_BIN# } } # # Constrain the path to limit the available binaries to a known set. 
# When running in-tree a top level ./bin/ directory is created for # convenience, otherwise a temporary directory is used. # constrain_path() { . "$STF_SUITE/include/commands.cfg" # On FreeBSD, base system zfs utils are in /sbin and OpenZFS utils # install to /usr/local/sbin. To avoid testing the wrong utils we # need /usr/local to come before / in the path search order. SYSTEM_DIRS="/usr/local/bin /usr/local/sbin" SYSTEM_DIRS="$SYSTEM_DIRS /usr/bin /usr/sbin /bin /sbin $LIBEXEC_DIR" if [ "$INTREE" = "yes" ]; then # Constrained path set to ./zfs/bin/ STF_PATH="$BIN_DIR" STF_PATH_REMOVE="no" STF_MISSING_BIN="" if [ ! -d "$STF_PATH" ]; then mkdir "$STF_PATH" chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH" fi # Special case links for standard zfs utilities DIRS="$(find "$CMD_DIR" -type d \( ! -name .deps -a \ ! -name .libs \) -print | tr '\n' ' ')" create_links "$DIRS" "$ZFS_FILES" # Special case links for zfs test suite utilities DIRS="$(find "$STF_SUITE" -type d \( ! -name .deps -a \ ! 
-name .libs \) -print | tr '\n' ' ')" create_links "$DIRS" "$ZFSTEST_FILES" else # Constrained path set to /var/tmp/constrained_path.* SYSTEMDIR=${SYSTEMDIR:-/var/tmp/constrained_path.XXXXXX} STF_PATH=$(mktemp -d "$SYSTEMDIR") STF_PATH_REMOVE="yes" STF_MISSING_BIN="" chmod 755 "$STF_PATH" || fail "Couldn't chmod $STF_PATH" # Special case links for standard zfs utilities create_links "$SYSTEM_DIRS" "$ZFS_FILES" # Special case links for zfs test suite utilities create_links "$STF_SUITE/bin" "$ZFSTEST_FILES" fi # Standard system utilities SYSTEM_FILES="$SYSTEM_FILES_COMMON" if [ "$UNAME" = "FreeBSD" ] ; then SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_FREEBSD" else SYSTEM_FILES="$SYSTEM_FILES $SYSTEM_FILES_LINUX" fi create_links "$SYSTEM_DIRS" "$SYSTEM_FILES" # Exceptions ln -fs "$STF_PATH/awk" "$STF_PATH/nawk" if [ "$UNAME" = "Linux" ] ; then ln -fs /sbin/fsck.ext4 "$STF_PATH/fsck" ln -fs /sbin/mkfs.ext4 "$STF_PATH/newfs" ln -fs "$STF_PATH/gzip" "$STF_PATH/compress" ln -fs "$STF_PATH/gunzip" "$STF_PATH/uncompress" ln -fs "$STF_PATH/exportfs" "$STF_PATH/share" ln -fs "$STF_PATH/exportfs" "$STF_PATH/unshare" elif [ "$UNAME" = "FreeBSD" ] ; then ln -fs /usr/local/bin/ksh93 "$STF_PATH/ksh" fi } # # Output a useful usage message. 
# usage() { cat << EOF USAGE: $0 [-hvqxkfS] [-s SIZE] [-r RUNFILES] [-t PATH] [-u USER] DESCRIPTION: ZFS Test Suite launch script OPTIONS: -h Show this message -v Verbose zfs-tests.sh output -q Quiet test-runner output -x Remove all testpools, dm, lo, and files (unsafe) -k Disable cleanup after test failure -f Use files only, disables block device tests -S Enable stack tracer (negative performance impact) -c Only create and populate constrained path -R Automatically rerun failing tests -n NFSFILE Use the nfsfile to determine the NFS configuration -I NUM Number of iterations -d DIR Use DIR for files and loopback devices -s SIZE Use vdevs of SIZE (default: 4G) -r RUNFILES Run tests in RUNFILES (default: ${DEFAULT_RUNFILES}) -t PATH Run single test at PATH relative to test suite -T TAGS Comma separated list of tags (default: 'functional') -u USER Run single test as USER (default: root) EXAMPLES: # Run the default (linux) suite of tests and output the configuration used. $0 -v # Run a smaller suite of tests designed to run more quickly. $0 -r linux-fast # Run a single test $0 -t tests/functional/cli_root/zfs_bookmark/zfs_bookmark_cliargs.ksh # Cleanup a previous run of the test suite prior to testing, run the # default (linux) suite of tests and perform no cleanup on exit. $0 -x EOF } while getopts 'hvqxkfScRn:d:s:r:?t:T:u:I:' OPTION; do case $OPTION in h) usage exit 1 ;; v) VERBOSE="yes" ;; q) QUIET="yes" ;; x) CLEANUPALL="yes" ;; k) CLEANUP="no" ;; f) LOOPBACK="no" ;; S) STACK_TRACER="yes" ;; c) constrain_path exit ;; R) RERUN="yes" ;; n) nfsfile=$OPTARG [ -f "$nfsfile" ] || fail "Cannot read file: $nfsfile" export NFS=1 . "$nfsfile" ;; d) FILEDIR="$OPTARG" ;; I) ITERATIONS="$OPTARG" if [ "$ITERATIONS" -le 0 ]; then fail "Iterations must be greater than 0." fi ;; s) FILESIZE="$OPTARG" ;; r) RUNFILES="$OPTARG" ;; t) if [ -n "$SINGLETEST" ]; then fail "-t can only be provided once." fi SINGLETEST="$OPTARG" ;; T) TAGS="$OPTARG" ;; u) SINGLETESTUSER="$OPTARG" ;; ?) 
usage exit ;; esac done shift $((OPTIND-1)) FILES=${FILES:-"$FILEDIR/file-vdev0 $FILEDIR/file-vdev1 $FILEDIR/file-vdev2"} LOOPBACKS=${LOOPBACKS:-""} if [ -n "$SINGLETEST" ]; then if [ -n "$TAGS" ]; then fail "-t and -T are mutually exclusive." fi RUNFILE_DIR="/var/tmp" RUNFILES="zfs-tests.$$.run" SINGLEQUIET="False" if [ -n "$QUIET" ]; then SINGLEQUIET="True" fi cat >$RUNFILE_DIR/$RUNFILES << EOF [DEFAULT] pre = quiet = $SINGLEQUIET pre_user = root user = $SINGLETESTUSER timeout = 600 post_user = root post = outputdir = /var/tmp/test_results EOF SINGLETESTDIR=$(dirname "$SINGLETEST") SINGLETESTFILE=$(basename "$SINGLETEST") SETUPSCRIPT= CLEANUPSCRIPT= if [ -f "$STF_SUITE/$SINGLETESTDIR/setup.ksh" ]; then SETUPSCRIPT="setup" fi if [ -f "$STF_SUITE/$SINGLETESTDIR/cleanup.ksh" ]; then CLEANUPSCRIPT="cleanup" fi cat >>$RUNFILE_DIR/$RUNFILES << EOF [$SINGLETESTDIR] tests = ['$SINGLETESTFILE'] pre = $SETUPSCRIPT post = $CLEANUPSCRIPT tags = ['functional'] EOF fi # # Use default tag if none was specified # TAGS=${TAGS:='functional'} # # Attempt to locate the runfiles describing the test workload. # R="" IFS=, for RUNFILE in $RUNFILES; do if [ -n "$RUNFILE" ]; then SAVED_RUNFILE="$RUNFILE" RUNFILE=$(find_runfile "$RUNFILE") [ -z "$RUNFILE" ] && fail "Cannot find runfile: $SAVED_RUNFILE" R="$R,$RUNFILE" fi if [ ! -r "$RUNFILE" ]; then fail "Cannot read runfile: $RUNFILE" fi done unset IFS RUNFILES=${R#,} # # This script should not be run as root. Instead the test user, which may # be a normal user account, needs to be configured such that it can # run commands via sudo passwordlessly. # if [ "$(id -u)" = "0" ]; then fail "This script must not be run as root." fi if [ "$(sudo whoami)" != "root" ]; then fail "Passwordless sudo access required." fi # # Constrain the available binaries to a known set. 
# constrain_path # # Check if ksh exists # if [ "$UNAME" = "FreeBSD" ]; then sudo ln -fs /usr/local/bin/ksh93 /bin/ksh fi [ -e "$STF_PATH/ksh" ] || fail "This test suite requires ksh." [ -e "$STF_SUITE/include/default.cfg" ] || fail \ "Missing $STF_SUITE/include/default.cfg file." # # Verify the ZFS module stack is loaded. # if [ "$STACK_TRACER" = "yes" ]; then sudo "${ZFS_SH}" -S >/dev/null 2>&1 else sudo "${ZFS_SH}" >/dev/null 2>&1 fi # # Attempt to cleanup all previous state for a new test run. # if [ "$CLEANUPALL" = "yes" ]; then cleanup_all fi # # By default preserve any existing pools # NOTE: Since 'zpool list' outputs a newline-delimited list convert $KEEP from # space-delimited to newline-delimited. # if [ -z "${KEEP}" ]; then KEEP="$(sudo "$ZPOOL" list -H -o name)" if [ -z "${KEEP}" ]; then KEEP="rpool" fi else KEEP="$(echo "$KEEP" | tr '[:blank:]' '\n')" fi # # NOTE: The following environment variables are undocumented # and should be used for testing purposes only: # # __ZFS_POOL_EXCLUDE - don't iterate over the pools it lists # __ZFS_POOL_RESTRICT - iterate only over the pools it lists # # See libzfs/libzfs_config.c for more information. # if [ "$UNAME" = "FreeBSD" ] ; then __ZFS_POOL_EXCLUDE="$(echo "$KEEP" | tr -s '\n' ' ')" else __ZFS_POOL_EXCLUDE="$(echo "$KEEP" | sed ':a;N;s/\n/ /g;ba')" fi . "$STF_SUITE/include/default.cfg" # # No DISKS have been provided so a basic file or loopback based devices # must be created for the test suite to use. # if [ -z "${DISKS}" ]; then # # If this is a performance run, prevent accidental use of # loopback devices. # [ "$TAGS" = "perf" ] && fail "Running perf tests without disks." # # Create sparse files for the test suite. These may be used # directory or have loopback devices layered on them. 
# for TEST_FILE in ${FILES}; do [ -f "$TEST_FILE" ] && fail "Failed file exists: ${TEST_FILE}" truncate -s "${FILESIZE}" "${TEST_FILE}" || fail "Failed creating: ${TEST_FILE} ($?)" done # # If requested setup loopback devices backed by the sparse files. # if [ "$LOOPBACK" = "yes" ]; then test -x "$LOSETUP" || fail "$LOSETUP utility must be installed" for TEST_FILE in ${FILES}; do if [ "$UNAME" = "FreeBSD" ] ; then MDDEVICE=$(sudo "${LOSETUP}" -a -t vnode -f "${TEST_FILE}") if [ -z "$MDDEVICE" ] ; then fail "Failed: ${TEST_FILE} -> loopback" fi DISKS="$DISKS $MDDEVICE" LOOPBACKS="$LOOPBACKS $MDDEVICE" else TEST_LOOPBACK=$(sudo "${LOSETUP}" -f) sudo "${LOSETUP}" "${TEST_LOOPBACK}" "${TEST_FILE}" || fail "Failed: ${TEST_FILE} -> ${TEST_LOOPBACK}" - BASELOOPBACK=$(basename "$TEST_LOOPBACK") + BASELOOPBACK="${TEST_LOOPBACK##*/}" DISKS="$DISKS $BASELOOPBACK" LOOPBACKS="$LOOPBACKS $TEST_LOOPBACK" fi done DISKS=${DISKS# } LOOPBACKS=${LOOPBACKS# } else DISKS="$FILES" fi fi # # It may be desirable to test with fewer disks than the default when running # the performance tests, but the functional tests require at least three. # NUM_DISKS=$(echo "${DISKS}" | awk '{print NF}') if [ "$TAGS" != "perf" ]; then [ "$NUM_DISKS" -lt 3 ] && fail "Not enough disks ($NUM_DISKS/3 minimum)" fi # # Disable SELinux until the ZFS Test Suite has been updated accordingly. # if [ -x "$STF_PATH/setenforce" ]; then sudo setenforce permissive >/dev/null 2>&1 fi # # Enable internal ZFS debug log and clear it. 
# if [ -e /sys/module/zfs/parameters/zfs_dbgmsg_enable ]; then sudo /bin/sh -c "echo 1 >/sys/module/zfs/parameters/zfs_dbgmsg_enable" sudo /bin/sh -c "echo 0 >/proc/spl/kstat/zfs/dbgmsg" fi msg msg "--- Configuration ---" msg "Runfiles: $RUNFILES" msg "STF_TOOLS: $STF_TOOLS" msg "STF_SUITE: $STF_SUITE" msg "STF_PATH: $STF_PATH" msg "FILEDIR: $FILEDIR" msg "FILES: $FILES" msg "LOOPBACKS: $LOOPBACKS" msg "DISKS: $DISKS" msg "NUM_DISKS: $NUM_DISKS" msg "FILESIZE: $FILESIZE" msg "ITERATIONS: $ITERATIONS" msg "TAGS: $TAGS" msg "STACK_TRACER: $STACK_TRACER" msg "Keep pool(s): $KEEP" msg "Missing util(s): $STF_MISSING_BIN" msg "" export STF_TOOLS export STF_SUITE export STF_PATH export DISKS export FILEDIR export KEEP export __ZFS_POOL_EXCLUDE export TESTFAIL_CALLBACKS export PATH=$STF_PATH if [ "$UNAME" = "FreeBSD" ] ; then mkdir -p "$FILEDIR" || true RESULTS_FILE=$(mktemp -u "${FILEDIR}/zts-results.XXXXXX") REPORT_FILE=$(mktemp -u "${FILEDIR}/zts-report.XXXXXX") else RESULTS_FILE=$(mktemp -u -t zts-results.XXXXXX -p "$FILEDIR") REPORT_FILE=$(mktemp -u -t zts-report.XXXXXX -p "$FILEDIR") fi # # Run all the tests as specified. # msg "${TEST_RUNNER} ${QUIET:+-q}" \ "-c \"${RUNFILES}\"" \ "-T \"${TAGS}\"" \ "-i \"${STF_SUITE}\"" \ "-I \"${ITERATIONS}\"" ${TEST_RUNNER} ${QUIET:+-q} \ -c "${RUNFILES}" \ -T "${TAGS}" \ -i "${STF_SUITE}" \ -I "${ITERATIONS}" \ 2>&1 | tee "$RESULTS_FILE" # # Analyze the results. # ${ZTS_REPORT} ${RERUN:+--no-maybes} "$RESULTS_FILE" >"$REPORT_FILE" RESULT=$? 
if [ "$RESULT" -eq "2" ] && [ -n "$RERUN" ]; then MAYBES="$($ZTS_REPORT --list-maybes)" TEMP_RESULTS_FILE=$(mktemp -u -t zts-results-tmp.XXXXX -p "$FILEDIR") TEST_LIST=$(mktemp -u -t test-list.XXXXX -p "$FILEDIR") grep "^Test:.*\[FAIL\]" "$RESULTS_FILE" >"$TEMP_RESULTS_FILE" for test_name in $MAYBES; do grep "$test_name " "$TEMP_RESULTS_FILE" >>"$TEST_LIST" done ${TEST_RUNNER} ${QUIET:+-q} \ -c "${RUNFILES}" \ -T "${TAGS}" \ -i "${STF_SUITE}" \ -I "${ITERATIONS}" \ -l "${TEST_LIST}" \ 2>&1 | tee "$RESULTS_FILE" # # Analyze the results. # ${ZTS_REPORT} --no-maybes "$RESULTS_FILE" >"$REPORT_FILE" RESULT=$? fi cat "$REPORT_FILE" RESULTS_DIR=$(awk '/^Log directory/ { print $3 }' "$RESULTS_FILE") if [ -d "$RESULTS_DIR" ]; then cat "$RESULTS_FILE" "$REPORT_FILE" >"$RESULTS_DIR/results" fi rm -f "$RESULTS_FILE" "$REPORT_FILE" if [ -n "$SINGLETEST" ]; then rm -f "$RUNFILES" >/dev/null 2>&1 fi exit ${RESULT} diff --git a/scripts/zfs.sh b/scripts/zfs.sh index 7870b8930cab..940c83ffa28f 100755 --- a/scripts/zfs.sh +++ b/scripts/zfs.sh @@ -1,288 +1,291 @@ #!/bin/sh # # A simple script to load/unload the ZFS module stack. # BASE_DIR=$(dirname "$0") SCRIPT_COMMON=common.sh if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then . "${BASE_DIR}/${SCRIPT_COMMON}" else echo "Missing helper script ${SCRIPT_COMMON}" && exit 1 fi PROG=zfs.sh VERBOSE="no" UNLOAD="no" LOAD="yes" STACK_TRACER="no" ZED_PIDFILE=${ZED_PIDFILE:-/var/run/zed.pid} LDMOD=${LDMOD:-/sbin/modprobe} KMOD_ZLIB_DEFLATE=${KMOD_ZLIB_DEFLATE:-zlib_deflate} KMOD_ZLIB_INFLATE=${KMOD_ZLIB_INFLATE:-zlib_inflate} KMOD_SPL=${KMOD_SPL:-spl} KMOD_ZAVL=${KMOD_ZAVL:-zavl} KMOD_ZNVPAIR=${KMOD_ZNVPAIR:-znvpair} KMOD_ZUNICODE=${KMOD_ZUNICODE:-zunicode} KMOD_ZCOMMON=${KMOD_ZCOMMON:-zcommon} KMOD_ZLUA=${KMOD_ZLUA:-zlua} KMOD_ICP=${KMOD_ICP:-icp} KMOD_ZFS=${KMOD_ZFS:-zfs} KMOD_FREEBSD=${KMOD_FREEBSD:-openzfs} KMOD_ZZSTD=${KMOD_ZZSTD:-zzstd} usage() { cat << EOF USAGE: $0 [hvudS] [module-options] DESCRIPTION: Load/unload the ZFS module stack. 
OPTIONS: -h Show this message -v Verbose -r Reload modules -u Unload modules -S Enable kernel stack tracer EOF } while getopts 'hvruS' OPTION; do case $OPTION in h) usage exit 1 ;; v) VERBOSE="yes" ;; r) UNLOAD="yes" LOAD="yes" ;; u) UNLOAD="yes" LOAD="no" ;; S) STACK_TRACER="yes" ;; ?) usage exit ;; esac done kill_zed() { if [ -f "$ZED_PIDFILE" ]; then PID=$(cat "$ZED_PIDFILE") kill "$PID" fi } check_modules_linux() { LOADED_MODULES="" MISSING_MODULES="" for KMOD in $KMOD_SPL $KMOD_ZAVL $KMOD_ZNVPAIR $KMOD_ZUNICODE $KMOD_ZCOMMON \ $KMOD_ZLUA $KMOD_ZZSTD $KMOD_ICP $KMOD_ZFS; do - NAME=$(basename "$KMOD" .ko) + NAME="${KMOD##*/}" + NAME="${NAME%.ko}" if lsmod | grep -E -q "^${NAME}"; then LOADED_MODULES="$LOADED_MODULES\t$NAME\n" fi if ! modinfo "$KMOD" >/dev/null 2>&1; then MISSING_MODULES="$MISSING_MODULES\t${KMOD}\n" fi done if [ -n "$LOADED_MODULES" ]; then printf "Unload the kernel modules by running '%s -u':\n" "$PROG" printf "%b" "$LOADED_MODULES" exit 1 fi if [ -n "$MISSING_MODULES" ]; then printf "The following kernel modules can not be found:\n" printf "%b" "$MISSING_MODULES" exit 1 fi return 0 } load_module_linux() { KMOD=$1 FILE=$(modinfo "$KMOD" | awk '/^filename:/ {print $2}') VERSION=$(modinfo "$KMOD" | awk '/^version:/ {print $2}') if [ "$VERBOSE" = "yes" ]; then echo "Loading: $FILE ($VERSION)" fi if ! 
$LDMOD "$KMOD" >/dev/null 2>&1; then echo "Failed to load $KMOD" return 1 fi return 0 } load_modules_freebsd() { kldload "$KMOD_FREEBSD" || return 1 if [ "$VERBOSE" = "yes" ]; then echo "Successfully loaded ZFS module stack" fi return 0 } load_modules_linux() { mkdir -p /etc/zfs if modinfo "$KMOD_ZLIB_DEFLATE" >/dev/null 2>&1; then modprobe "$KMOD_ZLIB_DEFLATE" >/dev/null 2>&1 fi if modinfo "$KMOD_ZLIB_INFLATE">/dev/null 2>&1; then modprobe "$KMOD_ZLIB_INFLATE" >/dev/null 2>&1 fi for KMOD in $KMOD_SPL $KMOD_ZAVL $KMOD_ZNVPAIR \ $KMOD_ZUNICODE $KMOD_ZCOMMON $KMOD_ZLUA $KMOD_ZZSTD \ $KMOD_ICP $KMOD_ZFS; do load_module_linux "$KMOD" || return 1 done if [ "$VERBOSE" = "yes" ]; then echo "Successfully loaded ZFS module stack" fi return 0 } unload_module_linux() { KMOD=$1 - NAME=$(basename "$KMOD" .ko) + NAME="${KMOD##*/}" + NAME="${NAME%.ko}" FILE=$(modinfo "$KMOD" | awk '/^filename:/ {print $2}') VERSION=$(modinfo "$KMOD" | awk '/^version:/ {print $2}') if [ "$VERBOSE" = "yes" ]; then echo "Unloading: $KMOD ($VERSION)" fi rmmod "$NAME" || echo "Failed to unload $NAME" return 0 } unload_modules_freebsd() { kldunload "$KMOD_FREEBSD" || echo "Failed to unload $KMOD_FREEBSD" if [ "$VERBOSE" = "yes" ]; then echo "Successfully unloaded ZFS module stack" fi return 0 } unload_modules_linux() { for KMOD in $KMOD_ZFS $KMOD_ICP $KMOD_ZZSTD $KMOD_ZLUA $KMOD_ZCOMMON \ $KMOD_ZUNICODE $KMOD_ZNVPAIR $KMOD_ZAVL $KMOD_SPL; do - NAME=$(basename "$KMOD" .ko) - USE_COUNT=$(lsmod | grep -E "^${NAME} " | awk '{print $3}') + NAME="${KMOD##*/}" + NAME="${NAME%.ko}" + USE_COUNT=$(lsmod | awk '/^'"${NAME}"'/ {print $3}') if [ "$USE_COUNT" = "0" ] ; then unload_module_linux "$KMOD" || return 1 elif [ "$USE_COUNT" != "" ] ; then echo "Module ${NAME} is still in use!" 
return 1 fi done if modinfo "$KMOD_ZLIB_DEFLATE" >/dev/null 2>&1; then modprobe -r "$KMOD_ZLIB_DEFLATE" >/dev/null 2>&1 fi if modinfo "$KMOD_ZLIB_INFLATE">/dev/null 2>&1; then modprobe -r "$KMOD_ZLIB_INFLATE" >/dev/null 2>&1 fi if [ "$VERBOSE" = "yes" ]; then echo "Successfully unloaded ZFS module stack" fi return 0 } stack_clear_linux() { STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size STACK_TRACER_ENABLED=/proc/sys/kernel/stack_tracer_enabled if [ "$STACK_TRACER" = "yes" ] && [ -e "$STACK_MAX_SIZE" ]; then echo 1 >"$STACK_TRACER_ENABLED" echo 0 >"$STACK_MAX_SIZE" fi } stack_check_linux() { STACK_MAX_SIZE=/sys/kernel/debug/tracing/stack_max_size STACK_TRACE=/sys/kernel/debug/tracing/stack_trace STACK_LIMIT=15362 if [ -e "$STACK_MAX_SIZE" ]; then STACK_SIZE=$(cat "$STACK_MAX_SIZE") if [ "$STACK_SIZE" -ge "$STACK_LIMIT" ]; then echo echo "Warning: max stack size $STACK_SIZE bytes" cat "$STACK_TRACE" fi fi } if [ "$(id -u)" != 0 ]; then echo "Must run as root" exit 1 fi UNAME=$(uname -s) if [ "$UNLOAD" = "yes" ]; then kill_zed umount -t zfs -a case $UNAME in FreeBSD) unload_modules_freebsd ;; Linux) stack_check_linux unload_modules_linux ;; esac fi if [ "$LOAD" = "yes" ]; then case $UNAME in FreeBSD) load_modules_freebsd ;; Linux) stack_clear_linux check_modules_linux load_modules_linux "$@" udevadm trigger udevadm settle ;; esac fi exit 0 diff --git a/scripts/zimport.sh b/scripts/zimport.sh index 0e9c01182b8b..03c766cf36c2 100755 --- a/scripts/zimport.sh +++ b/scripts/zimport.sh @@ -1,512 +1,512 @@ #!/usr/bin/env bash # # Verify that an assortment of known good reference pools can be imported # using different versions of OpenZFS code. # # By default references pools for the major ZFS implementation will be # checked against the most recent OpenZFS tags and the master development branch. # Alternate tags or branches may be verified with the '-s option. # Passing the keyword "installed" will instruct the script to test whatever # version is installed. 
# # Preferentially a reference pool is used for all tests. However, if one # does not exist and the pool-tag matches one of the src-tags then a new # reference pool will be created using binaries from that source build. # This is particularly useful when you need to test your changes before # opening a pull request. The keyword 'all' can be used as short hand # refer to all available reference pools. # # New reference pools may be added by placing a bzip2 compressed tarball # of the pool in the scripts/zfs-images directory and then passing # the -p option. To increase the test coverage reference pools # should be collected for all the major ZFS implementations. Having these # pools easily available is also helpful to the developers. # # Care should be taken to run these tests with a kernel supported by all # the listed tags. Otherwise build failure will cause false positives. # # # EXAMPLES: # # The following example will verify the zfs-0.6.2 tag, the master branch, # and the installed zfs version can correctly import the listed pools. # Note there is no reference pool available for master and installed but # because binaries are available one is automatically constructed. The # working directory is also preserved between runs (-k) preventing the # need to rebuild from source for multiple runs. # # zimport.sh -k -f /var/tmp/zimport \ # -s "zfs-0.6.2 master installed" \ # -p "zevo-1.1.1 zol-0.6.2 zol-0.6.2-173 master installed" # # ------------------------ OpenZFS Source Versions ---------------- # zfs-0.6.2 master 0.6.2-175_g36eb554 # ----------------------------------------------------------------- # Clone ZFS Local Local Skip # Build ZFS Pass Pass Skip # ----------------------------------------------------------------- # zevo-1.1.1 Pass Pass Pass # zol-0.6.2 Pass Pass Pass # zol-0.6.2-173 Fail Pass Pass # master Pass Pass Pass # installed Pass Pass Pass # BASE_DIR=$(dirname "$0") SCRIPT_COMMON=common.sh if [ -f "${BASE_DIR}/${SCRIPT_COMMON}" ]; then . 
"${BASE_DIR}/${SCRIPT_COMMON}" else echo "Missing helper script ${SCRIPT_COMMON}" && exit 1 fi PROG=zimport.sh SRC_TAGS="zfs-0.6.5.11 master" POOL_TAGS="all master" POOL_CREATE_OPTIONS= TEST_DIR=$(mktemp -u -d -p /var/tmp zimport.XXXXXXXX) KEEP="no" VERBOSE="no" COLOR="yes" REPO="https://github.com/openzfs" IMAGES_DIR="$SCRIPTDIR/zfs-images/" IMAGES_TAR="https://github.com/openzfs/zfs-images/tarball/master" ERROR=0 CONFIG_LOG="configure.log" CONFIG_OPTIONS=${CONFIG_OPTIONS:-""} MAKE_LOG="make.log" MAKE_OPTIONS=${MAKE_OPTIONS:-"-s -j$(nproc)"} COLOR_GREEN="\033[0;32m" COLOR_RED="\033[0;31m" COLOR_BROWN="\033[0;33m" COLOR_RESET="\033[0m" usage() { cat << EOF USAGE: zimport.sh [hvl] [-r repo] [-s src-tag] [-i pool-dir] [-p pool-tag] [-f path] [-o options] DESCRIPTION: ZPOOL import verification tests OPTIONS: -h Show this message -v Verbose -c No color -k Keep temporary directory -r Source repository ($REPO) -s ... Verify OpenZFS versions with the listed tags -i Pool image directory -p ... Verify pools created with the listed tags -f Temporary directory to use -o Additional options to pass to 'zpool create' EOF } while getopts 'hvckr:s:i:p:f:o:?' OPTION; do case $OPTION in h) usage exit 1 ;; v) VERBOSE="yes" ;; c) COLOR="no" ;; k) KEEP="yes" ;; r) REPO="$OPTARG" ;; s) SRC_TAGS="$OPTARG" ;; i) IMAGES_DIR="$OPTARG" ;; p) POOL_TAGS="$OPTARG" ;; f) TEST_DIR="$OPTARG" ;; o) POOL_CREATE_OPTIONS="$OPTARG" ;; ?) usage exit 1 ;; esac done # # Verify the module start is not loaded # if lsmod | grep zfs >/dev/null; then echo "ZFS modules must be unloaded" exit 1 fi # # Create a random directory tree of files and sub-directories to # to act as a copy source for the various regression tests. 
# populate() { local ROOT=$1 local MAX_DIR_SIZE=$2 local MAX_FILE_SIZE=$3 mkdir -p "$ROOT"/{a,b,c,d,e,f,g}/{h,i} DIRS=$(find "$ROOT") for DIR in $DIRS; do COUNT=$((RANDOM % MAX_DIR_SIZE)) for _ in $(seq $COUNT); do FILE=$(mktemp -p "$DIR") SIZE=$((RANDOM % MAX_FILE_SIZE)) dd if=/dev/urandom of="$FILE" bs=1k \ count="$SIZE" &>/dev/null done done return 0 } SRC_DIR=$(mktemp -d -p /var/tmp/ zfs.src.XXXXXXXX) trap 'rm -Rf "$SRC_DIR"' INT TERM EXIT populate "$SRC_DIR" 10 100 SRC_DIR="$TEST_DIR/src" SRC_DIR_ZFS="$SRC_DIR/zfs" if [ "$COLOR" = "no" ]; then COLOR_GREEN="" COLOR_BROWN="" COLOR_RED="" COLOR_RESET="" fi pass_nonewline() { echo -n -e "${COLOR_GREEN}Pass${COLOR_RESET}\t\t" } skip_nonewline() { echo -n -e "${COLOR_BROWN}Skip${COLOR_RESET}\t\t" } fail_nonewline() { echo -n -e "${COLOR_RED}Fail${COLOR_RESET}\t\t" } # # Log a failure message, cleanup, and return an error. # fail() { echo -e "$PROG: $1" >&2 $ZFS_SH -u >/dev/null 2>&1 exit 1 } # # Set several helper variables which are derived from a source tag. # # ZFS_TAG - The passed zfs-x.y.z tag # ZFS_DIR - The zfs directory name # ZFS_URL - The zfs github URL to fetch the tarball # src_set_vars() { local TAG=$1 ZFS_TAG="$TAG" ZFS_DIR="$SRC_DIR_ZFS/$ZFS_TAG" ZFS_URL="$REPO/zfs/tarball/$ZFS_TAG" if [ "$TAG" = "installed" ]; then ZPOOL_CMD=$(command -v zpool) ZFS_CMD=$(command -v zfs) ZFS_SH="/usr/share/zfs/zfs.sh" else ZPOOL_CMD="./cmd/zpool/zpool" ZFS_CMD="./cmd/zfs/zfs" ZFS_SH="./scripts/zfs.sh" fi } # # Set several helper variables which are derived from a pool name such # as zol-0.6.x, zevo-1.1.1, etc. These refer to example pools from various # ZFS implementations which are used to verify compatibility. # # POOL_TAG - The example pools name in scripts/zfs-images/. # POOL_BZIP - The full path to the example bzip2 compressed pool. # POOL_DIR - The top level test path for this pool. # POOL_DIR_PRISTINE - The directory containing a pristine version of the pool. 
# POOL_DIR_COPY - The directory containing a working copy of the pool. # POOL_DIR_SRC - Location of a source build if it exists for this pool. # pool_set_vars() { local TAG=$1 POOL_TAG=$TAG POOL_BZIP=$IMAGES_DIR/$POOL_TAG.tar.bz2 POOL_DIR=$TEST_DIR/pools/$POOL_TAG POOL_DIR_PRISTINE=$POOL_DIR/pristine POOL_DIR_COPY=$POOL_DIR/copy POOL_DIR_SRC="$SRC_DIR_ZFS/${POOL_TAG//zol/zfs}" } # # Construct a non-trivial pool given a specific version of the source. More # interesting pools provide better test coverage so this function should # extended as needed to create more realistic pools. # pool_create() { pool_set_vars "$1" src_set_vars "$1" if [ "$POOL_TAG" != "installed" ]; then cd "$POOL_DIR_SRC" || fail "Failed 'cd $POOL_DIR_SRC'" fi $ZFS_SH zfs="spa_config_path=$POOL_DIR_PRISTINE" || \ fail "Failed to load kmods" # Create a file vdev RAIDZ pool. truncate -s 1G \ "$POOL_DIR_PRISTINE/vdev1" "$POOL_DIR_PRISTINE/vdev2" \ "$POOL_DIR_PRISTINE/vdev3" "$POOL_DIR_PRISTINE/vdev4" || \ fail "Failed 'truncate -s 1G ...'" # shellcheck disable=SC2086 $ZPOOL_CMD create $POOL_CREATE_OPTIONS "$POOL_TAG" raidz \ "$POOL_DIR_PRISTINE/vdev1" "$POOL_DIR_PRISTINE/vdev2" \ "$POOL_DIR_PRISTINE/vdev3" "$POOL_DIR_PRISTINE/vdev4" || \ fail "Failed '$ZPOOL_CMD create $POOL_CREATE_OPTIONS $POOL_TAG ...'" # Create a pool/fs filesystem with some random contents. $ZFS_CMD create "$POOL_TAG/fs" || \ fail "Failed '$ZFS_CMD create $POOL_TAG/fs'" populate "/$POOL_TAG/fs/" 10 100 # Snapshot that filesystem, clone it, remove the files/dirs, # replace them with new files/dirs. $ZFS_CMD snap "$POOL_TAG/fs@snap" || \ fail "Failed '$ZFS_CMD snap $POOL_TAG/fs@snap'" $ZFS_CMD clone "$POOL_TAG/fs@snap" "$POOL_TAG/clone" || \ fail "Failed '$ZFS_CMD clone $POOL_TAG/fs@snap $POOL_TAG/clone'" # shellcheck disable=SC2086 rm -Rf /$POOL_TAG/clone/* populate "/$POOL_TAG/clone/" 10 100 # Scrub the pool, delay slightly, then export it. It is now # somewhat interesting for testing purposes. 
$ZPOOL_CMD scrub "$POOL_TAG" || \ fail "Failed '$ZPOOL_CMD scrub $POOL_TAG'" sleep 10 $ZPOOL_CMD export "$POOL_TAG" || \ fail "Failed '$ZPOOL_CMD export $POOL_TAG'" $ZFS_SH -u || fail "Failed to unload kmods" } # If the zfs-images directory doesn't exist fetch a copy from Github then # cache it in the $TEST_DIR and update $IMAGES_DIR. if [ ! -d "$IMAGES_DIR" ]; then IMAGES_DIR="$TEST_DIR/zfs-images" mkdir -p "$IMAGES_DIR" curl -sL "$IMAGES_TAR" | \ tar -xz -C "$IMAGES_DIR" --strip-components=1 || \ fail "Failed to download pool images" fi # Given the available images in the zfs-images directory substitute the # list of available images for the reserved keyword 'all'. for TAG in $POOL_TAGS; do if [ "$TAG" = "all" ]; then ALL_TAGS=$(echo "$IMAGES_DIR"/*.tar.bz2 | \ sed "s|$IMAGES_DIR/||g;s|.tar.bz2||g") NEW_TAGS="$NEW_TAGS $ALL_TAGS" else NEW_TAGS="$NEW_TAGS $TAG" fi done POOL_TAGS="$NEW_TAGS" if [ "$VERBOSE" = "yes" ]; then echo "---------------------------- Options ----------------------------" echo "VERBOSE=$VERBOSE" echo "KEEP=$KEEP" echo "REPO=$REPO" echo "SRC_TAGS=$SRC_TAGS" echo "POOL_TAGS=$POOL_TAGS" echo "PATH=$TEST_DIR" echo "POOL_CREATE_OPTIONS=$POOL_CREATE_OPTIONS" echo fi if [ ! -d "$TEST_DIR" ]; then mkdir -p "$TEST_DIR" fi if [ ! -d "$SRC_DIR" ]; then mkdir -p "$SRC_DIR" fi # Print a header for all tags which are being tested. echo "------------------------ OpenZFS Source Versions ----------------" printf "%-16s" " " for TAG in $SRC_TAGS; do src_set_vars "$TAG" if [ "$TAG" = "installed" ]; then ZFS_VERSION=$(modinfo zfs | awk '/version:/ { print $2; exit }') if [ -n "$ZFS_VERSION" ]; then printf "%-16s" "$ZFS_VERSION" else fail "ZFS is not installed" fi else printf "%-16s" "$TAG" fi done echo -e "\n-----------------------------------------------------------------" # # Attempt to generate the tarball from your local git repository, if that # fails then attempt to download the tarball from Github. 
# printf "%-16s" "Clone ZFS" for TAG in $SRC_TAGS; do src_set_vars "$TAG" if [ -d "$ZFS_DIR" ]; then skip_nonewline elif [ "$ZFS_TAG" = "installed" ]; then skip_nonewline else cd "$SRC_DIR" || fail "Failed 'cd $SRC_DIR'" if [ ! -d "$SRC_DIR_ZFS" ]; then mkdir -p "$SRC_DIR_ZFS" fi git archive --format=tar --prefix="$ZFS_TAG/ $ZFS_TAG" \ -o "$SRC_DIR_ZFS/$ZFS_TAG.tar" &>/dev/null || \ rm "$SRC_DIR_ZFS/$ZFS_TAG.tar" if [ -s "$SRC_DIR_ZFS/$ZFS_TAG.tar" ]; then tar -xf "$SRC_DIR_ZFS/$ZFS_TAG.tar" -C "$SRC_DIR_ZFS" rm "$SRC_DIR_ZFS/$ZFS_TAG.tar" echo -n -e "${COLOR_GREEN}Local${COLOR_RESET}\t\t" else mkdir -p "$ZFS_DIR" || fail "Failed to create $ZFS_DIR" curl -sL "$ZFS_URL" | tar -xz -C "$ZFS_DIR" \ --strip-components=1 || \ fail "Failed to download $ZFS_URL" echo -n -e "${COLOR_GREEN}Remote${COLOR_RESET}\t\t" fi fi done printf "\n" # Build the listed tags printf "%-16s" "Build ZFS" for TAG in $SRC_TAGS; do src_set_vars "$TAG" if [ -f "$ZFS_DIR/module/zfs/zfs.ko" ]; then skip_nonewline elif [ "$ZFS_TAG" = "installed" ]; then skip_nonewline else cd "$ZFS_DIR" || fail "Failed 'cd $ZFS_DIR'" make distclean &>/dev/null ./autogen.sh >>"$CONFIG_LOG" 2>&1 || \ fail "Failed ZFS 'autogen.sh'" # shellcheck disable=SC2086 ./configure $CONFIG_OPTIONS >>"$CONFIG_LOG" 2>&1 || \ fail "Failed ZFS 'configure $CONFIG_OPTIONS'" # shellcheck disable=SC2086 make $MAKE_OPTIONS >>"$MAKE_LOG" 2>&1 || \ fail "Failed ZFS 'make $MAKE_OPTIONS'" pass_nonewline fi done printf "\n" echo "-----------------------------------------------------------------" # Either create a new pool using 'zpool create', or alternately restore an # existing pool from another ZFS implementation for compatibility testing. for TAG in $POOL_TAGS; do pool_set_vars "$TAG" SKIP=0 printf "%-16s" "$POOL_TAG" rm -Rf "$POOL_DIR" mkdir -p "$POOL_DIR_PRISTINE" # Use the existing compressed image if available. 
if [ -f "$POOL_BZIP" ]; then tar -xjf "$POOL_BZIP" -C "$POOL_DIR_PRISTINE" \ --strip-components=1 || \ fail "Failed 'tar -xjf $POOL_BZIP" # Use the installed version to create the pool. elif [ "$TAG" = "installed" ]; then pool_create "$TAG" # A source build is available to create the pool. elif [ -d "$POOL_DIR_SRC" ]; then pool_create "$TAG" else SKIP=1 fi # Verify 'zpool import' works for all listed source versions. for SRC_TAG in $SRC_TAGS; do if [ $SKIP -eq 1 ]; then skip_nonewline continue fi src_set_vars "$SRC_TAG" if [ "$SRC_TAG" != "installed" ]; then cd "$ZFS_DIR" || fail "Failed 'cd $ZFS_DIR'" fi $ZFS_SH zfs="spa_config_path=$POOL_DIR_COPY" cp -a --sparse=always "$POOL_DIR_PRISTINE" \ "$POOL_DIR_COPY" || \ fail "Failed to copy $POOL_DIR_PRISTINE to $POOL_DIR_COPY" POOL_NAME=$($ZPOOL_CMD import -d "$POOL_DIR_COPY" | \ - awk '/pool:/ { print $2; exit 0 }') + awk '/pool:/ { print $2; exit }') if ! $ZPOOL_CMD import -N -d "$POOL_DIR_COPY" "$POOL_NAME" &>/dev/null; then fail_nonewline ERROR=1 else $ZPOOL_CMD export "$POOL_NAME" || \ fail "Failed to export pool" pass_nonewline fi rm -Rf "$POOL_DIR_COPY" $ZFS_SH -u || fail "Failed to unload kmods" done printf "\n" done if [ "$KEEP" = "no" ]; then rm -Rf "$TEST_DIR" fi exit $ERROR diff --git a/tests/zfs-tests/include/libtest.shlib b/tests/zfs-tests/include/libtest.shlib index 8a41b960f8d9..dd43b02a6868 100644 --- a/tests/zfs-tests/include/libtest.shlib +++ b/tests/zfs-tests/include/libtest.shlib @@ -1,4298 +1,4296 @@ # # CDDL HEADER START # # The contents of this file are subject to the terms of the # Common Development and Distribution License (the "License"). # You may not use this file except in compliance with the License. # # You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE # or http://www.opensolaris.org/os/licensing. # See the License for the specific language governing permissions # and limitations under the License. 
# # When distributing Covered Code, include this CDDL HEADER in each # file and include the License file at usr/src/OPENSOLARIS.LICENSE. # If applicable, add the following below this CDDL HEADER, with the # fields enclosed by brackets "[]" replaced with your own identifying # information: Portions Copyright [yyyy] [name of copyright owner] # # CDDL HEADER END # # # Copyright (c) 2009, Sun Microsystems Inc. All rights reserved. # Copyright (c) 2012, 2020, Delphix. All rights reserved. # Copyright (c) 2017, Tim Chase. All rights reserved. # Copyright (c) 2017, Nexenta Systems Inc. All rights reserved. # Copyright (c) 2017, Lawrence Livermore National Security LLC. # Copyright (c) 2017, Datto Inc. All rights reserved. # Copyright (c) 2017, Open-E Inc. All rights reserved. # Copyright (c) 2021, The FreeBSD Foundation. # Use is subject to license terms. # . ${STF_TOOLS}/include/logapi.shlib . ${STF_SUITE}/include/math.shlib . ${STF_SUITE}/include/blkdev.shlib . ${STF_SUITE}/include/tunables.cfg # # Apply constrained path when available. This is required since the # PATH may have been modified by sudo's secure_path behavior. # if [ -n "$STF_PATH" ]; then export PATH="$STF_PATH" fi # # Generic dot version comparison function # # Returns success when version $1 is greater than or equal to $2. # function compare_version_gte { if [[ "$(printf "$1\n$2" | sort -V | tail -n1)" == "$1" ]]; then return 0 else return 1 fi } # Linux kernel version comparison function # # $1 Linux version ("4.10", "2.6.32") or blank for installed Linux version # # Used for comparison: if [ $(linux_version) -ge $(linux_version "2.6.32") ] # function linux_version { typeset ver="$1" [[ -z "$ver" ]] && ver=$(uname -r | grep -Eo "^[0-9]+\.[0-9]+\.[0-9]+") typeset version=$(echo $ver | cut -d '.' -f 1) typeset major=$(echo $ver | cut -d '.' -f 2) typeset minor=$(echo $ver | cut -d '.' 
-f 3) [[ -z "$version" ]] && version=0 [[ -z "$major" ]] && major=0 [[ -z "$minor" ]] && minor=0 echo $((version * 10000 + major * 100 + minor)) } # Determine if this is a Linux test system # # Return 0 if platform Linux, 1 if otherwise function is_linux { if [[ $(uname -o) == "GNU/Linux" ]]; then return 0 else return 1 fi } # Determine if this is an illumos test system # # Return 0 if platform illumos, 1 if otherwise function is_illumos { if [[ $(uname -o) == "illumos" ]]; then return 0 else return 1 fi } # Determine if this is a FreeBSD test system # # Return 0 if platform FreeBSD, 1 if otherwise function is_freebsd { if [[ $(uname -o) == "FreeBSD" ]]; then return 0 else return 1 fi } # Determine if this is a DilOS test system # # Return 0 if platform DilOS, 1 if otherwise function is_dilos { typeset ID="" [[ -f /etc/os-release ]] && . /etc/os-release if [[ $ID == "dilos" ]]; then return 0 else return 1 fi } # Determine if this is a 32-bit system # # Return 0 if platform is 32-bit, 1 if otherwise function is_32bit { if [[ $(getconf LONG_BIT) == "32" ]]; then return 0 else return 1 fi } # Determine if kmemleak is enabled # # Return 0 if kmemleak is enabled, 1 if otherwise function is_kmemleak { if is_linux && [[ -e /sys/kernel/debug/kmemleak ]]; then return 0 else return 1 fi } # Determine whether a dataset is mounted # # $1 dataset name # $2 filesystem type; optional - defaulted to zfs # # Return 0 if dataset is mounted; 1 if unmounted; 2 on error function ismounted { typeset fstype=$2 [[ -z $fstype ]] && fstype=zfs typeset out dir name ret case $fstype in zfs) if [[ "$1" == "/"* ]] ; then for out in $(zfs mount | awk '{print $2}'); do [[ $1 == $out ]] && return 0 done else for out in $(zfs mount | awk '{print $1}'); do [[ $1 == $out ]] && return 0 done fi ;; ufs|nfs) if is_freebsd; then mount -pt $fstype | while read dev dir _t _flags; do [[ "$1" == "$dev" || "$1" == "$dir" ]] && return 0 done else out=$(df -F $fstype $1 2>/dev/null) ret=$? 
(($ret != 0)) && return $ret dir=${out%%\(*} dir=${dir%% *} name=${out##*\(} name=${name%%\)*} name=${name%% *} [[ "$1" == "$dir" || "$1" == "$name" ]] && return 0 fi ;; ext*) out=$(df -t $fstype $1 2>/dev/null) return $? ;; zvol) if [[ -L "$ZVOL_DEVDIR/$1" ]]; then link=$(readlink -f $ZVOL_DEVDIR/$1) [[ -n "$link" ]] && \ mount | grep -q "^$link" && \ return 0 fi ;; esac return 1 } # Return 0 if a dataset is mounted; 1 otherwise # # $1 dataset name # $2 filesystem type; optional - defaulted to zfs function mounted { ismounted $1 $2 (($? == 0)) && return 0 return 1 } # Return 0 if a dataset is unmounted; 1 otherwise # # $1 dataset name # $2 filesystem type; optional - defaulted to zfs function unmounted { ismounted $1 $2 (($? == 1)) && return 0 return 1 } # split line on "," # # $1 - line to split function splitline { - echo $1 | sed "s/,/ /g" + echo $1 | tr ',' ' ' } function default_setup { default_setup_noexit "$@" log_pass } function default_setup_no_mountpoint { default_setup_noexit "$1" "$2" "$3" "yes" log_pass } # # Given a list of disks, setup storage pools and datasets. 
#
function default_setup_noexit
{
	typeset disklist=$1
	typeset container=$2
	typeset volume=$3
	typeset no_mountpoint=$4

	log_note begin default_setup_noexit

	if is_global_zone; then
		# Start from a clean slate: tear down any leftover pool and
		# stale mount directory before creating the new pool.
		if poolexists $TESTPOOL ; then
			destroy_pool $TESTPOOL
		fi
		[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
		log_must zpool create -f $TESTPOOL $disklist
	else
		reexport_pool
	fi

	rm -rf $TESTDIR || log_unresolved Could not remove $TESTDIR
	mkdir -p $TESTDIR || log_unresolved Could not create $TESTDIR

	log_must zfs create $TESTPOOL/$TESTFS
	if [[ -z $no_mountpoint ]]; then
		log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
	fi

	# Optionally create a container dataset with a child filesystem.
	if [[ -n $container ]]; then
		rm -rf $TESTDIR1 || \
		    log_unresolved Could not remove $TESTDIR1
		mkdir -p $TESTDIR1 || \
		    log_unresolved Could not create $TESTDIR1

		log_must zfs create $TESTPOOL/$TESTCTR
		log_must zfs set canmount=off $TESTPOOL/$TESTCTR
		log_must zfs create $TESTPOOL/$TESTCTR/$TESTFS1
		if [[ -z $no_mountpoint ]]; then
			log_must zfs set mountpoint=$TESTDIR1 \
			    $TESTPOOL/$TESTCTR/$TESTFS1
		fi
	fi

	# Optionally create a volume (a real block device only in the
	# global zone).
	if [[ -n $volume ]]; then
		if is_global_zone ; then
			log_must zfs create -V $VOLSIZE $TESTPOOL/$TESTVOL
			block_device_wait
		else
			log_must zfs create $TESTPOOL/$TESTVOL
		fi
	fi
}

#
# Given a list of disks, setup a storage pool, file system and
# a container.
#
function default_container_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true"
}

#
# Given a list of disks, setup a storage pool, file system
# and a volume.
#
function default_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "" "true"
}

#
# Given a list of disks, setup a storage pool, file system,
# a container and a volume.
#
function default_container_volume_setup
{
	typeset disklist=$1

	default_setup "$disklist" "true" "true"
}

#
# Create a snapshot on a filesystem or volume. Defaultly create a snapshot on
# filesystem
#
# $1 Existing filesystem or volume name. Default, $TESTPOOL/$TESTFS
# $2 snapshot name.
# Default, $TESTSNAP
#
function create_snapshot
{
	typeset fs_vol=${1:-$TESTPOOL/$TESTFS}
	typeset snap=${2:-$TESTSNAP}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."

	if snapexists $fs_vol@$snap; then
		log_fail "$fs_vol@$snap already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."

	log_must zfs snapshot $fs_vol@$snap
}

#
# Create a clone from a snapshot, default clone name is $TESTCLONE.
#
# $1 Existing snapshot, $TESTPOOL/$TESTFS@$TESTSNAP is default.
# $2 Clone name, $TESTPOOL/$TESTCLONE is default.
#
function create_clone # snapshot clone
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}
	typeset clone=${2:-$TESTPOOL/$TESTCLONE}

	[[ -z $snap ]] && \
	    log_fail "Snapshot name is undefined."
	[[ -z $clone ]] && \
	    log_fail "Clone name is undefined."

	log_must zfs clone $snap $clone
}

#
# Create a bookmark of the given snapshot. Defaultly create a bookmark on
# filesystem.
#
# $1 Existing filesystem or volume name. Default, $TESTFS
# $2 Existing snapshot name. Default, $TESTSNAP
# $3 bookmark name. Default, $TESTBKMARK
#
function create_bookmark
{
	typeset fs_vol=${1:-$TESTFS}
	typeset snap=${2:-$TESTSNAP}
	typeset bkmark=${3:-$TESTBKMARK}

	[[ -z $fs_vol ]] && log_fail "Filesystem or volume's name is undefined."
	[[ -z $snap ]] && log_fail "Snapshot's name is undefined."
	[[ -z $bkmark ]] && log_fail "Bookmark's name is undefined."

	if bkmarkexists $fs_vol#$bkmark; then
		log_fail "$fs_vol#$bkmark already exists."
	fi
	datasetexists $fs_vol || \
	    log_fail "$fs_vol must exist."
	snapexists $fs_vol@$snap || \
	    log_fail "$fs_vol@$snap must exist."

	log_must zfs bookmark $fs_vol@$snap $fs_vol#$bkmark
}

#
# Create a temporary clone result of an interrupted resumable 'zfs receive'
# $1 Destination filesystem name. Must not exist, will be created as the result
#    of this function along with its %recv temporary clone
# $2 Source filesystem name.
#    Must not exist, will be created and destroyed
#
function create_recv_clone
{
	typeset recvfs="$1"
	typeset sendfs="${2:-$TESTPOOL/create_recv_clone}"
	typeset snap="$sendfs@snap1"
	typeset incr="$sendfs@snap2"
	typeset mountpoint="$TESTDIR/create_recv_clone"
	typeset sendfile="$TESTDIR/create_recv_clone.zsnap"

	[[ -z $recvfs ]] && log_fail "Recv filesystem's name is undefined."

	datasetexists $recvfs && log_fail "Recv filesystem must not exist."
	datasetexists $sendfs && log_fail "Send filesystem must not exist."

	log_must zfs create -o mountpoint="$mountpoint" $sendfs
	log_must zfs snapshot $snap
	log_must eval "zfs send $snap | zfs recv -u $recvfs"
	log_must mkfile 1m "$mountpoint/data"
	log_must zfs snapshot $incr
	# Truncate the incremental stream to 10K so the receive is
	# deliberately interrupted, leaving a resumable %recv clone behind.
	log_must eval "zfs send -i $snap $incr | dd bs=10K count=1 \
	    iflag=fullblock > $sendfile"
	log_mustnot eval "zfs recv -su $recvfs < $sendfile"
	destroy_dataset "$sendfs" "-r"
	log_must rm -f "$sendfile"

	if [[ $(get_prop 'inconsistent' "$recvfs/%recv") -ne 1 ]]; then
		log_fail "Error creating temporary $recvfs/%recv clone"
	fi
}

function default_mirror_setup
{
	default_mirror_setup_noexit $1 $2 $3

	log_pass
}

#
# Given a pair of disks, set up a storage pool and dataset for the mirror
# @parameters: $1 the primary side of the mirror
#              $2 the secondary side of the mirror
# @uses: ZPOOL ZFS TESTPOOL TESTFS
function default_mirror_setup_noexit
{
	readonly func="default_mirror_setup_noexit"
	typeset primary=$1
	typeset secondary=$2

	[[ -z $primary ]] && \
	    log_fail "$func: No parameters passed"
	[[ -z $secondary ]] && \
	    log_fail "$func: No secondary partition passed"
	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL mirror $@
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS
}

#
# create a number of mirrors.
# We create a number($1) of 2 way mirrors using the pairs of disks named
# on the command line. These mirrors are *not* mounted
# @parameters: $1 the number of mirrors to create
#              $...
#              the devices to use to create the mirrors on
# @uses: ZPOOL ZFS TESTPOOL
function setup_mirrors
{
	typeset -i nmirrors=$1

	shift
	while ((nmirrors > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nmirrors ]] && rm -rf /$TESTPOOL$nmirrors
		log_must zpool create -f $TESTPOOL$nmirrors mirror $1 $2
		shift 2
		((nmirrors = nmirrors - 1))
	done
}

#
# create a number of raidz pools.
# We create a number($1) of 2 raidz pools using the pairs of disks named
# on the command line. These pools are *not* mounted
# @parameters: $1 the number of pools to create
#              $... the devices to use to create the pools on
# @uses: ZPOOL ZFS TESTPOOL
function setup_raidzs
{
	typeset -i nraidzs=$1

	shift
	while ((nraidzs > 0)); do
		log_must test -n "$1" -a -n "$2"
		[[ -d /$TESTPOOL$nraidzs ]] && rm -rf /$TESTPOOL$nraidzs
		log_must zpool create -f $TESTPOOL$nraidzs raidz $1 $2
		shift 2
		((nraidzs = nraidzs - 1))
	done
}

#
# Destroy the configured testpool mirrors.
# the mirrors are of the form ${TESTPOOL}{number}
# @uses: ZPOOL ZFS TESTPOOL
function destroy_mirrors
{
	default_cleanup_noexit

	log_pass
}

#
# Given a minimum of two disks, set up a storage pool and dataset for the raid-z
# $1 the list of disks
#
function default_raidz_setup
{
	typeset disklist="$*"
	disks=(${disklist[*]})

	if [[ ${#disks[*]} -lt 2 ]]; then
		log_fail "A raid-z requires a minimum of two disks."
	fi

	[[ -d /$TESTPOOL ]] && rm -rf /$TESTPOOL
	log_must zpool create -f $TESTPOOL raidz $disklist
	log_must zfs create $TESTPOOL/$TESTFS
	log_must zfs set mountpoint=$TESTDIR $TESTPOOL/$TESTFS

	log_pass
}

#
# Common function used to cleanup storage pools and datasets.
#
# Invoked at the start of the test suite to ensure the system
# is in a known state, and also at the end of each set of
# sub-tests to ensure errors from one set of tests doesn't
# impact the execution of the next set.
function default_cleanup
{
	default_cleanup_noexit

	log_pass
}

#
# Utility function used to list all available pool names.
# # NOTE: $KEEP is a variable containing pool names, separated by a newline # character, that must be excluded from the returned list. # function get_all_pools { zpool list -H -o name | grep -Fvx "$KEEP" | grep -v "$NO_POOLS" } function default_cleanup_noexit { typeset pool="" # # Destroying the pool will also destroy any # filesystems it contains. # if is_global_zone; then zfs unmount -a > /dev/null 2>&1 ALL_POOLS=$(get_all_pools) # Here, we loop through the pools we're allowed to # destroy, only destroying them if it's safe to do # so. while [ ! -z ${ALL_POOLS} ] do for pool in ${ALL_POOLS} do if safe_to_destroy_pool $pool ; then destroy_pool $pool fi done ALL_POOLS=$(get_all_pools) done zfs mount -a else typeset fs="" for fs in $(zfs list -H -o name \ | grep "^$ZONE_POOL/$ZONE_CTR[01234]/"); do destroy_dataset "$fs" "-Rf" done # Need cleanup here to avoid garbage dir left. for fs in $(zfs list -H -o name); do [[ $fs == /$ZONE_POOL ]] && continue [[ -d $fs ]] && log_must rm -rf $fs/* done # # Reset the $ZONE_POOL/$ZONE_CTR[01234] file systems property to # the default value # for fs in $(zfs list -H -o name); do if [[ $fs == $ZONE_POOL/$ZONE_CTR[01234] ]]; then log_must zfs set reservation=none $fs log_must zfs set recordsize=128K $fs log_must zfs set mountpoint=/$fs $fs typeset enc="" enc=$(get_prop encryption $fs) if [[ $? -ne 0 ]] || [[ -z "$enc" ]] || \ [[ "$enc" == "off" ]]; then log_must zfs set checksum=on $fs fi log_must zfs set compression=off $fs log_must zfs set atime=on $fs log_must zfs set devices=off $fs log_must zfs set exec=on $fs log_must zfs set setuid=on $fs log_must zfs set readonly=off $fs log_must zfs set snapdir=hidden $fs log_must zfs set aclmode=groupmask $fs log_must zfs set aclinherit=secure $fs fi done fi [[ -d $TESTDIR ]] && \ log_must rm -rf $TESTDIR disk1=${DISKS%% *} if is_mpath_device $disk1; then delete_partitions fi rm -f $TEST_BASE_DIR/{err,out} } # # Common function used to cleanup storage pools, file systems # and containers. 
#
function default_container_cleanup
{
	if ! is_global_zone; then
		reexport_pool
	fi

	ismounted $TESTPOOL/$TESTCTR/$TESTFS1
	[[ $? -eq 0 ]] && \
	    log_must zfs unmount $TESTPOOL/$TESTCTR/$TESTFS1

	destroy_dataset "$TESTPOOL/$TESTCTR/$TESTFS1" "-R"
	destroy_dataset "$TESTPOOL/$TESTCTR" "-Rf"

	[[ -e $TESTDIR1 ]] && \
	    log_must rm -rf $TESTDIR1 > /dev/null 2>&1

	default_cleanup
}

#
# Common function used to cleanup snapshot of file system or volume. Default to
# delete the file system's snapshot
#
# $1 snapshot name
#
function destroy_snapshot
{
	typeset snap=${1:-$TESTPOOL/$TESTFS@$TESTSNAP}

	if ! snapexists $snap; then
		log_fail "'$snap' does not exist."
	fi

	#
	# 'get_prop mountpoint' does not report the real mountpoint once the
	# snapshot is unmounted, so capture the mountpoint only while the
	# snapshot is still mounted on the current system.
	#
	typeset mtpt=""
	if ismounted $snap; then
		mtpt=$(get_prop mountpoint $snap)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $snap failed."
	fi

	destroy_dataset "$snap"
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}

#
# Common function used to cleanup clone.
#
# $1 clone name
#
function destroy_clone
{
	typeset clone=${1:-$TESTPOOL/$TESTCLONE}

	if ! datasetexists $clone; then
		# NOTE(review): grammar fixed ("does not existed").
		log_fail "'$clone' does not exist."
	fi

	# With the same reason in destroy_snapshot
	typeset mtpt=""
	if ismounted $clone; then
		mtpt=$(get_prop mountpoint $clone)
		(($? != 0)) && \
		    log_fail "get_prop mountpoint $clone failed."
	fi

	destroy_dataset "$clone"
	[[ $mtpt != "" && -d $mtpt ]] && \
	    log_must rm -rf $mtpt
}

#
# Common function used to cleanup bookmark of file system or volume. Default
# to delete the file system's bookmark.
#
# $1 bookmark name
#
function destroy_bookmark
{
	typeset bkmark=${1:-$TESTPOOL/$TESTFS#$TESTBKMARK}

	if ! bkmarkexists $bkmark; then
		# NOTE(review): message previously expanded the undefined
		# variable '$bkmarkp'; use the actual parameter instead.
		log_fail "'$bkmark' does not exist."
	fi

	destroy_dataset "$bkmark"
}

# Return 0 if a snapshot exists; $?
# otherwise
#
# $1 - snapshot name
function snapexists
{
	zfs list -H -t snapshot "$1" > /dev/null 2>&1
	return $?
}

#
# Return 0 if a bookmark exists; $? otherwise
#
# $1 - bookmark name
#
function bkmarkexists
{
	zfs list -H -t bookmark "$1" > /dev/null 2>&1
	return $?
}

#
# Return 0 if a hold exists; $? otherwise
#
# $1 - hold tag
# $2 - snapshot name
#
function holdexists
{
	# NOTE(review): plain grep matches substrings, so a tag that is a
	# prefix of another would also match — confirm callers rely on this.
	zfs holds "$2" | awk '{ print $2 }' | grep "$1" > /dev/null 2>&1
	return $?
}

#
# Set a property to a certain value on a dataset.
# Sets a property of the dataset to the value as passed in.
# @param:
#	$1 dataset who's property is being set
#	$2 property to set
#	$3 value to set property to
# @return:
#	0 if the property could be set.
#	non-zero otherwise.
# @use: ZFS
#
function dataset_setprop
{
	typeset fn=dataset_setprop

	if (($# < 3)); then
		log_note "$fn: Insufficient parameters (need 3, had $#)"
		return 1
	fi
	typeset output=
	output=$(zfs set $2=$3 $1 2>&1)
	typeset rv=$?
	if ((rv != 0)); then
		log_note "Setting property on $1 failed."
		log_note "property $2=$3"
		log_note "Return Code: $rv"
		log_note "Output: $output"
		return $rv
	fi
	return 0
}

#
# Assign suite defined dataset properties.
# This function is used to apply the suite's defined default set of
# properties to a dataset.
# @parameters: $1 dataset to use
# @uses: ZFS COMPRESSION_PROP CHECKSUM_PROP
# @returns:
#	0 if the dataset has been altered.
#	1 if no pool name was passed in.
#	2 if the dataset could not be found.
#	3 if the dataset could not have its properties set.
# function dataset_set_defaultproperties { typeset dataset="$1" [[ -z $dataset ]] && return 1 typeset confset= typeset -i found=0 for confset in $(zfs list); do if [[ $dataset = $confset ]]; then found=1 break fi done [[ $found -eq 0 ]] && return 2 if [[ -n $COMPRESSION_PROP ]]; then dataset_setprop $dataset compression $COMPRESSION_PROP || \ return 3 log_note "Compression set to '$COMPRESSION_PROP' on $dataset" fi if [[ -n $CHECKSUM_PROP ]]; then dataset_setprop $dataset checksum $CHECKSUM_PROP || \ return 3 log_note "Checksum set to '$CHECKSUM_PROP' on $dataset" fi return 0 } # # Check a numeric assertion # @parameter: $@ the assertion to check # @output: big loud notice if assertion failed # @use: log_fail # function assert { (($@)) || log_fail "$@" } # # Function to format partition size of a disk # Given a disk cxtxdx reduces all partitions # to 0 size # function zero_partitions # { typeset diskname=$1 typeset i if is_freebsd; then gpart destroy -F $diskname elif is_linux; then DSK=$DEV_DSKDIR/$diskname DSK=$(echo $DSK | sed -e "s|//|/|g") log_must parted $DSK -s -- mklabel gpt blockdev --rereadpt $DSK 2>/dev/null block_device_wait else for i in 0 1 3 4 5 6 7 do log_must set_partition $i "" 0mb $diskname done fi return 0 } # # Given a slice, size and disk, this function # formats the slice to the specified size. # Size should be specified with units as per # the `format` command requirements eg. 100mb 3gb # # NOTE: This entire interface is problematic for the Linux parted utility # which requires the end of the partition to be specified. It would be # best to retire this interface and replace it with something more flexible. # At the moment a best effort is made. # # arguments: function set_partition { typeset -i slicenum=$1 typeset start=$2 typeset size=$3 typeset disk=${4#$DEV_DSKDIR/} disk=${disk#$DEV_RDSKDIR/} case "$(uname)" in Linux) if [[ -z $size || -z $disk ]]; then log_fail "The size or disk name is unspecified." 
fi disk=$DEV_DSKDIR/$disk typeset size_mb=${size%%[mMgG]} size_mb=${size_mb%%[mMgG][bB]} if [[ ${size:1:1} == 'g' ]]; then ((size_mb = size_mb * 1024)) fi # Create GPT partition table when setting slice 0 or # when the device doesn't already contain a GPT label. parted $disk -s -- print 1 >/dev/null typeset ret_val=$? if [[ $slicenum -eq 0 || $ret_val -ne 0 ]]; then parted $disk -s -- mklabel gpt if [[ $? -ne 0 ]]; then log_note "Failed to create GPT partition table on $disk" return 1 fi fi # When no start is given align on the first cylinder. if [[ -z "$start" ]]; then start=1 fi # Determine the cylinder size for the device and using # that calculate the end offset in cylinders. typeset -i cly_size_kb=0 cly_size_kb=$(parted -m $disk -s -- \ unit cyl print | head -3 | tail -1 | \ awk -F '[:k.]' '{print $4}') ((end = (size_mb * 1024 / cly_size_kb) + start)) parted $disk -s -- \ mkpart part$slicenum ${start}cyl ${end}cyl typeset ret_val=$? if [[ $ret_val -ne 0 ]]; then log_note "Failed to create partition $slicenum on $disk" return 1 fi blockdev --rereadpt $disk 2>/dev/null block_device_wait $disk ;; FreeBSD) if [[ -z $size || -z $disk ]]; then log_fail "The size or disk name is unspecified." fi disk=$DEV_DSKDIR/$disk if [[ $slicenum -eq 0 ]] || ! gpart show $disk >/dev/null 2>&1; then gpart destroy -F $disk >/dev/null 2>&1 gpart create -s GPT $disk if [[ $? -ne 0 ]]; then log_note "Failed to create GPT partition table on $disk" return 1 fi fi typeset index=$((slicenum + 1)) if [[ -n $start ]]; then start="-b $start" fi gpart add -t freebsd-zfs $start -s $size -i $index $disk if [[ $ret_val -ne 0 ]]; then log_note "Failed to create partition $slicenum on $disk" return 1 fi block_device_wait $disk ;; *) if [[ -z $slicenum || -z $size || -z $disk ]]; then log_fail "The slice, size or disk name is unspecified." 
fi typeset format_file=/var/tmp/format_in.$$ echo "partition" >$format_file echo "$slicenum" >> $format_file echo "" >> $format_file echo "" >> $format_file echo "$start" >> $format_file echo "$size" >> $format_file echo "label" >> $format_file echo "" >> $format_file echo "q" >> $format_file echo "q" >> $format_file format -e -s -d $disk -f $format_file typeset ret_val=$? rm -f $format_file ;; esac if [[ $ret_val -ne 0 ]]; then log_note "Unable to format $disk slice $slicenum to $size" return 1 fi return 0 } # # Delete all partitions on all disks - this is specifically for the use of multipath # devices which currently can only be used in the test suite as raw/un-partitioned # devices (ie a zpool cannot be created on a whole mpath device that has partitions) # function delete_partitions { typeset disk if [[ -z $DISKSARRAY ]]; then DISKSARRAY=$DISKS fi if is_linux; then typeset -i part for disk in $DISKSARRAY; do for (( part = 1; part < MAX_PARTITIONS; part++ )); do typeset partition=${disk}${SLICE_PREFIX}${part} parted $DEV_DSKDIR/$disk -s rm $part > /dev/null 2>&1 if lsblk | grep -qF ${partition}; then log_fail "Partition ${partition} not deleted" else log_note "Partition ${partition} deleted" fi done done elif is_freebsd; then for disk in $DISKSARRAY; do if gpart destroy -F $disk; then log_note "Partitions for ${disk} deleted" else log_fail "Partitions for ${disk} not deleted" fi done fi } # # Get the end cyl of the given slice # function get_endslice # { typeset disk=$1 typeset slice=$2 if [[ -z $disk || -z $slice ]] ; then log_fail "The disk name or slice number is unspecified." 
fi case "$(uname)" in Linux) endcyl=$(parted -s $DEV_DSKDIR/$disk -- unit cyl print | \ - grep "part${slice}" | \ - awk '{print $3}' | \ - sed 's,cyl,,') + awk "/part${slice}/"' {sub(/cyl/, "", $3); print $3}') ((endcyl = (endcyl + 1))) ;; FreeBSD) disk=${disk#/dev/zvol/} disk=${disk%p*} slice=$((slice + 1)) endcyl=$(gpart show $disk | \ awk -v slice=$slice '$3 == slice { print $1 + $2 }') ;; *) disk=${disk#/dev/dsk/} disk=${disk#/dev/rdsk/} disk=${disk%s*} typeset -i ratio=0 ratio=$(prtvtoc /dev/rdsk/${disk}s2 | \ grep "sectors\/cylinder" | \ awk '{print $2}') if ((ratio == 0)); then return fi typeset -i endcyl=$(prtvtoc -h /dev/rdsk/${disk}s2 | nawk -v token="$slice" '{if ($1==token) print $6}') ((endcyl = (endcyl + 1) / ratio)) ;; esac echo $endcyl } # # Given a size,disk and total slice number, this function formats the # disk slices from 0 to the total slice number with the same specified # size. # function partition_disk # { typeset -i i=0 typeset slice_size=$1 typeset disk_name=$2 typeset total_slices=$3 typeset cyl zero_partitions $disk_name while ((i < $total_slices)); do if ! is_linux; then if ((i == 2)); then ((i = i + 1)) continue fi fi log_must set_partition $i "$cyl" $slice_size $disk_name cyl=$(get_endslice $disk_name $i) ((i = i+1)) done } # # This function continues to write to a filenum number of files into dirnum # number of directories until either file_write returns an error or the # maximum number of files per directory have been written. # # Usage: # fill_fs [destdir] [dirnum] [filenum] [bytes] [num_writes] [data] # # Return value: 0 on success # non 0 on error # # Where : # destdir: is the directory where everything is to be created under # dirnum: the maximum number of subdirectories to use, -1 no limit # filenum: the maximum number of files per subdirectory # bytes: number of bytes to write # num_writes: number of types to write out bytes # data: the data that will be written # # E.g. 
# fill_fs /testdir 20 25 1024 256 0 # # Note: bytes * num_writes equals the size of the testfile # function fill_fs # destdir dirnum filenum bytes num_writes data { typeset destdir=${1:-$TESTDIR} typeset -i dirnum=${2:-50} typeset -i filenum=${3:-50} typeset -i bytes=${4:-8192} typeset -i num_writes=${5:-10240} typeset data=${6:-0} mkdir -p $destdir/{1..$dirnum} for f in $destdir/{1..$dirnum}/$TESTFILE{1..$filenum}; do file_write -o create -f $f -b $bytes -c $num_writes -d $data \ || return $? done return 0 } # # Simple function to get the specified property. If unable to # get the property then exits. # # Note property is in 'parsable' format (-p) # function get_prop # property dataset { typeset prop_val typeset prop=$1 typeset dataset=$2 prop_val=$(zfs get -pH -o value $prop $dataset 2>/dev/null) if [[ $? -ne 0 ]]; then log_note "Unable to get $prop property for dataset " \ "$dataset" return 1 fi echo "$prop_val" return 0 } # # Simple function to get the specified property of pool. If unable to # get the property then exits. # # Note property is in 'parsable' format (-p) # function get_pool_prop # property pool { typeset prop_val typeset prop=$1 typeset pool=$2 if poolexists $pool ; then prop_val=$(zpool get -pH $prop $pool 2>/dev/null | tail -1 | \ awk '{print $3}') if [[ $? -ne 0 ]]; then log_note "Unable to get $prop property for pool " \ "$pool" return 1 fi else log_note "Pool $pool not exists." return 1 fi echo "$prop_val" return 0 } # Return 0 if a pool exists; $? otherwise # # $1 - pool name function poolexists { typeset pool=$1 if [[ -z $pool ]]; then log_note "No pool name given." return 1 fi zpool get name "$pool" > /dev/null 2>&1 return $? } # Return 0 if all the specified datasets exist; $? otherwise # # $1-n dataset name function datasetexists { if (($# == 0)); then log_note "No dataset name given." return 1 fi while (($# > 0)); do zfs get name $1 > /dev/null 2>&1 || \ return $? 
shift done return 0 } # return 0 if none of the specified datasets exists, otherwise return 1. # # $1-n dataset name function datasetnonexists { if (($# == 0)); then log_note "No dataset name given." return 1 fi while (($# > 0)); do zfs list -H -t filesystem,snapshot,volume $1 > /dev/null 2>&1 \ && return 1 shift done return 0 } function is_shared_freebsd { typeset fs=$1 pgrep -q mountd && showmount -E | grep -qx $fs } function is_shared_illumos { typeset fs=$1 typeset mtpt for mtpt in `share | awk '{print $2}'` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done typeset stat=$(svcs -H -o STA nfs/server:default) if [[ $stat != "ON" ]]; then log_note "Current nfs/server status: $stat" fi return 1 } function is_shared_linux { typeset fs=$1 typeset mtpt for mtpt in `share | awk '{print $1}'` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done return 1 } # # Given a mountpoint, or a dataset name, determine if it is shared via NFS. # # Returns 0 if shared, 1 otherwise. # function is_shared { typeset fs=$1 typeset mtpt if [[ $fs != "/"* ]] ; then if datasetnonexists "$fs" ; then return 1 else mtpt=$(get_prop mountpoint "$fs") case $mtpt in none|legacy|-) return 1 ;; *) fs=$mtpt ;; esac fi fi case $(uname) in FreeBSD) is_shared_freebsd "$fs" ;; Linux) is_shared_linux "$fs" ;; *) is_shared_illumos "$fs" ;; esac } function is_exported_illumos { typeset fs=$1 typeset mtpt for mtpt in `awk '{print $1}' /etc/dfs/sharetab` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done return 1 } function is_exported_freebsd { typeset fs=$1 typeset mtpt for mtpt in `awk '{print $1}' /etc/zfs/exports` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done return 1 } function is_exported_linux { typeset fs=$1 typeset mtpt for mtpt in `awk '{print $1}' /etc/exports.d/zfs.exports` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done return 1 } # # Given a mountpoint, or a dataset name, determine if it is exported via # the os-specific NFS exports file. # # Returns 0 if exported, 1 otherwise. 
# function is_exported { typeset fs=$1 typeset mtpt if [[ $fs != "/"* ]] ; then if datasetnonexists "$fs" ; then return 1 else mtpt=$(get_prop mountpoint "$fs") case $mtpt in none|legacy|-) return 1 ;; *) fs=$mtpt ;; esac fi fi case $(uname) in FreeBSD) is_exported_freebsd "$fs" ;; Linux) is_exported_linux "$fs" ;; *) is_exported_illumos "$fs" ;; esac } # # Given a dataset name determine if it is shared via SMB. # # Returns 0 if shared, 1 otherwise. # function is_shared_smb { typeset fs=$1 typeset mtpt if datasetnonexists "$fs" ; then return 1 else - fs=$(echo $fs | sed 's@/@_@g') + fs=$(echo $fs | tr / _) fi if is_linux; then for mtpt in `net usershare list | awk '{print $1}'` ; do if [[ $mtpt == $fs ]] ; then return 0 fi done return 1 else log_note "Currently unsupported by the test framework" return 1 fi } # # Given a mountpoint, determine if it is not shared via NFS. # # Returns 0 if not shared, 1 otherwise. # function not_shared { typeset fs=$1 is_shared $fs if (($? == 0)); then return 1 fi return 0 } # # Given a dataset determine if it is not shared via SMB. # # Returns 0 if not shared, 1 otherwise. # function not_shared_smb { typeset fs=$1 is_shared_smb $fs if (($? == 0)); then return 1 fi return 0 } # # Helper function to unshare a mountpoint. # function unshare_fs #fs { typeset fs=$1 is_shared $fs || is_shared_smb $fs if (($? == 0)); then zfs unshare $fs || log_fail "zfs unshare $fs failed" fi return 0 } # # Helper function to share a NFS mountpoint. # function share_nfs #fs { typeset fs=$1 if is_linux; then is_shared $fs if (($? != 0)); then log_must share "*:$fs" fi else is_shared $fs if (($? != 0)); then log_must share -F nfs $fs fi fi return 0 } # # Helper function to unshare a NFS mountpoint. # function unshare_nfs #fs { typeset fs=$1 if is_linux; then is_shared $fs if (($? == 0)); then log_must unshare -u "*:$fs" fi else is_shared $fs if (($? == 0)); then log_must unshare -F nfs $fs fi fi return 0 } # # Helper function to show NFS shares. 
# function showshares_nfs { if is_linux; then share -v else share -F nfs fi return 0 } # # Helper function to show SMB shares. # function showshares_smb { if is_linux; then net usershare list else share -F smb fi return 0 } function check_nfs { if is_linux; then share -s elif is_freebsd; then showmount -e else log_unsupported "Unknown platform" fi if [[ $? -ne 0 ]]; then log_unsupported "The NFS utilities are not installed" fi } # # Check NFS server status and trigger it online. # function setup_nfs_server { # Cannot share directory in non-global zone. # if ! is_global_zone; then log_note "Cannot trigger NFS server by sharing in LZ." return fi if is_linux; then # # Re-synchronize /var/lib/nfs/etab with /etc/exports and # /etc/exports.d./* to provide a clean test environment. # log_must share -r log_note "NFS server must be started prior to running ZTS." return elif is_freebsd; then kill -s HUP $(cat /var/run/mountd.pid) log_note "NFS server must be started prior to running ZTS." return fi typeset nfs_fmri="svc:/network/nfs/server:default" if [[ $(svcs -Ho STA $nfs_fmri) != "ON" ]]; then # # Only really sharing operation can enable NFS server # to online permanently. # typeset dummy=/tmp/dummy if [[ -d $dummy ]]; then log_must rm -rf $dummy fi log_must mkdir $dummy log_must share $dummy # # Waiting for fmri's status to be the final status. # Otherwise, in transition, an asterisk (*) is appended for # instances, unshare will reverse status to 'DIS' again. # # Waiting for 1's at least. 
# log_must sleep 1 timeout=10 while [[ timeout -ne 0 && $(svcs -Ho STA $nfs_fmri) == *'*' ]] do log_must sleep 1 ((timeout -= 1)) done log_must unshare $dummy log_must rm -rf $dummy fi log_note "Current NFS status: '$(svcs -Ho STA,FMRI $nfs_fmri)'" } # # To verify whether calling process is in global zone # # Return 0 if in global zone, 1 in non-global zone # function is_global_zone { if is_linux || is_freebsd; then return 0 else typeset cur_zone=$(zonename 2>/dev/null) if [[ $cur_zone != "global" ]]; then return 1 fi return 0 fi } # # Verify whether test is permitted to run from # global zone, local zone, or both # # $1 zone limit, could be "global", "local", or "both"(no limit) # # Return 0 if permitted, otherwise exit with log_unsupported # function verify_runnable # zone limit { typeset limit=$1 [[ -z $limit ]] && return 0 if is_global_zone ; then case $limit in global|both) ;; local) log_unsupported "Test is unable to run from "\ "global zone." ;; *) log_note "Warning: unknown limit $limit - " \ "use both." ;; esac else case $limit in local|both) ;; global) log_unsupported "Test is unable to run from "\ "local zone." ;; *) log_note "Warning: unknown limit $limit - " \ "use both." ;; esac reexport_pool fi return 0 } # Return 0 if create successfully or the pool exists; $? otherwise # Note: In local zones, this function should return 0 silently. # # $1 - pool name # $2-n - [keyword] devs_list function create_pool #pool devs_list { typeset pool=${1%%/*} shift if [[ -z $pool ]]; then log_note "Missing pool name." return 1 fi if poolexists $pool ; then destroy_pool $pool fi if is_global_zone ; then [[ -d /$pool ]] && rm -rf /$pool log_must zpool create -f $pool $@ fi return 0 } # Return 0 if destroy successfully or the pool exists; $? otherwise # Note: In local zones, this function should return 0 silently. # # $1 - pool name # Destroy pool with the given parameters. 
function destroy_pool #pool
{
	typeset pool=${1%%/*}
	typeset mtpt

	if [[ -z $pool ]]; then
		log_note "No pool name given."
		return 1
	fi

	if is_global_zone ; then
		if poolexists "$pool" ; then
			# Capture the mountpoint before the destroy so the
			# leftover directory can be removed afterwards.
			mtpt=$(get_prop mountpoint "$pool")

			# At times, syseventd/udev activity can cause attempts
			# to destroy a pool to fail with EBUSY. We retry a few
			# times allowing failures before requiring the destroy
			# to succeed.
			log_must_busy zpool destroy -f $pool

			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Pool does not exist. ($pool)"
			return 1
		fi
	fi

	return 0
}

# Return 0 if created successfully; $? otherwise
#
# $1 - dataset name
# $2-n - dataset options
function create_dataset #dataset dataset_options
{
	typeset dataset=$1
	shift

	if [[ -z $dataset ]]; then
		log_note "Missing dataset name."
		return 1
	fi

	# Re-create from scratch if the dataset already exists.
	if datasetexists $dataset ; then
		destroy_dataset $dataset
	fi

	log_must zfs create $@ $dataset

	return 0
}

# Return 0 if destroy successfully or the dataset exists; $? otherwise
# Note: In local zones, this function should return 0 silently.
#
# $1 - dataset name
# $2 - custom arguments for zfs destroy
# Destroy dataset with the given parameters.
function destroy_dataset #dataset #args
{
	typeset dataset=$1
	typeset mtpt
	typeset args=${2:-""}

	if [[ -z $dataset ]]; then
		log_note "No dataset name given."
		return 1
	fi

	if is_global_zone ; then
		if datasetexists "$dataset" ; then
			mtpt=$(get_prop mountpoint "$dataset")
			# 'zfs destroy' can transiently fail with EBUSY;
			# retry before treating it as a failure.
			log_must_busy zfs destroy $args $dataset

			# Remove the leftover mountpoint directory, if any.
			[[ -d $mtpt ]] && \
				log_must rm -rf $mtpt
		else
			log_note "Dataset does not exist. ($dataset)"
			return 1
		fi
	fi

	return 0
}

#
# Firstly, create a pool with 5 datasets. Then, create a single zone and
# export the 5 datasets to it. In addition, we also add a ZFS filesystem
# and a zvol device to the zone.
# # $1 zone name # $2 zone root directory prefix # $3 zone ip # function zfs_zones_setup #zone_name zone_root zone_ip { typeset zone_name=${1:-$(hostname)-z} typeset zone_root=${2:-"/zone_root"} typeset zone_ip=${3:-"10.1.1.10"} typeset prefix_ctr=$ZONE_CTR typeset pool_name=$ZONE_POOL typeset -i cntctr=5 typeset -i i=0 # Create pool and 5 container within it # [[ -d /$pool_name ]] && rm -rf /$pool_name log_must zpool create -f $pool_name $DISKS while ((i < cntctr)); do log_must zfs create $pool_name/$prefix_ctr$i ((i += 1)) done # create a zvol log_must zfs create -V 1g $pool_name/zone_zvol block_device_wait # # If current system support slog, add slog device for pool # if verify_slog_support ; then typeset sdevs="$TEST_BASE_DIR/sdev1 $TEST_BASE_DIR/sdev2" log_must mkfile $MINVDEVSIZE $sdevs log_must zpool add $pool_name log mirror $sdevs fi # this isn't supported just yet. # Create a filesystem. In order to add this to # the zone, it must have it's mountpoint set to 'legacy' # log_must zfs create $pool_name/zfs_filesystem # log_must zfs set mountpoint=legacy $pool_name/zfs_filesystem [[ -d $zone_root ]] && \ log_must rm -rf $zone_root/$zone_name [[ ! 
-d $zone_root ]] && \ log_must mkdir -p -m 0700 $zone_root/$zone_name # Create zone configure file and configure the zone # typeset zone_conf=/tmp/zone_conf.$$ echo "create" > $zone_conf echo "set zonepath=$zone_root/$zone_name" >> $zone_conf echo "set autoboot=true" >> $zone_conf i=0 while ((i < cntctr)); do echo "add dataset" >> $zone_conf echo "set name=$pool_name/$prefix_ctr$i" >> \ $zone_conf echo "end" >> $zone_conf ((i += 1)) done # add our zvol to the zone echo "add device" >> $zone_conf echo "set match=/dev/zvol/dsk/$pool_name/zone_zvol" >> $zone_conf echo "end" >> $zone_conf # add a corresponding zvol rdsk to the zone echo "add device" >> $zone_conf echo "set match=$ZVOL_RDEVDIR/$pool_name/zone_zvol" >> $zone_conf echo "end" >> $zone_conf # once it's supported, we'll add our filesystem to the zone # echo "add fs" >> $zone_conf # echo "set type=zfs" >> $zone_conf # echo "set special=$pool_name/zfs_filesystem" >> $zone_conf # echo "set dir=/export/zfs_filesystem" >> $zone_conf # echo "end" >> $zone_conf echo "verify" >> $zone_conf echo "commit" >> $zone_conf log_must zonecfg -z $zone_name -f $zone_conf log_must rm -f $zone_conf # Install the zone zoneadm -z $zone_name install if (($? == 0)); then log_note "SUCCESS: zoneadm -z $zone_name install" else log_fail "FAIL: zoneadm -z $zone_name install" fi # Install sysidcfg file # typeset sysidcfg=$zone_root/$zone_name/root/etc/sysidcfg echo "system_locale=C" > $sysidcfg echo "terminal=dtterm" >> $sysidcfg echo "network_interface=primary {" >> $sysidcfg echo "hostname=$zone_name" >> $sysidcfg echo "}" >> $sysidcfg echo "name_service=NONE" >> $sysidcfg echo "root_password=mo791xfZ/SFiw" >> $sysidcfg echo "security_policy=NONE" >> $sysidcfg echo "timezone=US/Eastern" >> $sysidcfg # Boot this zone log_must zoneadm -z $zone_name boot } # # Reexport TESTPOOL & TESTPOOL(1-4) # function reexport_pool { typeset -i cntctr=5 typeset -i i=0 while ((i < cntctr)); do if ((i == 0)); then TESTPOOL=$ZONE_POOL/$ZONE_CTR$i if ! 
ismounted $TESTPOOL; then log_must zfs mount $TESTPOOL fi else eval TESTPOOL$i=$ZONE_POOL/$ZONE_CTR$i if eval ! ismounted \$TESTPOOL$i; then log_must eval zfs mount \$TESTPOOL$i fi fi ((i += 1)) done } # # Verify a given disk or pool state # # Return 0 is pool/disk matches expected state, 1 otherwise # function check_state # pool disk state{online,offline,degraded} { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset state=$3 [[ -z $pool ]] || [[ -z $state ]] \ && log_fail "Arguments invalid or missing" if [[ -z $disk ]]; then #check pool state only zpool get -H -o value health $pool \ | grep -i "$state" > /dev/null 2>&1 else zpool status -v $pool | grep "$disk" \ | grep -i "$state" > /dev/null 2>&1 fi return $? } # # Get the mountpoint of snapshot # For the snapshot use /.zfs/snapshot/ # as its mountpoint # function snapshot_mountpoint { typeset dataset=${1:-$TESTPOOL/$TESTFS@$TESTSNAP} if [[ $dataset != *@* ]]; then log_fail "Error name of snapshot '$dataset'." fi typeset fs=${dataset%@*} typeset snap=${dataset#*@} if [[ -z $fs || -z $snap ]]; then log_fail "Error name of snapshot '$dataset'." fi echo $(get_prop mountpoint $fs)/.zfs/snapshot/$snap } # # Given a device and 'ashift' value verify it's correctly set on every label # function verify_ashift # device ashift { typeset device="$1" typeset ashift="$2" zdb -e -lll $device | awk -v ashift=$ashift '/ashift: / { if (ashift != $2) exit 1; else count++; } END { if (count != 4) exit 1; else exit 0; }' return $? } # # Given a pool and file system, this function will verify the file system # using the zdb internal tool. Note that the pool is exported and imported # to ensure it has consistent state. 
# function verify_filesys # pool filesystem dir { typeset pool="$1" typeset filesys="$2" typeset zdbout="/tmp/zdbout.$$" shift shift typeset dirs=$@ typeset search_path="" log_note "Calling zdb to verify filesystem '$filesys'" zfs unmount -a > /dev/null 2>&1 log_must zpool export $pool if [[ -n $dirs ]] ; then for dir in $dirs ; do search_path="$search_path -d $dir" done fi log_must zpool import $search_path $pool zdb -cudi $filesys > $zdbout 2>&1 if [[ $? != 0 ]]; then log_note "Output: zdb -cudi $filesys" cat $zdbout log_fail "zdb detected errors with: '$filesys'" fi log_must zfs mount -a log_must rm -rf $zdbout } # # Given a pool issue a scrub and verify that no checksum errors are reported. # function verify_pool { typeset pool=${1:-$TESTPOOL} log_must zpool scrub $pool log_must wait_scrubbed $pool typeset -i cksum=$(zpool status $pool | awk ' !NF { isvdev = 0 } isvdev { errors += $NF } /CKSUM$/ { isvdev = 1 } END { print errors } ') if [[ $cksum != 0 ]]; then log_must zpool status -v log_fail "Unexpected CKSUM errors found on $pool ($cksum)" fi } # # Given a pool, and this function list all disks in the pool # function get_disklist # pool { typeset disklist="" disklist=$(zpool iostat -v $1 | nawk '(NR >4) {print $1}' | \ grep -v "\-\-\-\-\-" | \ egrep -v -e "^(mirror|raidz[1-3]|draid[1-3]|spare|log|cache|special|dedup)|\-[0-9]$") echo $disklist } # # Given a pool, and this function list all disks in the pool with their full # path (like "/dev/sda" instead of "sda"). # function get_disklist_fullpath # pool { args="-P $1" get_disklist $args } # /** # This function kills a given list of processes after a time period. We use # this in the stress tests instead of STF_TIMEOUT so that we can have processes # run for a fixed amount of time, yet still pass. Tests that hit STF_TIMEOUT # would be listed as FAIL, which we don't want : we're happy with stress tests # running for a certain amount of time, then finishing. 
# # @param $1 the time in seconds after which we should terminate these processes # @param $2..$n the processes we wish to terminate. # */ function stress_timeout { typeset -i TIMEOUT=$1 shift typeset cpids="$@" log_note "Waiting for child processes($cpids). " \ "It could last dozens of minutes, please be patient ..." log_must sleep $TIMEOUT log_note "Killing child processes after ${TIMEOUT} stress timeout." typeset pid for pid in $cpids; do ps -p $pid > /dev/null 2>&1 if (($? == 0)); then log_must kill -USR1 $pid fi done } # # Verify a given hotspare disk is inuse or avail # # Return 0 is pool/disk matches expected state, 1 otherwise # function check_hotspare_state # pool disk state{inuse,avail} { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset state=$3 cur_state=$(get_device_state $pool $disk "spares") if [[ $state != ${cur_state} ]]; then return 1 fi return 0 } # # Wait until a hotspare transitions to a given state or times out. # # Return 0 when pool/disk matches expected state, 1 on timeout. 
# function wait_hotspare_state # pool disk state timeout { typeset pool=$1 typeset disk=${2#*$DEV_DSKDIR/} typeset state=$3 typeset timeout=${4:-60} typeset -i i=0 while [[ $i -lt $timeout ]]; do if check_hotspare_state $pool $disk $state; then return 0 fi i=$((i+1)) sleep 1 done return 1 } # # Verify a given slog disk is inuse or avail # # Return 0 is pool/disk matches expected state, 1 otherwise # function check_slog_state # pool disk state{online,offline,unavail} { typeset pool=$1 typeset disk=${2#$DEV_DSKDIR/} typeset state=$3 cur_state=$(get_device_state $pool $disk "logs") if [[ $state != ${cur_state} ]]; then return 1 fi return 0 } # # Verify a given vdev disk is inuse or avail # # Return 0 is pool/disk matches expected state, 1 otherwise # function check_vdev_state # pool disk state{online,offline,unavail} { typeset pool=$1 typeset disk=${2#*$DEV_DSKDIR/} typeset state=$3 cur_state=$(get_device_state $pool $disk) if [[ $state != ${cur_state} ]]; then return 1 fi return 0 } # # Wait until a vdev transitions to a given state or times out. # # Return 0 when pool/disk matches expected state, 1 on timeout. # function wait_vdev_state # pool disk state timeout { typeset pool=$1 typeset disk=${2#*$DEV_DSKDIR/} typeset state=$3 typeset timeout=${4:-60} typeset -i i=0 while [[ $i -lt $timeout ]]; do if check_vdev_state $pool $disk $state; then return 0 fi i=$((i+1)) sleep 1 done return 1 } # # Check the output of 'zpool status -v ', # and to see if the content of contain the specified. # # Return 0 is contain, 1 otherwise # function check_pool_status # pool token keyword { typeset pool=$1 typeset token=$2 typeset keyword=$3 typeset verbose=${4:-false} scan=$(zpool status -v "$pool" 2>/dev/null | nawk -v token="$token:" ' ($1==token) {print $0}') if [[ $verbose == true ]]; then log_note $scan fi echo $scan | egrep -i "$keyword" > /dev/null 2>&1 return $? 
} # # The following functions are instance of check_pool_status() # is_pool_resilvering - to check if the pool resilver is in progress # is_pool_resilvered - to check if the pool resilver is completed # is_pool_scrubbing - to check if the pool scrub is in progress # is_pool_scrubbed - to check if the pool scrub is completed # is_pool_scrub_stopped - to check if the pool scrub is stopped # is_pool_scrub_paused - to check if the pool scrub has paused # is_pool_removing - to check if the pool removing is a vdev # is_pool_removed - to check if the pool remove is completed # is_pool_discarding - to check if the pool checkpoint is being discarded # function is_pool_resilvering #pool { check_pool_status "$1" "scan" \ "resilver[ ()0-9A-Za-z:_-]* in progress since" $2 return $? } function is_pool_resilvered #pool { check_pool_status "$1" "scan" "resilvered " $2 return $? } function is_pool_scrubbing #pool { check_pool_status "$1" "scan" "scrub in progress since " $2 return $? } function is_pool_scrubbed #pool { check_pool_status "$1" "scan" "scrub repaired" $2 return $? } function is_pool_scrub_stopped #pool { check_pool_status "$1" "scan" "scrub canceled" $2 return $? } function is_pool_scrub_paused #pool { check_pool_status "$1" "scan" "scrub paused since " $2 return $? } function is_pool_removing #pool { check_pool_status "$1" "remove" "in progress since " return $? } function is_pool_removed #pool { check_pool_status "$1" "remove" "completed on" return $? } function is_pool_discarding #pool { check_pool_status "$1" "checkpoint" "discarding" return $? } function wait_for_degraded { typeset pool=$1 typeset timeout=${2:-30} typeset t0=$SECONDS while :; do [[ $(get_pool_prop health $pool) == "DEGRADED" ]] && break log_note "$pool is not yet degraded." sleep 1 if ((SECONDS - t0 > $timeout)); then log_note "$pool not degraded after $timeout seconds." 
return 1 fi done return 0 } # # Use create_pool()/destroy_pool() to clean up the information in # in the given disk to avoid slice overlapping. # function cleanup_devices #vdevs { typeset pool="foopool$$" for vdev in $@; do zero_partitions $vdev done poolexists $pool && destroy_pool $pool create_pool $pool $@ destroy_pool $pool return 0 } #/** # A function to find and locate free disks on a system or from given # disks as the parameter. It works by locating disks that are in use # as swap devices and dump devices, and also disks listed in /etc/vfstab # # $@ given disks to find which are free, default is all disks in # the test system # # @return a string containing the list of available disks #*/ function find_disks { # Trust provided list, no attempt is made to locate unused devices. if is_linux || is_freebsd; then echo "$@" return fi sfi=/tmp/swaplist.$$ dmpi=/tmp/dumpdev.$$ max_finddisksnum=${MAX_FINDDISKSNUM:-6} swap -l > $sfi dumpadm > $dmpi 2>/dev/null # write an awk script that can process the output of format # to produce a list of disks we know about. Note that we have # to escape "$2" so that the shell doesn't interpret it while # we're creating the awk script. # ------------------- cat > /tmp/find_disks.awk </dev/null | /tmp/find_disks.awk)} rm /tmp/find_disks.awk unused="" for disk in $disks; do # Check for mounted grep "${disk}[sp]" /etc/mnttab >/dev/null (($? == 0)) && continue # Check for swap grep "${disk}[sp]" $sfi >/dev/null (($? == 0)) && continue # check for dump device grep "${disk}[sp]" $dmpi >/dev/null (($? == 0)) && continue # check to see if this disk hasn't been explicitly excluded # by a user-set environment variable echo "${ZFS_HOST_DEVICES_IGNORE}" | grep "${disk}" > /dev/null (($? == 0)) && continue unused_candidates="$unused_candidates $disk" done rm $sfi rm $dmpi # now just check to see if those disks do actually exist # by looking for a device pointing to the first slice in # each case. 
limit the number to max_finddisksnum count=0 for disk in $unused_candidates; do if is_disk_device $DEV_DSKDIR/${disk}s0 && \ [ $count -lt $max_finddisksnum ]; then unused="$unused $disk" # do not impose limit if $@ is provided [[ -z $@ ]] && ((count = count + 1)) fi done # finally, return our disk list echo $unused } function add_user_freebsd # { typeset group=$1 typeset user=$2 typeset basedir=$3 # Check to see if the user exists. if id $user > /dev/null 2>&1; then return 0 fi # Assign 1000 as the base uid typeset -i uid=1000 while true; do typeset -i ret pw useradd -u $uid -g $group -d $basedir/$user -m -n $user ret=$? case $ret in 0) break ;; # The uid is not unique 65) ((uid += 1)) ;; *) return 1 ;; esac if [[ $uid == 65000 ]]; then log_fail "No user id available under 65000 for $user" fi done # Silence MOTD touch $basedir/$user/.hushlogin return 0 } # # Delete the specified user. # # $1 login name # function del_user_freebsd # { typeset user=$1 if id $user > /dev/null 2>&1; then log_must pw userdel $user fi return 0 } # # Select valid gid and create specified group. # # $1 group name # function add_group_freebsd # { typeset group=$1 # See if the group already exists. if pw groupshow $group >/dev/null 2>&1; then return 0 fi # Assign 1000 as the base gid typeset -i gid=1000 while true; do pw groupadd -g $gid -n $group > /dev/null 2>&1 typeset -i ret=$? case $ret in 0) return 0 ;; # The gid is not unique 65) ((gid += 1)) ;; *) return 1 ;; esac if [[ $gid == 65000 ]]; then log_fail "No user id available under 65000 for $group" fi done } # # Delete the specified group. # # $1 group name # function del_group_freebsd # { typeset group=$1 pw groupdel -n $group > /dev/null 2>&1 typeset -i ret=$? case $ret in # Group does not exist, or was deleted successfully. 
0|6|65) return 0 ;; # Name already exists as a group name 9) log_must pw groupdel $group ;; *) return 1 ;; esac return 0 } function add_user_illumos # { typeset group=$1 typeset user=$2 typeset basedir=$3 log_must useradd -g $group -d $basedir/$user -m $user return 0 } function del_user_illumos # { typeset user=$1 if id $user > /dev/null 2>&1; then log_must_retry "currently used" 6 userdel $user fi return 0 } function add_group_illumos # { typeset group=$1 typeset -i gid=100 while true; do groupadd -g $gid $group > /dev/null 2>&1 typeset -i ret=$? case $ret in 0) return 0 ;; # The gid is not unique 4) ((gid += 1)) ;; *) return 1 ;; esac done } function del_group_illumos # { typeset group=$1 groupmod -n $grp $grp > /dev/null 2>&1 typeset -i ret=$? case $ret in # Group does not exist. 6) return 0 ;; # Name already exists as a group name 9) log_must groupdel $grp ;; *) return 1 ;; esac } function add_user_linux # { typeset group=$1 typeset user=$2 typeset basedir=$3 log_must useradd -g $group -d $basedir/$user -m $user # Add new users to the same group and the command line utils. # This allows them to be run out of the original users home # directory as long as it permissioned to be group readable. cmd_group=$(stat --format="%G" $(which zfs)) log_must usermod -a -G $cmd_group $user return 0 } function del_user_linux # { typeset user=$1 if id $user > /dev/null 2>&1; then log_must_retry "currently used" 6 userdel $user fi return 0 } function add_group_linux # { typeset group=$1 # Assign 100 as the base gid, a larger value is selected for # Linux because for many distributions 1000 and under are reserved. while true; do groupadd $group > /dev/null 2>&1 typeset -i ret=$? case $ret in 0) return 0 ;; *) return 1 ;; esac done } function del_group_linux # { typeset group=$1 getent group $group > /dev/null 2>&1 typeset -i ret=$? case $ret in # Group does not exist. 
		2) return 0 ;;
		# Name already exists as a group name
		0) log_must groupdel $group ;;
		*) return 1 ;;
	esac

	return 0
}

#
# Add specified user to specified group
#
# $1 group name
# $2 user name
# $3 base of the homedir (optional)
#
function add_user #
{
	typeset group=$1
	typeset user=$2
	typeset basedir=${3:-"/var/tmp"}

	if ((${#group} == 0 || ${#user} == 0)); then
		log_fail "group name or user name are not defined."
	fi

	# Dispatch to the platform-specific implementation.
	case $(uname) in
		FreeBSD)
			add_user_freebsd "$group" "$user" "$basedir"
			;;
		Linux)
			add_user_linux "$group" "$user" "$basedir"
			;;
		*)
			add_user_illumos "$group" "$user" "$basedir"
			;;
	esac

	return 0
}

#
# Delete the specified user.
#
# $1 login name
# $2 base of the homedir (optional)
#
function del_user #
{
	typeset user=$1
	typeset basedir=${2:-"/var/tmp"}

	if ((${#user} == 0)); then
		log_fail "login name is necessary."
	fi

	case $(uname) in
		FreeBSD)
			del_user_freebsd "$user"
			;;
		Linux)
			del_user_linux "$user"
			;;
		*)
			del_user_illumos "$user"
			;;
	esac

	# Also remove the user's home directory.
	[[ -d $basedir/$user ]] && rm -fr $basedir/$user

	return 0
}

#
# Select valid gid and create specified group.
#
# $1 group name
#
function add_group #
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	case $(uname) in
		FreeBSD)
			add_group_freebsd "$group"
			;;
		Linux)
			add_group_linux "$group"
			;;
		*)
			add_group_illumos "$group"
			;;
	esac

	return 0
}

#
# Delete the specified group.
#
# $1 group name
#
function del_group #
{
	typeset group=$1

	if ((${#group} == 0)); then
		log_fail "group name is necessary."
	fi

	case $(uname) in
		FreeBSD)
			del_group_freebsd "$group"
			;;
		Linux)
			del_group_linux "$group"
			;;
		*)
			del_group_illumos "$group"
			;;
	esac

	return 0
}

#
# This function will return true if it's safe to destroy the pool passed
# as argument 1. It checks for pools based on zvols and files, and also
# files contained in a pool that may have a different mountpoint.
# function safe_to_destroy_pool { # $1 the pool name typeset pool="" typeset DONT_DESTROY="" # We check that by deleting the $1 pool, we're not # going to pull the rug out from other pools. Do this # by looking at all other pools, ensuring that they # aren't built from files or zvols contained in this pool. for pool in $(zpool list -H -o name) do ALTMOUNTPOOL="" # this is a list of the top-level directories in each of the # files that make up the path to the files the pool is based on FILEPOOL=$(zpool status -v $pool | grep /$1/ | \ awk '{print $1}') # this is a list of the zvols that make up the pool ZVOLPOOL=$(zpool status -v $pool | grep "$ZVOL_DEVDIR/$1$" \ | awk '{print $1}') # also want to determine if it's a file-based pool using an # alternate mountpoint... POOL_FILE_DIRS=$(zpool status -v $pool | \ grep / | awk '{print $1}' | \ awk -F/ '{print $2}' | grep -v "dev") for pooldir in $POOL_FILE_DIRS do OUTPUT=$(zfs list -H -r -o mountpoint $1 | \ grep "${pooldir}$" | awk '{print $1}') ALTMOUNTPOOL="${ALTMOUNTPOOL}${OUTPUT}" done if [ ! -z "$ZVOLPOOL" ] then DONT_DESTROY="true" log_note "Pool $pool is built from $ZVOLPOOL on $1" fi if [ ! -z "$FILEPOOL" ] then DONT_DESTROY="true" log_note "Pool $pool is built from $FILEPOOL on $1" fi if [ ! -z "$ALTMOUNTPOOL" ] then DONT_DESTROY="true" log_note "Pool $pool is built from $ALTMOUNTPOOL on $1" fi done if [ -z "${DONT_DESTROY}" ] then return 0 else log_note "Warning: it is not safe to destroy $1!" return 1 fi } # # Verify zfs operation with -p option work as expected # $1 operation, value could be create, clone or rename # $2 dataset type, value could be fs or vol # $3 dataset name # $4 new dataset name # function verify_opt_p_ops { typeset ops=$1 typeset datatype=$2 typeset dataset=$3 typeset newdataset=$4 if [[ $datatype != "fs" && $datatype != "vol" ]]; then log_fail "$datatype is not supported." 
fi # check parameters accordingly case $ops in create) newdataset=$dataset dataset="" if [[ $datatype == "vol" ]]; then ops="create -V $VOLSIZE" fi ;; clone) if [[ -z $newdataset ]]; then log_fail "newdataset should not be empty" \ "when ops is $ops." fi log_must datasetexists $dataset log_must snapexists $dataset ;; rename) if [[ -z $newdataset ]]; then log_fail "newdataset should not be empty" \ "when ops is $ops." fi log_must datasetexists $dataset ;; *) log_fail "$ops is not supported." ;; esac # make sure the upper level filesystem does not exist destroy_dataset "${newdataset%/*}" "-rRf" # without -p option, operation will fail log_mustnot zfs $ops $dataset $newdataset log_mustnot datasetexists $newdataset ${newdataset%/*} # with -p option, operation should succeed log_must zfs $ops -p $dataset $newdataset block_device_wait if ! datasetexists $newdataset ; then log_fail "-p option does not work for $ops" fi # when $ops is create or clone, redo the operation still return zero if [[ $ops != "rename" ]]; then log_must zfs $ops -p $dataset $newdataset fi return 0 } # # Get configuration of pool # $1 pool name # $2 config name # function get_config { typeset pool=$1 typeset config=$2 typeset alt_root if ! poolexists "$pool" ; then return 1 fi alt_root=$(zpool list -H $pool | awk '{print $NF}') if [[ $alt_root == "-" ]]; then value=$(zdb -C $pool | grep "$config:" | awk -F: \ '{print $2}') else value=$(zdb -e $pool | grep "$config:" | awk -F: \ '{print $2}') fi if [[ -n $value ]] ; then value=${value#'} value=${value%'} fi echo $value return 0 } # # Privated function. Random select one of items from arguments. 
#
# $1 count
# $2-n string
#
function _random_get
{
	typeset cnt=$1
	shift

	typeset str="$@"
	typeset -i ind
	# Pick a 1-based field index at random.
	((ind = RANDOM % cnt + 1))

	typeset ret=$(echo "$str" | cut -f $ind -d ' ')
	echo $ret
}

#
# Random select one of item from arguments which include NONE string
#
function random_get_with_non
{
	typeset -i cnt=$#
	# BUG FIX: was "((cnt =+ 1))", which assigned +1 to cnt instead of
	# incrementing it, so the extra "NONE" slot could never be selected
	# and only the first argument was ever returned.
	((cnt += 1))

	_random_get "$cnt" "$@"
}

#
# Random select one of item from arguments which doesn't include NONE string
#
function random_get
{
	_random_get "$#" "$@"
}

#
# Detect if the current system support slog
#
function verify_slog_support
{
	typeset dir=$TEST_BASE_DIR/disk.$$
	typeset pool=foo.$$
	typeset vdev=$dir/a
	typeset sdev=$dir/b

	mkdir -p $dir
	mkfile $MINVDEVSIZE $vdev $sdev

	typeset -i ret=0
	# A dry-run (-n) pool creation with a separate log device is enough
	# to tell whether slog is supported; nothing is actually created.
	if ! zpool create -n $pool $vdev log $sdev > /dev/null 2>&1; then
		ret=1
	fi
	rm -r $dir

	return $ret
}

#
# The function will generate a dataset name with specific length
# $1, the length of the name
# $2, the base string to construct the name
#
function gen_dataset_name
{
	typeset -i len=$1
	typeset basestr="$2"
	typeset -i baselen=${#basestr}
	typeset -i iter=0
	typeset l_name=""

	# Repeat the base string enough times to reach (or exceed) len.
	if ((len % baselen == 0)); then
		((iter = len / baselen))
	else
		((iter = len / baselen + 1))
	fi
	while ((iter > 0)); do
		l_name="${l_name}$basestr"
		((iter -= 1))
	done

	echo $l_name
}

#
# Get cksum tuple of dataset
# $1 dataset name
#
# sample zdb output:
# Dataset data/test [ZPL], ID 355, cr_txg 2413856, 31.0K, 7 objects, rootbp
# DVA[0]=<0:803046400:200> DVA[1]=<0:81199000:200> [L0 DMU objset] fletcher4
# lzjb LE contiguous unique double size=800L/200P birth=2413856L/2413856P
# fill=7 cksum=11ce125712:643a9c18ee2:125e25238fca0:254a3f74b59744
function datasetcksum
{
	typeset cksum
	# Flush outstanding writes so zdb sees the latest rootbp.
	sync
	cksum=$(zdb -vvv $1 | grep "^Dataset $1 \[" | grep "cksum" \
	    | awk -F= '{print $7}')
	echo $cksum
}

#
# Get cksum of file
# #1 file path
#
function checksum
{
	typeset cksum
	cksum=$(cksum $1 | awk '{print $1}')
	echo $cksum
}

#
# Get the given disk/slice state from the specific field of the pool
#
function get_device_state #pool disk field("", "spares","logs")
{
	typeset pool=$1
	typeset disk=${2#$DEV_DSKDIR/}
	typeset field=${3:-$pool}

	# Scan 'zpool status' output: once the config section and the
	# requested field (pool name, "spares" or "logs") are seen, print
	# the state column of the matching device line.
	state=$(zpool status -v "$pool" 2>/dev/null | \
	    nawk -v device=$disk -v pool=$pool -v field=$field \
	    'BEGIN {startconfig=0; startfield=0; }
	    /config:/ {startconfig=1}
	    (startconfig==1) && ($1==field) {startfield=1; next;}
	    (startfield==1) && ($1==device) {print $2; exit;}
	    (startfield==1) && ($1==field || $1 ~ "^spares$" || $1 ~ "^logs$") {startfield=0}')
	echo $state
}

#
# print the given directory filesystem type
#
# $1 directory name
#
function get_fstype
{
	typeset dir=$1

	if [[ -z $dir ]]; then
		log_fail "Usage: get_fstype "
	fi

	#
	# $ df -n /
	# / : ufs
	#
	df -n $dir | awk '{print $3}'
}

#
# Given a disk, label it to VTOC regardless what label was on the disk
# $1 disk
#
function labelvtoc
{
	typeset disk=$1
	if [[ -z $disk ]]; then
		log_fail "The disk name is unspecified."
	fi
	typeset label_file=/var/tmp/labelvtoc.$$
	typeset arch=$(uname -p)

	if is_linux || is_freebsd; then
		log_note "Currently unsupported by the test framework"
		return 1
	fi

	if [[ $arch == "i386" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
		echo "q" >> $label_file
		fdisk -B $disk >/dev/null 2>&1
		# wait a while for fdisk finishes
		sleep 60
	elif [[ $arch == "sparc" ]]; then
		echo "label" > $label_file
		echo "0" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "" >> $label_file
		echo "q" >> $label_file
	else
		log_fail "unknown arch type"
	fi

	format -e -s -d $disk -f $label_file
	typeset -i ret_val=$?
	rm -f $label_file
	#
	# wait the format to finish
	#
	sleep 60
	if ((ret_val != 0)); then
		log_fail "unable to label $disk as VTOC."
	fi

	return 0
}

#
# check if the system was installed as zfsroot or not
# return: 0 if zfsroot, non-zero if not
#
function is_zfsroot
{
	df -n / | grep zfs > /dev/null 2>&1
	return $?
}

#
# get the root filesystem name if it's zfsroot system.
#
# return: root filesystem name
function get_rootfs
{
	typeset rootfs=""

	if is_freebsd; then
		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
	elif ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 == "zfs") print $1}' \
		    /etc/mnttab)
	fi
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootfs"
	fi
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		echo $rootfs
	else
		log_fail "This is not a zfsroot system."
	fi
}

#
# get the rootfs's pool name
# return:
#       rootpool name
#
function get_rootpool
{
	typeset rootfs=""
	typeset rootpool=""

	if is_freebsd; then
		rootfs=$(mount -p | awk '$2 == "/" && $3 == "zfs" {print $1}')
	elif ! is_linux; then
		rootfs=$(awk '{if ($2 == "/" && $3 =="zfs") print $1}' \
		    /etc/mnttab)
	fi
	if [[ -z "$rootfs" ]]; then
		log_fail "Can not get rootpool"
	fi
	zfs list $rootfs > /dev/null 2>&1
	if (($? == 0)); then
		# The pool is everything before the first '/'.
		echo ${rootfs%%/*}
	else
		log_fail "This is not a zfsroot system."
	fi
}

#
# Get the word numbers from a string separated by white space
#
function get_word_count
{
	echo $1 | wc -w
}

#
# To verify if the require numbers of disks is given
#
function verify_disk_count
{
	typeset -i min=${2:-1}

	typeset -i count=$(get_word_count "$1")

	if ((count < min)); then
		log_untested "A minimum of $min disks is required to run." \
		    " You specified $count disk(s)"
	fi
}

function ds_is_volume
{
	typeset type=$(get_prop type $1)
	[[ $type = "volume" ]] && return 0
	return 1
}

function ds_is_filesystem
{
	typeset type=$(get_prop type $1)
	[[ $type = "filesystem" ]] && return 0
	return 1
}

function ds_is_snapshot
{
	typeset type=$(get_prop type $1)
	[[ $type = "snapshot" ]] && return 0
	return 1
}

#
# Check if Trusted Extensions are installed and enabled
#
function is_te_enabled
{
	# BUG FIX: grep previously ran without -q, so the matched "enabled"
	# text leaked to stdout even though callers only consume the exit
	# status.  The pipeline's status is returned implicitly, replacing
	# the redundant "$?" if/else.
	svcs -H -o state labeld 2>/dev/null | grep -q "enabled"
}

# Utility function to determine if a system has multiple cpus.
function is_mp { if is_linux; then (($(nproc) > 1)) elif is_freebsd; then sysctl -n kern.smp.cpus else (($(psrinfo | wc -l) > 1)) fi return $? } function get_cpu_freq { if is_linux; then lscpu | awk '/CPU MHz/ { print $3 }' elif is_freebsd; then sysctl -n hw.clockrate else psrinfo -v 0 | awk '/processor operates at/ {print $6}' fi } # Run the given command as the user provided. function user_run { typeset user=$1 shift log_note "user: $user" log_note "cmd: $*" typeset out=$TEST_BASE_DIR/out typeset err=$TEST_BASE_DIR/err sudo -Eu $user env PATH="$PATH" ksh <<<"$*" >$out 2>$err typeset res=$? log_note "out: $(<$out)" log_note "err: $(<$err)" return $res } # # Check if the pool contains the specified vdevs # # $1 pool # $2..n ... # # Return 0 if the vdevs are contained in the pool, 1 if any of the specified # vdevs is not in the pool, and 2 if pool name is missing. # function vdevs_in_pool { typeset pool=$1 typeset vdev if [[ -z $pool ]]; then log_note "Missing pool name." return 2 fi shift # We could use 'zpool list' to only get the vdevs of the pool but we # can't reference a mirror/raidz vdev using its ID (i.e mirror-0), # therefore we use the 'zpool status' output. typeset tmpfile=$(mktemp) zpool status -v "$pool" | grep -A 1000 "config:" >$tmpfile for vdev in $@; do grep -w ${vdev##*/} $tmpfile >/dev/null 2>&1 [[ $? -ne 0 ]] && return 1 done rm -f $tmpfile return 0; } function get_max { typeset -l i max=$1 shift for i in "$@"; do max=$((max > i ? max : i)) done echo $max } function get_min { typeset -l i min=$1 shift for i in "$@"; do min=$((min < i ? min : i)) done echo $min } # Write data that can be compressed into a directory function write_compressible { typeset dir=$1 typeset megs=$2 typeset nfiles=${3:-1} typeset bs=${4:-1024k} typeset fname=${5:-file} [[ -d $dir ]] || log_fail "No directory: $dir" # Under Linux fio is not currently used since its behavior can # differ significantly across versions. 
This includes missing # command line options and cases where the --buffer_compress_* # options fail to behave as expected. if is_linux; then typeset file_bytes=$(to_bytes $megs) typeset bs_bytes=4096 typeset blocks=$(($file_bytes / $bs_bytes)) for (( i = 0; i < $nfiles; i++ )); do truncate -s $file_bytes $dir/$fname.$i # Write every third block to get 66% compression. for (( j = 0; j < $blocks; j += 3 )); do dd if=/dev/urandom of=$dir/$fname.$i \ seek=$j bs=$bs_bytes count=1 \ conv=notrunc >/dev/null 2>&1 done done else log_must eval "fio \ --name=job \ --fallocate=0 \ --minimal \ --randrepeat=0 \ --buffer_compress_percentage=66 \ --buffer_compress_chunk=4096 \ --directory=$dir \ --numjobs=$nfiles \ --nrfiles=$nfiles \ --rw=write \ --bs=$bs \ --filesize=$megs \ --filename_format='$fname.\$jobnum' >/dev/null" fi } function get_objnum { typeset pathname=$1 typeset objnum [[ -e $pathname ]] || log_fail "No such file or directory: $pathname" if is_freebsd; then objnum=$(stat -f "%i" $pathname) else objnum=$(stat -c %i $pathname) fi echo $objnum } # # Sync data to the pool # # $1 pool name # $2 boolean to force uberblock (and config including zpool cache file) update # function sync_pool #pool { typeset pool=${1:-$TESTPOOL} typeset force=${2:-false} if [[ $force == true ]]; then log_must zpool sync -f $pool else log_must zpool sync $pool fi return 0 } # # Wait for zpool 'freeing' property drops to zero. # # $1 pool name # function wait_freeing #pool { typeset pool=${1:-$TESTPOOL} while true; do [[ "0" == "$(zpool list -Ho freeing $pool)" ]] && break log_must sleep 1 done } # # Wait for every device replace operation to complete # # $1 pool name # function wait_replacing #pool { typeset pool=${1:-$TESTPOOL} while true; do [[ "" == "$(zpool status $pool | awk '/replacing-[0-9]+/ {print $1}')" ]] && break log_must sleep 1 done } # # Wait for a pool to be scrubbed # # $1 pool name # function wait_scrubbed { typeset pool=${1:-$TESTPOOL} while ! 
is_pool_scrubbed $pool ; do sleep 1 done } # Backup the zed.rc in our test directory so that we can edit it for our test. # # Returns: Backup file name. You will need to pass this to zed_rc_restore(). function zed_rc_backup { zedrc_backup="$(mktemp)" cp $ZEDLET_DIR/zed.rc $zedrc_backup echo $zedrc_backup } function zed_rc_restore { mv $1 $ZEDLET_DIR/zed.rc } # # Setup custom environment for the ZED. # # $@ Optional list of zedlets to run under zed. function zed_setup { if ! is_linux; then log_unsupported "No zed on $(uname)" fi if [[ ! -d $ZEDLET_DIR ]]; then log_must mkdir $ZEDLET_DIR fi if [[ ! -e $VDEVID_CONF ]]; then log_must touch $VDEVID_CONF fi if [[ -e $VDEVID_CONF_ETC ]]; then log_fail "Must not have $VDEVID_CONF_ETC file present on system" fi EXTRA_ZEDLETS=$@ # Create a symlink for /etc/zfs/vdev_id.conf file. log_must ln -s $VDEVID_CONF $VDEVID_CONF_ETC # Setup minimal ZED configuration. Individual test cases should # add additional ZEDLETs as needed for their specific test. log_must cp ${ZEDLET_ETC_DIR}/zed.rc $ZEDLET_DIR log_must cp ${ZEDLET_ETC_DIR}/zed-functions.sh $ZEDLET_DIR # Scripts must only be user writable. if [[ -n "$EXTRA_ZEDLETS" ]] ; then saved_umask=$(umask) log_must umask 0022 for i in $EXTRA_ZEDLETS ; do log_must cp ${ZEDLET_LIBEXEC_DIR}/$i $ZEDLET_DIR done log_must umask $saved_umask fi # Customize the zed.rc file to enable the full debug log. log_must sed -i '/\#ZED_DEBUG_LOG=.*/d' $ZEDLET_DIR/zed.rc echo "ZED_DEBUG_LOG=$ZED_DEBUG_LOG" >>$ZEDLET_DIR/zed.rc } # # Cleanup custom ZED environment. # # $@ Optional list of zedlets to remove from our test zed.d directory. function zed_cleanup { if ! 
is_linux; then return fi EXTRA_ZEDLETS=$@ log_must rm -f ${ZEDLET_DIR}/zed.rc log_must rm -f ${ZEDLET_DIR}/zed-functions.sh log_must rm -f ${ZEDLET_DIR}/all-syslog.sh log_must rm -f ${ZEDLET_DIR}/all-debug.sh log_must rm -f ${ZEDLET_DIR}/state if [[ -n "$EXTRA_ZEDLETS" ]] ; then for i in $EXTRA_ZEDLETS ; do log_must rm -f ${ZEDLET_DIR}/$i done fi log_must rm -f $ZED_LOG log_must rm -f $ZED_DEBUG_LOG log_must rm -f $VDEVID_CONF_ETC log_must rm -f $VDEVID_CONF rmdir $ZEDLET_DIR } # # Check if ZED is currently running, if not start ZED. # function zed_start { if ! is_linux; then return fi # ZEDLET_DIR=/var/tmp/zed if [[ ! -d $ZEDLET_DIR ]]; then log_must mkdir $ZEDLET_DIR fi # Verify the ZED is not already running. pgrep -x zed > /dev/null if (($? == 0)); then log_note "ZED already running" else log_note "Starting ZED" # run ZED in the background and redirect foreground logging # output to $ZED_LOG. log_must truncate -s 0 $ZED_DEBUG_LOG log_must eval "zed -vF -d $ZEDLET_DIR -P $PATH" \ "-s $ZEDLET_DIR/state -j 1 2>$ZED_LOG &" fi return 0 } # # Kill ZED process # function zed_stop { if ! is_linux; then return fi log_note "Stopping ZED" while true; do zedpids="$(pgrep -x zed)" [ "$?" -ne 0 ] && break log_must kill $zedpids sleep 1 done return 0 } # # Drain all zevents # function zed_events_drain { while [ $(zpool events -H | wc -l) -ne 0 ]; do sleep 1 zpool events -c >/dev/null done } # Set a variable in zed.rc to something, un-commenting it in the process. # # $1 variable # $2 value function zed_rc_set { var="$1" val="$2" # Remove the line cmd="'/$var/d'" eval sed -i $cmd $ZEDLET_DIR/zed.rc # Add it at the end echo "$var=$val" >> $ZEDLET_DIR/zed.rc } # # Check is provided device is being active used as a swap device. # function is_swap_inuse { typeset device=$1 if [[ -z $device ]] ; then log_note "No device specified." 
return 1 fi if is_linux; then swapon -s | grep -w $(readlink -f $device) > /dev/null 2>&1 elif is_freebsd; then swapctl -l | grep -w $device else swap -l | grep -w $device > /dev/null 2>&1 fi return $? } # # Setup a swap device using the provided device. # function swap_setup { typeset swapdev=$1 if is_linux; then log_must eval "mkswap $swapdev > /dev/null 2>&1" log_must swapon $swapdev elif is_freebsd; then log_must swapctl -a $swapdev else log_must swap -a $swapdev fi return 0 } # # Cleanup a swap device on the provided device. # function swap_cleanup { typeset swapdev=$1 if is_swap_inuse $swapdev; then if is_linux; then log_must swapoff $swapdev elif is_freebsd; then log_must swapoff $swapdev else log_must swap -d $swapdev fi fi return 0 } # # Set a global system tunable (64-bit value) # # $1 tunable name (use a NAME defined in tunables.cfg) # $2 tunable values # function set_tunable64 { set_tunable_impl "$1" "$2" Z } # # Set a global system tunable (32-bit value) # # $1 tunable name (use a NAME defined in tunables.cfg) # $2 tunable values # function set_tunable32 { set_tunable_impl "$1" "$2" W } function set_tunable_impl { typeset name="$1" typeset value="$2" typeset mdb_cmd="$3" typeset module="${4:-zfs}" eval "typeset tunable=\$$name" case "$tunable" in UNSUPPORTED) log_unsupported "Tunable '$name' is unsupported on $(uname)" ;; "") log_fail "Tunable '$name' must be added to tunables.cfg" ;; *) ;; esac [[ -z "$value" ]] && return 1 [[ -z "$mdb_cmd" ]] && return 1 case "$(uname)" in Linux) typeset zfs_tunables="/sys/module/$module/parameters" [[ -w "$zfs_tunables/$tunable" ]] || return 1 cat >"$zfs_tunables/$tunable" <<<"$value" return $? ;; FreeBSD) sysctl vfs.zfs.$tunable=$value return "$?" ;; SunOS) [[ "$module" -eq "zfs" ]] || return 1 echo "${tunable}/${mdb_cmd}0t${value}" | mdb -kw return $? 
;; esac } # # Get a global system tunable # # $1 tunable name (use a NAME defined in tunables.cfg) # function get_tunable { get_tunable_impl "$1" } function get_tunable_impl { typeset name="$1" typeset module="${2:-zfs}" eval "typeset tunable=\$$name" case "$tunable" in UNSUPPORTED) log_unsupported "Tunable '$name' is unsupported on $(uname)" ;; "") log_fail "Tunable '$name' must be added to tunables.cfg" ;; *) ;; esac case "$(uname)" in Linux) typeset zfs_tunables="/sys/module/$module/parameters" [[ -f "$zfs_tunables/$tunable" ]] || return 1 cat $zfs_tunables/$tunable return $? ;; FreeBSD) sysctl -n vfs.zfs.$tunable ;; SunOS) [[ "$module" -eq "zfs" ]] || return 1 ;; esac return 1 } # # Prints the current time in seconds since UNIX Epoch. # function current_epoch { printf '%(%s)T' } # # Get decimal value of global uint32_t variable using mdb. # function mdb_get_uint32 { typeset variable=$1 typeset value value=$(mdb -k -e "$variable/X | ::eval .=U") if [[ $? -ne 0 ]]; then log_fail "Failed to get value of '$variable' from mdb." return 1 fi echo $value return 0 } # # Set global uint32_t variable to a decimal value using mdb. # function mdb_set_uint32 { typeset variable=$1 typeset value=$2 mdb -kw -e "$variable/W 0t$value" > /dev/null if [[ $? -ne 0 ]]; then echo "Failed to set '$variable' to '$value' in mdb." return 1 fi return 0 } # # Set global scalar integer variable to a hex value using mdb. # Note: Target should have CTF data loaded. # function mdb_ctf_set_int { typeset variable=$1 typeset value=$2 mdb -kw -e "$variable/z $value" > /dev/null if [[ $? -ne 0 ]]; then echo "Failed to set '$variable' to '$value' in mdb." return 1 fi return 0 } # # Compute MD5 digest for given file or stdin if no file given. # Note: file path must not contain spaces # function md5digest { typeset file=$1 case $(uname) in FreeBSD) md5 -q $file ;; *) md5sum -b $file | awk '{ print $1 }' ;; esac } # # Compute SHA256 digest for given file or stdin if no file given. 
# Note: file path must not contain spaces # function sha256digest { typeset file=$1 case $(uname) in FreeBSD) sha256 -q $file ;; *) sha256sum -b $file | awk '{ print $1 }' ;; esac } function new_fs # { case $(uname) in FreeBSD) newfs "$@" ;; *) echo y | newfs -v "$@" ;; esac } function stat_size # { typeset path=$1 case $(uname) in FreeBSD) stat -f %z "$path" ;; *) stat -c %s "$path" ;; esac } function stat_ctime # { typeset path=$1 case $(uname) in FreeBSD) stat -f %c "$path" ;; *) stat -c %Z "$path" ;; esac } function stat_crtime # { typeset path=$1 case $(uname) in FreeBSD) stat -f %B "$path" ;; *) stat -c %W "$path" ;; esac } # Run a command as if it was being run in a TTY. # # Usage: # # faketty command # function faketty { if is_freebsd; then script -q /dev/null env "$@" else script --return --quiet -c "$*" /dev/null fi } # # Produce a random permutation of the integers in a given range (inclusive). # function range_shuffle # begin end { typeset -i begin=$1 typeset -i end=$2 seq ${begin} ${end} | sort -R } # # Cross-platform xattr helpers # function get_xattr # name path { typeset name=$1 typeset path=$2 case $(uname) in FreeBSD) getextattr -qq user "${name}" "${path}" ;; *) attr -qg "${name}" "${path}" ;; esac } function set_xattr # name value path { typeset name=$1 typeset value=$2 typeset path=$3 case $(uname) in FreeBSD) setextattr user "${name}" "${value}" "${path}" ;; *) attr -qs "${name}" -V "${value}" "${path}" ;; esac } function set_xattr_stdin # name value { typeset name=$1 typeset path=$2 case $(uname) in FreeBSD) setextattr -i user "${name}" "${path}" ;; *) attr -qs "${name}" "${path}" ;; esac } function rm_xattr # name path { typeset name=$1 typeset path=$2 case $(uname) in FreeBSD) rmextattr -q user "${name}" "${path}" ;; *) attr -qr "${name}" "${path}" ;; esac } function ls_xattr # path { typeset path=$1 case $(uname) in FreeBSD) lsextattr -qq user "${path}" ;; *) attr -ql "${path}" ;; esac } function kstat # stat flags? 
{ typeset stat=$1 typeset flags=${2-"-n"} case $(uname) in FreeBSD) sysctl $flags kstat.zfs.misc.$stat ;; Linux) typeset zfs_kstat="/proc/spl/kstat/zfs/$stat" [[ -f "$zfs_kstat" ]] || return 1 cat $zfs_kstat ;; *) false ;; esac } function get_arcstat # stat { typeset stat=$1 case $(uname) in FreeBSD) kstat arcstats.$stat ;; Linux) kstat arcstats | awk "/$stat/ { print \$3 }" ;; *) false ;; esac } function punch_hole # offset length file { typeset offset=$1 typeset length=$2 typeset file=$3 case $(uname) in FreeBSD) truncate -d -o $offset -l $length "$file" ;; Linux) fallocate --punch-hole --offset $offset --length $length "$file" ;; *) false ;; esac } # # Wait for the specified arcstat to reach non-zero quiescence. # If echo is 1 echo the value after reaching quiescence, otherwise # if echo is 0 print the arcstat we are waiting on. # function arcstat_quiescence # stat echo { typeset stat=$1 typeset echo=$2 typeset do_once=true if [[ $echo -eq 0 ]]; then echo "Waiting for arcstat $1 quiescence." fi while $do_once || [ $stat1 -ne $stat2 ] || [ $stat2 -eq 0 ]; do typeset stat1=$(get_arcstat $stat) sleep 2 typeset stat2=$(get_arcstat $stat) do_once=false done if [[ $echo -eq 1 ]]; then echo $stat2 fi } function arcstat_quiescence_noecho # stat { typeset stat=$1 arcstat_quiescence $stat 0 } function arcstat_quiescence_echo # stat { typeset stat=$1 arcstat_quiescence $stat 1 } # # Given an array of pids, wait until all processes # have completed and check their return status. # function wait_for_children #children { rv=0 children=("$@") for child in "${children[@]}" do child_exit=0 wait ${child} || child_exit=$? 
if [ $child_exit -ne 0 ]; then echo "child ${child} failed with ${child_exit}" rv=1 fi done return $rv } diff --git a/tests/zfs-tests/tests/functional/reservation/reservation_021_neg.ksh b/tests/zfs-tests/tests/functional/reservation/reservation_021_neg.ksh index c99a82c5db71..07da7e96306e 100755 --- a/tests/zfs-tests/tests/functional/reservation/reservation_021_neg.ksh +++ b/tests/zfs-tests/tests/functional/reservation/reservation_021_neg.ksh @@ -1,72 +1,72 @@ #!/bin/ksh -p # # This file and its contents are supplied under the terms of the # Common Development and Distribution License ("CDDL"), version 1.0. # You may only use this file in accordance with the terms of version # 1.0 of the CDDL. # # A full copy of the text of the CDDL should have accompanied this # source. A copy of the CDDL is also available via the Internet at # http://www.illumos.org/license/CDDL. # # # Copyright 2018 Joyent, Inc. # . $STF_SUITE/include/libtest.shlib . $STF_SUITE/tests/functional/reservation/reservation.shlib # # DESCRIPTION: # # The use of refreservation=auto on a filesystem does not change the # refreservation and results in an error. # # STRATEGY: # 1) Create a filesystem # 2) Verify that zfs set refreservation=auto fails without changing # refreservation from none. # 3) Set refreservation to a valid value. # 4) Verify that zfs set refreservation=auto fails without changing # refreservation from the previous value. 
# verify_runnable "both" -fs=$TESTPOOL/$TESTFS/$(basename $0).$$ +fs=$TESTPOOL/$TESTFS/${0##*/}.$$ function cleanup { destroy_dataset "$fs" "-f" } log_onexit cleanup log_assert "refreservation=auto on a filesystem generates an error without" \ "changing refreservation" space_avail=$(get_prop available $TESTPOOL) (( fs_size = space_avail / 4 )) # Create a filesystem with no refreservation log_must zfs create $fs resv=$(get_prop refreservation $fs) log_must test $resv -eq 0 # Verify that refreservation=auto fails without altering refreservation log_mustnot zfs set refreservation=auto $fs resv=$(get_prop refreservation $fs) log_must test $resv -eq 0 # Set refreservation and verify log_must zfs set refreservation=$fs_size $fs resv=$(get_prop refreservation $fs) log_must test $resv -eq $fs_size # Verify that refreservation=auto fails without altering refreservation log_mustnot zfs set refreservation=auto $fs resv=$(get_prop refreservation $fs) log_must test $resv -eq $fs_size log_pass "refreservation=auto does not work on filesystems, as expected"